elasticsearch-jdbc
Unable to import data from MySQL
I am trying to import data from MySQL using the JDBC importer.
Elasticsearch server version: 1.5.2
JDBC importer version: 1.5.2.0
The index is created properly, but no data gets imported.
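For reference, this is roughly how I confirm that the index exists but stays empty (a minimal sketch; it assumes the node also serves the default HTTP port 9200 on 10.253.129.68):

# check index existence and document count via the REST API (HTTP port 9200 assumed)
curl -s 'http://10.253.129.68:9200/smart_users_v1/_count?pretty'
# the count stays at 0 even after several scheduled runs, matching the importer metrics below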
The script is:
#!/bin/bash
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
bin=${DIR}/../bin
lib=${DIR}/../lib
echo '
{
"type" : "jdbc",
"jdbc" : {
"user": "root",
"url": "jdbc:mysql://localhost:3306/smart_api",
"password": "root",
"elasticsearch": {
"host": "10.253.129.68",
"cluster": "elasticsearch",
"port": "9300"
},
"schedule" : "0 0-59 0-23 ? * ",
"locale" : "en_US",
"index" : "smart_users_v1",
"statefile" : "smart_users_v1.json",
"metrics" : {
"lastexecutionstart" : "2016-01-01T18:36:00.052Z",
"lastexecutionend" : "2016-01-01T18:36:28.760Z",
"counter" : "1235",
"enabled" : "true",
"logger" : {
"json" : "true",
"plain" : "false"
}
},
"type" : "users_detail",
"index_settings" : {
"index" : {
"number_of_shards" : 1
}
},
"sql" : [
{
"statement" : "select * from users where Unix_timestamp(updated_by) > Unix_timestamp(?)",
"parameter" : [ "$metrics.lastexecutionstart" ]
}
]
}
}' | java \
    -cp "${lib}/*" \
    -Dlog4j.configurationFile=${bin}/log4j2.xml \
    org.xbib.tools.Runner \
    org.xbib.tools.JDBCImporter
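As a sanity check on the source side, the importer's statement can also be run directly against MySQL with the lastexecutionstart value substituted for the ? placeholder (a rough sketch; it assumes the mysql client is installed locally and that updated_by is a date/time column):

# run the importer's SQL by hand with the state value in place of the ? parameter
mysql -u root -proot smart_api -e \
  "select count(*) from users where Unix_timestamp(updated_by) > Unix_timestamp('2016-01-01 18:36:00')"
# if this returns 0, the importer has nothing to fetch and would index no documents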
#############################################################
The JDBC importer logs are:
[15:45:00,002][INFO ][importer.jdbc ][pool-2-thread-2] index name = smart_users_v1, concrete index name = smart_users_v1
[15:45:00,004][INFO ][importer.jdbc ][pool-6-thread-1] strategy standard: settings = {password=root, schedule=0 0-59 0-23 ? * *, user=root, index_settings.index.number_of_shards=1, elasticsearch.host=10.253.129.68, index=smart_users_v1, metrics.logger.plain=false, statefile=smart_users_v1.json, elasticsearch.port=9300, metrics.counter=1235, metrics.lastexecutionstart=2016-01-01T18:36:00.052Z, url=jdbc:mysql://localhost:3306/smart_api, metrics.enabled=true, locale=en_US, type=users_detail, elasticsearch.cluster=elasticsearch, metrics.lastexecutionend=2016-01-01T18:36:28.760Z, metrics.logger.json=true, sql.0.statement=select * from users where Unix_timestamp(updated_by) > Unix_timestamp(?), sql.0.parameter.0=$metrics.lastexecutionstart}, context = org.xbib.elasticsearch.jdbc.strategy.standard.StandardContext@39c9c7b5
[15:45:00,005][INFO ][importer.jdbc.context.standard][pool-6-thread-1] found sink class org.xbib.elasticsearch.jdbc.strategy.standard.StandardSink@4d7d066b
[15:45:00,006][INFO ][importer.jdbc.context.standard][pool-6-thread-1] found source class org.xbib.elasticsearch.jdbc.strategy.standard.StandardSource@4a010319
[15:45:00,008][INFO ][BaseTransportClient ][pool-6-thread-1] creating transport client, java version 1.8.0_121, effective settings {cluster.name=elasticsearch, host.0=10.253.129.68, port=9300, sniff=false, autodiscover=false, name=importer, client.transport.ignore_cluster_name=false, client.transport.ping_timeout=5s, client.transport.nodes_sampler_interval=5s}
[15:45:00,010][INFO ][org.elasticsearch.plugins][pool-6-thread-1] [importer] loaded [helper], sites []
[15:45:00,014][INFO ][metrics.source.json ][pool-5-thread-1] {"totalrows":0,"elapsed":59989,"bytes":0,"avg":0.0,"dps":0.0,"mbps":0.0}
[15:45:00,015][INFO ][metrics.sink.json ][pool-5-thread-1] {"elapsed":59499,"submitted":0,"succeeded":0,"failed":0,"bytes":0,"avg":0.0,"dps":0.0,"mbps":0.0}
[15:45:00,082][INFO ][BaseTransportClient ][pool-6-thread-1] trying to connect to [inet[/10.253.129.68:9300]]
[15:45:00,101][INFO ][BaseTransportClient ][pool-6-thread-1] connected to [[Space Phantom][tX4xYWcpTzOF28C726YqRQ][tarun][inet[/10.253.129.68:9300]]]
[15:45:00,101][INFO ][importer.jdbc.sink.standard][pool-6-thread-1] creating index smart_users_v1 with settings = {index.number_of_shards=1} and mappings =
[15:45:00,101][INFO ][org.xbib.elasticsearch.helper.client.BaseIngestTransportClient][pool-6-thread-1] settings = {index={number_of_shards=1}}
[15:45:00,156][WARN ][importer.jdbc.sink.standard][pool-6-thread-1] [smart_users_v1] already exists
[15:45:00,187][INFO ][importer.jdbc.context.standard][pool-6-thread-1] state persisted to smart_users_v1.json
[15:45:30,014][INFO ][metrics.source.json ][pool-5-thread-1] {"totalrows":0,"elapsed":89989,"bytes":0,"avg":0.0,"dps":0.0,"mbps":0.0}
[15:45:30,014][INFO ][metrics.sink.json ][pool-5-thread-1] {"elapsed":29913,"submitted":0,"succeeded":0,"failed":0,"bytes":0,"avg":0.0,"dps":0.0,"mbps":0.0}

The same block repeats on every scheduled run (15:46, 15:47, 15:48, 15:49, 15:50, ...): the importer creates a transport client, connects to [Space Phantom], warns that [smart_users_v1] already exists, and persists the state to smart_users_v1.json, while the metrics stay at "totalrows":0, "submitted":0, "succeeded":0, "failed":0 throughout. Only the thread-pool names and object hashes differ between runs.
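Since every run logs "state persisted to smart_users_v1.json" and the ? parameter is bound to $metrics.lastexecutionstart, I also look at the state file between runs to see which timestamp actually feeds the query (a small check; it assumes the file is written to the working directory the script runs from):

# inspect the persisted state that supplies $metrics.lastexecutionstart on the next run
cat smart_users_v1.json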
The Elasticsearch server logs show:
[2017-02-07 15:45:00,159][INFO ][index.shard ] [Space Phantom] [smart_users_v1][0] updating refresh_interval from [1s] to [-1]
[2017-02-07 15:45:00,190][INFO ][index.shard ] [Space Phantom] [smart_users_v1][0] updating refresh_interval from [-1] to [1s]
[2017-02-07 15:46:00,056][INFO ][index.shard ] [Space Phantom] [smart_users_v1][0] updating refresh_interval from [1s] to [-1]
[2017-02-07 15:46:00,085][INFO ][index.shard ] [Space Phantom] [smart_users_v1][0] updating refresh_interval from [-1] to [1s]
[2017-02-07 15:47:00,060][INFO ][index.shard ] [Space Phantom] [smart_users_v1][0] updating refresh_interval from [1s] to [-1]
[2017-02-07 15:47:00,085][INFO ][index.shard ] [Space Phantom] [smart_users_v1][0] updating refresh_interval from [-1] to [1s]
[2017-02-07 15:48:00,105][INFO ][index.shard ] [Space Phantom] [smart_users_v1][0] updating refresh_interval from [1s] to [-1]
[2017-02-07 15:48:00,134][INFO ][index.shard ] [Space Phantom] [smart_users_v1][0] updating refresh_interval from [-1] to [1s]
[2017-02-07 15:49:00,060][INFO ][index.shard ] [Space Phantom] [smart_users_v1][0] updating refresh_interval from [1s] to [-1]
[2017-02-07 15:49:00,085][INFO ][index.shard ] [Space Phantom] [smart_users_v1][0] updating refresh_interval from [-1] to [1s]
[2017-02-07 15:50:00,041][INFO ][index.shard ] [Space Phantom] [smart_users_v1][0] updating refresh_interval from [1s] to [-1]
[2017-02-07 15:50:00,067][INFO ][index.shard ] [Space Phantom] [smart_users_v1][0] updating refresh_interval from [-1] to [1s]
[2017-02-07 15:51:00,048][INFO ][index.shard ] [Space Phantom] [smart_users_v1][0] updating refresh_interval from [1s] to [-1]
[2017-02-07 15:51:00,071][INFO ][index.shard ] [Space Phantom] [smart_users_v1][0] updating refresh_interval from [-1] to [1s]
[2017-02-07 15:52:00,072][INFO ][index.shard ] [Space Phantom] [smart_users_v1][0] updating refresh_interval from [1s] to [-1]
[2017-02-07 15:52:00,107][INFO ][index.shard ] [Space Phantom] [smart_users_v1][0] updating refresh_interval from [-1] to [1s]
[2017-02-07 15:53:00,039][INFO ][index.shard ] [Space Phantom] [smart_users_v1][0] updating refresh_interval from [1s] to [-1]
[2017-02-07 15:53:00,063][INFO ][index.shard ] [Space Phantom] [smart_users_v1][0] updating refresh_interval from [-1] to [1s]
[2017-02-07 15:54:00,057][INFO ][index.shard ] [Space Phantom] [smart_users_v1][0] updating refresh_interval from [1s] to [-1]
[2017-02-07 15:54:00,089][INFO ][index.shard ] [Space Phantom] [smart_users_v1][0] updating refresh_interval from [-1] to [1s]
[2017-02-07 15:55:00,034][INFO ][index.shard ] [Space Phantom] [smart_users_v1][0] updating refresh_interval from [1s] to [-1]
[2017-02-07 15:55:00,058][INFO ][index.shard ] [Space Phantom] [smart_users_v1][0] updating refresh_interval from [-1] to [1s]