2020-02-11T17:56:38,858 INFO [task-runner-0-priority-0] io.druid.indexer.DetermineHashedPartitionsJob - Creating [0] shards
2020-02-11T17:56:38,860 INFO [task-runner-0-priority-0] io.druid.indexer.DetermineHashedPartitionsJob - Found approximately [0] rows in data.
2020-02-11T17:56:38,860 INFO [task-runner-0-priority-0] io.druid.indexer.DetermineHashedPartitionsJob - Creating [0] shards
2020-02-11T17:56:38,862 INFO [task-runner-0-priority-0] io.druid.indexer.DetermineHashedPartitionsJob - Found approximately [0] rows in data.
2020-02-11T17:56:38,862 INFO [task-runner-0-priority-0] io.druid.indexer.DetermineHashedPartitionsJob - Creating [0] shards
2020-02-11T17:56:38,864 INFO [task-runner-0-priority-0] io.druid.indexer.DetermineHashedPartitionsJob - Found approximately [0] rows in data.
2020-02-11T17:56:38,865 INFO [task-runner-0-priority-0] io.druid.indexer.DetermineHashedPartitionsJob - Creating [0] shards
2020-02-11T17:56:38,867 INFO [task-runner-0-priority-0] io.druid.indexer.DetermineHashedPartitionsJob - Found approximately [0] rows in data.
2020-02-11T17:56:38,867 INFO [task-runner-0-priority-0] io.druid.indexer.DetermineHashedPartitionsJob - Creating [0] shards
2020-02-11T17:56:38,869 INFO [task-runner-0-priority-0] io.druid.indexer.DetermineHashedPartitionsJob - Found approximately [0] rows in data.
2020-02-11T17:56:38,869 INFO [task-runner-0-priority-0] io.druid.indexer.DetermineHashedPartitionsJob - Creating [0] shards
2020-02-11T17:56:38,871 INFO [task-runner-0-priority-0] io.druid.indexer.DetermineHashedPartitionsJob - Found approximately [0] rows in data.
2020-02-11T17:56:38,871 INFO [task-runner-0-priority-0] io.druid.indexer.DetermineHashedPartitionsJob - Creating [0] shards
2020-02-11T17:56:38,873 INFO [task-runner-0-priority-0] io.druid.indexer.DetermineHashedPartitionsJob - Found approximately [0] rows in data.
2020-02-11T17:56:38,873 INFO [task-runner-0-priority-0] io.druid.indexer.DetermineHashedPartitionsJob - Creating [0] shards
2020-02-11T17:56:38,875 INFO [task-runner-0-priority-0] io.druid.indexer.DetermineHashedPartitionsJob - Found approximately [0] rows in data.
2020-02-11T17:56:38,875 INFO [task-runner-0-priority-0] io.druid.indexer.DetermineHashedPartitionsJob - Creating [0] shards
2020-02-11T17:56:38,877 INFO [task-runner-0-priority-0] io.druid.indexer.DetermineHashedPartitionsJob - Found approximately [0] rows in data.
2020-02-11T17:56:38,877 INFO [task-runner-0-priority-0] io.druid.indexer.DetermineHashedPartitionsJob - Creating [0] shards
2020-02-11T17:56:38,888 INFO [task-runner-0-priority-0] io.druid.indexer.DetermineHashedPartitionsJob - Found approximately [0] rows in data.
2020-02-11T17:56:38,888 INFO [task-runner-0-priority-0] io.druid.indexer.DetermineHashedPartitionsJob - Creating [0] shards
2020-02-11T17:56:38,891 INFO [task-runner-0-priority-0] io.druid.indexer.DetermineHashedPartitionsJob - Found approximately [0] rows in data.
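Each "Found approximately [0] rows in data." / "Creating [0] shards" pair above covers one hourly bucket of the configured interval, so the partition-determination phase saw no input rows at all. A minimal sketch of the shard-count arithmetic, assuming hashed partitioning divides the estimated row count by targetPartitionSize from the tuningConfig shown below (this mirrors the observable behavior; it is not the exact Druid 0.9.2 source):

    public class ShardCountSketch {
        public static void main(String[] args) {
            long approxRows = 0L;                  // "Found approximately [0] rows in data."
            long targetPartitionSize = 5_000_000L; // tuningConfig.partitionsSpec.targetPartitionSize
            int numShards = (int) Math.ceil((double) approxRows / targetPartitionSize);
            System.out.println("Creating [" + numShards + "] shards"); // prints: Creating [0] shards
        }
    }

Zero estimated rows yields zero shards for every bucket, which is exactly what the repeated log lines show.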
2020-02-11T17:56:38,891 INFO [task-runner-0-priority-0] io.druid.indexer.DetermineHashedPartitionsJob - Creating [0] shards
2020-02-11T17:56:38,891 INFO [task-runner-0-priority-0] io.druid.indexer.DetermineHashedPartitionsJob - DetermineHashedPartitionsJob took 48257 millis
2020-02-11T17:56:38,891 INFO [task-runner-0-priority-0] io.druid.indexer.JobHelper - Deleting path[/tmp/druid-indexing/nio/2020-02-11T175604.974+0700_df565071f4294faca9f6c97544921bda]
2020-02-11T17:56:38,923 INFO [task-runner-0-priority-0] io.druid.indexing.common.actions.RemoteTaskActionClient - Performing action for task[index_hadoop_nio_2020-02-11T17:56:04.974+07:00]: LockListAction{}
2020-02-11T17:56:38,925 INFO [task-runner-0-priority-0] io.druid.indexing.common.actions.RemoteTaskActionClient - Submitting action for task[index_hadoop_nio_2020-02-11T17:56:04.974+07:00] to overlord[http://bigdata13.vnpt.vn:8090/druid/indexer/v1/action]: LockListAction{}
2020-02-11T17:56:38,934 INFO [task-runner-0-priority-0] io.druid.indexing.common.task.HadoopIndexTask - Setting version to: 2020-02-11T17:56:05.000+07:00
2020-02-11T17:56:38,945 INFO [task-runner-0-priority-0] io.druid.indexer.HadoopDruidIndexerConfig - Running with config: {
  "spec" : {
    "dataSchema" : {
      "dataSource" : "nio",
      "parser" : {
        "type" : "hadoopyString",
        "parseSpec" : {
          "format" : "csv",
          "timestampSpec" : { "column" : "time_stamp", "format" : "yyyyMMddHHmmss" },
          "columns" : [ "time_stamp", "imei", "imsi", "location", "rat_type", "client_ip", "application_category", "application_name", "vol_in", "vol_out", "record_duration", "rxmit_vol_in", "rxmit_vol_out", "pkt_in", "pkt_out", "rxmit_pkt_in", "rxmit_pkt_out", "reorder_pkt", "rxmit_pkt", "client_delay", "first_data_delay", "std", "network_delay", "msisdn", "server_ip", "server_port", "pdp_ctx_activation_time", "apn" ],
          "dimensionsSpec" : {
            "dimensions" : [ "imei", "imsi", "location", "rat_type", "client_ip", "application_category", "application_name", "msisdn", "server_ip", "server_port", "apn" ]
          }
        }
      },
      "metricsSpec" : [ { "type" : "count", "name" : "count" } ],
      "granularitySpec" : {
        "type" : "uniform",
        "segmentGranularity" : "HOUR",
        "queryGranularity" : { "type" : "none" },
        "rollup" : true,
        "intervals" : [ "2019-12-09T00:00:00.000+07:00/2019-12-10T00:00:00.000+07:00" ]
      }
    },
    "ioConfig" : {
      "type" : "hadoop",
      "inputSpec" : { "type" : "static", "paths" : "/user/hopcq/nio1day/niotopic_20191210045401.csv/part-00000" },
      "metadataUpdateSpec" : null,
      "segmentOutputPath" : "/user/druid/data"
    },
    "tuningConfig" : {
      "type" : "hadoop",
      "workingPath" : "/tmp/druid-indexing",
      "version" : "2020-02-11T17:56:05.000+07:00",
      "partitionsSpec" : {
        "type" : "hashed",
        "targetPartitionSize" : 5000000,
        "maxPartitionSize" : 7500000,
        "assumeGrouped" : false,
        "numShards" : -1,
        "partitionDimensions" : [ ]
      },
      "shardSpecs" : {
        "2019-12-09T00:00:00.000+07:00" : [ ],
        "2019-12-09T01:00:00.000+07:00" : [ ],
        "2019-12-09T02:00:00.000+07:00" : [ ],
        "2019-12-09T03:00:00.000+07:00" : [ ],
        "2019-12-09T04:00:00.000+07:00" : [ ],
        "2019-12-09T05:00:00.000+07:00" : [ ],
        "2019-12-09T06:00:00.000+07:00" : [ ],
        "2019-12-09T07:00:00.000+07:00" : [ ],
        "2019-12-09T08:00:00.000+07:00" : [ ],
        "2019-12-09T09:00:00.000+07:00" : [ ],
        "2019-12-09T10:00:00.000+07:00" : [ ],
        "2019-12-09T11:00:00.000+07:00" : [ ],
        "2019-12-09T12:00:00.000+07:00" : [ ],
        "2019-12-09T13:00:00.000+07:00" : [ ],
        "2019-12-09T14:00:00.000+07:00" : [ ],
        "2019-12-09T15:00:00.000+07:00" : [ ],
        "2019-12-09T16:00:00.000+07:00" : [ ],
        "2019-12-09T17:00:00.000+07:00" : [ ],
"2019-12-09T18:00:00.000+07:00" : [ ], "2019-12-09T19:00:00.000+07:00" : [ ], "2019-12-09T20:00:00.000+07:00" : [ ], "2019-12-09T21:00:00.000+07:00" : [ ], "2019-12-09T22:00:00.000+07:00" : [ ], "2019-12-09T23:00:00.000+07:00" : [ ] }, "indexSpec" : { "bitmap" : { "type" : "concise" }, "dimensionCompression" : "lz4", "metricCompression" : "lz4", "longEncoding" : "longs" }, "maxRowsInMemory" : 75000, "leaveIntermediate" : false, "cleanupOnFailure" : true, "overwriteFiles" : false, "ignoreInvalidRows" : false, "jobProperties" : { "mapreduce.job.queuename" : "api" }, "combineText" : false, "useCombiner" : false, "buildV9Directly" : false, "numBackgroundPersistThreads" : 0, "forceExtendableShardSpecs" : false }, "uniqueId" : "df565071f4294faca9f6c97544921bda" } } 2020-02-11T17:56:38,977 INFO [task-runner-0-priority-0] io.druid.indexing.common.task.HadoopIndexTask - Starting a hadoop index generator job... 2020-02-11T17:56:38,994 INFO [task-runner-0-priority-0] io.druid.indexer.path.StaticPathSpec - Adding paths[/user/hopcq/nio1day/niotopic_20191210045401.csv/part-00000] 2020-02-11T17:56:38,997 INFO [task-runner-0-priority-0] io.druid.indexer.HadoopDruidIndexerJob - No metadataStorageUpdaterJob set in the config. This is cool if you are running a hadoop index task, otherwise nothing will be uploaded to database. 2020-02-11T17:56:39,018 ERROR [task-runner-0-priority-0] io.druid.indexing.overlord.ThreadPoolTaskRunner - Exception while running task[HadoopIndexTask{id=index_hadoop_nio_2020-02-11T17:56:04.974+07:00, type=index_hadoop, dataSource=nio}] java.lang.RuntimeException: java.lang.reflect.InvocationTargetException at com.google.common.base.Throwables.propagate(Throwables.java:160) ~[guava-16.0.1.jar:?] at io.druid.indexing.common.task.HadoopTask.invokeForeignLoader(HadoopTask.java:204) ~[druid-indexing-service-0.9.2.2.6.1.0-129.jar:0.9.2.2.6.1.0-129] at io.druid.indexing.common.task.HadoopIndexTask.run(HadoopIndexTask.java:208) ~[druid-indexing-service-0.9.2.2.6.1.0-129.jar:0.9.2.2.6.1.0-129] at io.druid.indexing.overlord.ThreadPoolTaskRunner$ThreadPoolTaskRunnerCallable.call(ThreadPoolTaskRunner.java:436) [druid-indexing-service-0.9.2.2.6.1.0-129.jar:0.9.2.2.6.1.0-129] at io.druid.indexing.overlord.ThreadPoolTaskRunner$ThreadPoolTaskRunnerCallable.call(ThreadPoolTaskRunner.java:408) [druid-indexing-service-0.9.2.2.6.1.0-129.jar:0.9.2.2.6.1.0-129] at java.util.concurrent.FutureTask.run(FutureTask.java:266) [?:1.8.0_112] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) [?:1.8.0_112] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) [?:1.8.0_112] at java.lang.Thread.run(Thread.java:745) [?:1.8.0_112] Caused by: java.lang.reflect.InvocationTargetException at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:1.8.0_112] at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) ~[?:1.8.0_112] at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:1.8.0_112] at java.lang.reflect.Method.invoke(Method.java:498) ~[?:1.8.0_112] at io.druid.indexing.common.task.HadoopTask.invokeForeignLoader(HadoopTask.java:201) ~[druid-indexing-service-0.9.2.2.6.1.0-129.jar:0.9.2.2.6.1.0-129] ... 7 more Caused by: java.lang.RuntimeException: java.lang.RuntimeException: No buckets?? seems there is no data to index. 
    at io.druid.indexer.IndexGeneratorJob.run(IndexGeneratorJob.java:215) ~[druid-indexing-hadoop-0.9.2.2.6.1.0-129.jar:0.9.2.2.6.1.0-129]
    at io.druid.indexer.JobHelper.runJobs(JobHelper.java:349) ~[druid-indexing-hadoop-0.9.2.2.6.1.0-129.jar:0.9.2.2.6.1.0-129]
    at io.druid.indexer.HadoopDruidIndexerJob.run(HadoopDruidIndexerJob.java:94) ~[druid-indexing-hadoop-0.9.2.2.6.1.0-129.jar:0.9.2.2.6.1.0-129]
    at io.druid.indexing.common.task.HadoopIndexTask$HadoopIndexGeneratorInnerProcessing.runTask(HadoopIndexTask.java:261) ~[druid-indexing-service-0.9.2.2.6.1.0-129.jar:0.9.2.2.6.1.0-129]
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:1.8.0_112]
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) ~[?:1.8.0_112]
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:1.8.0_112]
    at java.lang.reflect.Method.invoke(Method.java:498) ~[?:1.8.0_112]
    at io.druid.indexing.common.task.HadoopTask.invokeForeignLoader(HadoopTask.java:201) ~[druid-indexing-service-0.9.2.2.6.1.0-129.jar:0.9.2.2.6.1.0-129]
    ... 7 more
Caused by: java.lang.RuntimeException: No buckets?? seems there is no data to index.
    at io.druid.indexer.IndexGeneratorJob.run(IndexGeneratorJob.java:176) ~[druid-indexing-hadoop-0.9.2.2.6.1.0-129.jar:0.9.2.2.6.1.0-129]
    at io.druid.indexer.JobHelper.runJobs(JobHelper.java:349) ~[druid-indexing-hadoop-0.9.2.2.6.1.0-129.jar:0.9.2.2.6.1.0-129]
    at io.druid.indexer.HadoopDruidIndexerJob.run(HadoopDruidIndexerJob.java:94) ~[druid-indexing-hadoop-0.9.2.2.6.1.0-129.jar:0.9.2.2.6.1.0-129]
    at io.druid.indexing.common.task.HadoopIndexTask$HadoopIndexGeneratorInnerProcessing.runTask(HadoopIndexTask.java:261) ~[druid-indexing-service-0.9.2.2.6.1.0-129.jar:0.9.2.2.6.1.0-129]
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:1.8.0_112]
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) ~[?:1.8.0_112]
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:1.8.0_112]
    at java.lang.reflect.Method.invoke(Method.java:498) ~[?:1.8.0_112]
    at io.druid.indexing.common.task.HadoopTask.invokeForeignLoader(HadoopTask.java:201) ~[druid-indexing-service-0.9.2.2.6.1.0-129.jar:0.9.2.2.6.1.0-129]
    ... 7 more
2020-02-11T17:56:39,026 INFO [task-runner-0-priority-0] io.druid.indexing.overlord.TaskRunnerUtils - Task [index_hadoop_nio_2020-02-11T17:56:04.974+07:00] status changed to [FAILED].
2020-02-11T17:56:39,028 INFO [task-runner-0-priority-0] io.druid.indexing.worker.executor.ExecutorLifecycle - Task completed with status: { "id" : "index_hadoop_nio_2020-02-11T17:56:04.974+07:00", "status" : "FAILED", "duration" : 51177 }
2020-02-11T17:56:39,035 INFO [main] com.metamx.common.lifecycle.Lifecycle$AnnotationBasedHandler - Invoking stop method[public void io.druid.server.coordination.AbstractDataSegmentAnnouncer.stop()] on object[io.druid.server.coordination.BatchDataSegmentAnnouncer@7f5ce33e].
2020-02-11T17:56:39,035 INFO [main] io.druid.server.coordination.AbstractDataSegmentAnnouncer - Stopping class io.druid.server.coordination.BatchDataSegmentAnnouncer with config[io.druid.server.initialization.ZkPathsConfig@22e2266d]
2020-02-11T17:56:39,035 INFO [main] io.druid.curator.announcement.Announcer - unannouncing [/druid/announcements/bigdata107.vnpt.vn:8100]
2020-02-11T17:56:39,045 INFO [main] com.metamx.common.lifecycle.Lifecycle$AnnotationBasedHandler - Invoking stop method[public void io.druid.server.listener.announcer.ListenerResourceAnnouncer.stop()] on object[io.druid.query.lookup.LookupResourceListenerAnnouncer@78479f2b].
2020-02-11T17:56:39,045 INFO [main] io.druid.curator.announcement.Announcer - unannouncing [/druid/listeners/lookups/__default/bigdata107.vnpt.vn:8100]
2020-02-11T17:56:39,060 INFO [main] io.druid.server.listener.announcer.ListenerResourceAnnouncer - Unannouncing start time on [/druid/listeners/lookups/__default/bigdata107.vnpt.vn:8100]
2020-02-11T17:56:39,060 INFO [main] com.metamx.common.lifecycle.Lifecycle$AnnotationBasedHandler - Invoking stop method[public void io.druid.query.lookup.LookupReferencesManager.stop()] on object[io.druid.query.lookup.LookupReferencesManager@59262a90].
2020-02-11T17:56:39,060 INFO [main] io.druid.query.lookup.LookupReferencesManager - Stopping lookup factory references manager
2020-02-11T17:56:39,062 INFO [main] org.eclipse.jetty.server.ServerConnector - Stopped ServerConnector@5b332439{HTTP/1.1}{0.0.0.0:8100}
2020-02-11T17:56:39,064 INFO [main] org.eclipse.jetty.server.handler.ContextHandler - Stopped o.e.j.s.ServletContextHandler@3d40498a{/,null,UNAVAILABLE}
2020-02-11T17:56:39,066 INFO [main] com.metamx.common.lifecycle.Lifecycle$AnnotationBasedHandler - Invoking stop method[public void io.druid.indexing.worker.executor.ExecutorLifecycle.stop() throws java.lang.Exception] on object[io.druid.indexing.worker.executor.ExecutorLifecycle@6aa5974e].
2020-02-11T17:56:39,066 INFO [main] com.metamx.common.lifecycle.Lifecycle$AnnotationBasedHandler - Invoking stop method[public void io.druid.indexing.overlord.ThreadPoolTaskRunner.stop()] on object[io.druid.indexing.overlord.ThreadPoolTaskRunner@1d283d1].
2020-02-11T17:56:39,066 INFO [main] com.metamx.common.lifecycle.Lifecycle$AnnotationBasedHandler - Invoking stop method[public void io.druid.curator.discovery.ServerDiscoverySelector.stop() throws java.io.IOException] on object[io.druid.curator.discovery.ServerDiscoverySelector@73893ec1].
2020-02-11T17:56:39,069 INFO [main] com.metamx.common.lifecycle.Lifecycle$AnnotationBasedHandler - Invoking stop method[public void io.druid.curator.announcement.Announcer.stop()] on object[io.druid.curator.announcement.Announcer@fb5aeed].
2020-02-11T17:56:39,069 INFO [main] com.metamx.common.lifecycle.Lifecycle$AnnotationBasedHandler - Invoking stop method[public void io.druid.curator.discovery.ServerDiscoverySelector.stop() throws java.io.IOException] on object[io.druid.curator.discovery.ServerDiscoverySelector@4fecf308].
2020-02-11T17:56:39,069 INFO [main] io.druid.curator.CuratorModule - Stopping Curator
2020-02-11T17:56:39,069 INFO [Curator-Framework-0] org.apache.curator.framework.imps.CuratorFrameworkImpl - backgroundOperationsLoop exiting
2020-02-11T17:56:39,071 INFO [main] org.apache.zookeeper.ZooKeeper - Session: 0x1702d34680e1795 closed
2020-02-11T17:56:39,072 INFO [main-EventThread] org.apache.zookeeper.ClientCnxn - EventThread shut down for session: 0x1702d34680e1795
2020-02-11T17:56:39,072 INFO [main] com.metamx.common.lifecycle.Lifecycle$AnnotationBasedHandler - Invoking stop method[public void com.metamx.http.client.NettyHttpClient.stop()] on object[com.metamx.http.client.NettyHttpClient@28ee0a3c].
2020-02-11T17:56:39,083 INFO [main] com.metamx.common.lifecycle.Lifecycle$AnnotationBasedHandler - Invoking stop method[public void io.druid.storage.hdfs.HdfsStorageAuthentication.stop()] on object[io.druid.storage.hdfs.HdfsStorageAuthentication@15e1f8fe].
2020-02-11T17:56:39,083 INFO [main] com.metamx.common.lifecycle.Lifecycle$AnnotationBasedHandler - Invoking stop method[public void com.metamx.metrics.MonitorScheduler.stop()] on object[com.metamx.metrics.MonitorScheduler@7418d76e].
2020-02-11T17:56:39,083 INFO [main] com.metamx.common.lifecycle.Lifecycle$AnnotationBasedHandler - Invoking stop method[public void com.metamx.emitter.service.ServiceEmitter.close() throws java.io.IOException] on object[com.metamx.emitter.service.ServiceEmitter@44f0ff2b].
2020-02-11T17:56:39,085 INFO [main] com.metamx.common.lifecycle.Lifecycle$AnnotationBasedHandler - Invoking stop method[public void io.druid.initialization.Log4jShutterDownerModule$Log4jShutterDowner.stop()] on object[io.druid.initialization.Log4jShutterDownerModule$Log4jShutterDowner@31834a2b].
2020-02-11 17:56:39,114 pool-1-thread-1 ERROR Unable to register shutdown hook because JVM is shutting down.
java.lang.IllegalStateException: Not started
    at io.druid.common.config.Log4jShutdown.addShutdownCallback(Log4jShutdown.java:45)
    at org.apache.logging.log4j.core.impl.Log4jContextFactory.addShutdownCallback(Log4jContextFactory.java:273)
    at org.apache.logging.log4j.core.LoggerContext.setUpShutdownHook(LoggerContext.java:256)
    at org.apache.logging.log4j.core.LoggerContext.start(LoggerContext.java:216)
    at org.apache.logging.log4j.core.impl.Log4jContextFactory.getContext(Log4jContextFactory.java:145)
    at org.apache.logging.log4j.core.impl.Log4jContextFactory.getContext(Log4jContextFactory.java:41)
    at org.apache.logging.log4j.LogManager.getContext(LogManager.java:182)
    at org.apache.logging.log4j.spi.AbstractLoggerAdapter.getContext(AbstractLoggerAdapter.java:103)
    at org.apache.logging.slf4j.Log4jLoggerFactory.getContext(Log4jLoggerFactory.java:43)
    at org.apache.logging.log4j.spi.AbstractLoggerAdapter.getLogger(AbstractLoggerAdapter.java:42)
    at org.apache.logging.slf4j.Log4jLoggerFactory.getLogger(Log4jLoggerFactory.java:29)
    at org.slf4j.LoggerFactory.getLogger(LoggerFactory.java:284)
    at org.apache.commons.logging.impl.SLF4JLogFactory.getInstance(SLF4JLogFactory.java:155)
    at org.apache.commons.logging.impl.SLF4JLogFactory.getInstance(SLF4JLogFactory.java:132)
    at org.apache.commons.logging.LogFactory.getLog(LogFactory.java:273)
    at org.apache.hadoop.hdfs.LeaseRenewer.<clinit>(LeaseRenewer.java:72)
    at org.apache.hadoop.hdfs.DFSClient.getLeaseRenewer(DFSClient.java:830)
    at org.apache.hadoop.hdfs.DFSClient.close(DFSClient.java:968)
    at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1214)
    at org.apache.hadoop.fs.FileSystem$Cache.closeAll(FileSystem.java:2886)
    at org.apache.hadoop.fs.FileSystem$Cache$ClientFinalizer.run(FileSystem.java:2903)
    at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
    at java.util.concurrent.FutureTask.run(FutureTask.java:266)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
    at java.lang.Thread.run(Thread.java:745)
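The root cause is the "No buckets?? seems there is no data to index." exception: every bucket of the configured interval was empty, so the IndexGeneratorJob had nothing to build (the final IllegalStateException is only shutdown noise). Either the time_stamp values in the CSV do not parse with the yyyyMMddHHmmss format, or they fall outside the configured interval 2019-12-09T00:00:00.000+07:00/2019-12-10T00:00:00.000+07:00; the input file name niotopic_20191210045401.csv hints the data may actually be from 2019-12-10. Rows whose timestamp falls outside the interval are dropped before bucketing. A minimal check of one value, with a hypothetical sample that should be replaced by a real time_stamp from the CSV, and assuming the +07:00 offset used elsewhere in the spec (if Druid parses the format as UTC the window shifts, but the membership check is the same):

    import java.time.LocalDateTime;
    import java.time.ZoneOffset;
    import java.time.ZonedDateTime;
    import java.time.format.DateTimeFormatter;

    public class IntervalCheck {
        public static void main(String[] args) {
            // timestampSpec from the ingestion spec: column "time_stamp", format "yyyyMMddHHmmss"
            DateTimeFormatter fmt = DateTimeFormatter.ofPattern("yyyyMMddHHmmss");

            // Hypothetical sample value; substitute a real time_stamp from the CSV.
            String sample = "20191210120000";
            ZonedDateTime ts = LocalDateTime.parse(sample, fmt).atZone(ZoneOffset.ofHours(7));

            // Interval from granularitySpec.intervals (half-open: [start, end)).
            ZonedDateTime start = ZonedDateTime.parse("2019-12-09T00:00:00+07:00");
            ZonedDateTime end = ZonedDateTime.parse("2019-12-10T00:00:00+07:00");

            boolean inInterval = !ts.isBefore(start) && ts.isBefore(end);
            // Rows for which this is false are dropped before bucketing,
            // consistent with "[0] rows" in every hourly bucket above.
            System.out.println(sample + " in interval? " + inInterval);
        }
    }

If the sample values parse but sit outside the interval, widening granularitySpec.intervals to cover the dates actually present in the file should let the task find rows.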