Historical nodes bootstrap speed from HDFS / single thread

Hello,

I am trying to get the historical nodes to load segments faster. For
deep storage I am using HDFS. During bootstrap there is only one
thread/CPU constantly active on the historical nodes, even though I have
numLoadingThreads and numBootstrapThreads set to 20. In the log I see
segments being pulled sequentially, one every 8 seconds. Is there any
other setting I am missing? At this rate it takes hours to load my index.

Thanks a lot for any help!
Regards,
Christian

2018-07-16T08:47:36,283 INFO [ZkCoordinator]
io.druid.storage.hdfs.HdfsDataSegmentPuller - Unzipped 534924062 bytes
from
[hdfs://mycluster/druid/segments/myindex/20180629T030000.000Z_20180629T040000.000Z/2018-07-15T18_17_15.562Z/32_index.zip]
to
[/opt/druid/var/runtime/segment-cache/data5/myindex/2018-06-29T03:00:00.000Z_2018-06-29T04:00:00.000Z/2018-07-15T18:17:15.562Z/32]
2018-07-16T08:47:43,544 INFO [ZkCoordinator]
io.druid.storage.hdfs.HdfsDataSegmentPuller - Unzipped 535188135 bytes
from
[hdfs://mycluster/druid/segments/myindex/20180629T030000.000Z_20180629T040000.000Z/2018-07-15T18_17_15.562Z/23_index.zip]
to
[/opt/druid/var/runtime/segment-cache/data1/myindex/2018-06-29T03:00:00.000Z_2018-06-29T04:00:00.000Z/2018-07-15T18:17:15.562Z/23]
2018-07-16T08:47:50,649 INFO [ZkCoordinator]
io.druid.storage.hdfs.HdfsDataSegmentPuller - Unzipped 535095888 bytes
from
[hdfs://mycluster/druid/segments/myindex/20180629T030000.000Z_20180629T040000.000Z/2018-07-15T18_17_15.562Z/20_index.zip]
to
[/opt/druid/var/runtime/segment-cache/data2/myindex/2018-06-29T03:00:00.000Z_2018-06-29T04:00:00.000Z/2018-07-15T18:17:15.562Z/20]

runtime.properties:

    druid.service=druid/historical
    druid.port=8083

    druid.server.http.numThreads=80

    druid.processing.buffer.sizeBytes=268435456
    druid.processing.numThreads=40
    druid.processing.numMergeBuffers=20

    druid.segmentCache.deleteOnRemove=true
    druid.segmentCache.numLoadingThreads=20
    druid.segmentCache.numBootstrapThreads=20

    druid.server.maxSize=128000000000
    druid.segmentCache.locations=[
{"path":"var/runtime/segment-cache/data1","maxSize"\:200000000000},{"path":"var/runtime/segment-cache/data2","maxSize"\:200000000000},{"path":"var/runtime/segment-cache/data3","maxSize"\:200000000000},{"path":"var/runtime/segment-cache/data4","maxSize"\:200000000000},{"path":"var/runtime/segment-cache/data5","maxSize"\:200000000000},{"path":"var/runtime/segment-cache/data6","maxSize"\:200000000000},{"path":"var/runtime/segment-cache/data7","maxSize"\:200000000000},{"path":"var/runtime/segment-cache/data8","maxSize"\:200000000000}
]

    druid.historical.cache.useCache=false
    druid.historical.cache.populateCache=false

common.runtime.properties:

    druid.extensions.loadList=["druid-hdfs-storage", "druid-histogram",
"druid-datasketches", "druid-lookups-cached-global",
"mysql-metadata-storage"]
    druid.startup.logging.logProperties=true

druid.zk.service.host=zookeeper-0.zookeeper.hadoop:2181,zookeeper-1.zookeeper.hadoop:2181,zookeeper-2.zookeeper.hadoop:2181
    druid.zk.paths.base=/druid
    druid.metadata.storage.type=mysql

druid.metadata.storage.connector.connectURI=jdbc:mysql://mysql.druid/druid
    druid.metadata.storage.connector.user=druid
    druid.metadata.storage.connector.password=druid
    druid.storage.type=hdfs
    druid.storage.storageDirectory=/druid/segments
    druid.indexer.logs.type=hdfs
    druid.indexer.logs.directory=/druid/indexing-logs
    druid.selectors.indexing.serviceName=druid/overlord
    druid.selectors.coordinator.serviceName=druid/coordinator
    druid.monitoring.monitors=
    druid.emitter=logging
    druid.emitter.logging.logLevel=info
    druid.indexing.doubleStorage=double