BufferUnderflowException when using HLL in TopN query

I found a similar issue reported at https://github.com/druid-io/druid/issues/3560, which is marked as fixed in 0.9.2.
But I am still hitting this issue on 0.9.2. Does it still happen in 0.10.0?

Here is the trace from the historical's log:

, exception=java.nio.BufferUnderflowException, query=TopNQuery{dataSource='dws_wl_hash_online', dimensionSpec=DefaultDimensionSpec{dimension='query', outputName='query'}, topNMetricSpec=NumericTopNMetricSpec{metric='PV'}, threshold=1000, querySegmentSpec=MultipleSpecificSegmentSpec{descriptors=[SegmentDescriptor{interval=2017-04-02T00:00:00.000+08:00/2017-04-02T23:59:59.999+08:00, version='2017-04-03T17:47:15.029+08:00', partitionNumber=48}, SegmentDescriptor{interval=2017-04-02T00:00:00.000+08:00/2017-04-02T23:59:59.999+08:00, version='2017-04-03T17:47:15.029+08:00', partitionNumber=157}, SegmentDescriptor{interval=2017-04-05T00:00:00.000+08:00/2017-04-05T23:59:59.999+08:00, version='2017-04-06T19:17:15.467+08:00', partitionNumber=61}, SegmentDescriptor{interval=2017-04-06T00:00:00.000+08:00/2017-04-06T23:59:59.999+08:00, version='2017-04-07T17:17:16.054+08:00', partitionNumber=163}, SegmentDescriptor{interval=2017-04-09T00:00:00.000+08:00/2017-04-09T23:59:59.999+08:00, version='2017-04-10T18:47:15.369+08:00', partitionNumber=120}, SegmentDescriptor{interval=2017-04-11T00:00:00.000+08:00/2017-04-11T23:59:59.999+08:00, version='2017-04-12T20:47:13.851+08:00', partitionNumber=98}, SegmentDescriptor{interval=2017-04-11T00:00:00.000+08:00/2017-04-11T23:59:59.999+08:00, version='2017-04-12T20:47:13.851+08:00', partitionNumber=120}, SegmentDescriptor{interval=2017-04-12T00:00:00.000+08:00/2017-04-12T23:59:59.999+08:00, version='2017-04-13T19:10:41.400+08:00', partitionNumber=142}, SegmentDescriptor{interval=2017-04-14T00:00:00.000+08:00/2017-04-14T23:59:59.999+08:00, version='2017-04-16T01:47:15.476+08:00', partitionNumber=52}, SegmentDescriptor{interval=2017-04-16T00:00:00.000+08:00/2017-04-16T23:59:59.999+08:00, version='2017-04-18T21:17:13.746+08:00', partitionNumber=224}, SegmentDescriptor{interval=2017-04-18T00:00:00.000+08:00/2017-04-18T23:59:59.999+08:00, version='2017-04-19T20:47:14.844+08:00', partitionNumber=72}]}, dimFilter=((stat_date = 20170331 || stat_date = 20170401 || stat_date = 20170402 || stat_date = 20170403 || stat_date = 20170404 || stat_date = 20170405 || stat_date = 20170406 || stat_date = 20170407 || stat_date = 20170408 || stat_date = 20170409 || stat_date = 20170410 || stat_date = 20170411 || stat_date = 20170412 || stat_date = 20170413 || stat_date = 20170414 || stat_date = 20170415 || stat_date = 20170416 || stat_date = 20170417 || stat_date = 20170418) && !query = && cate_id = 350213), granularity='AllGranularity', aggregatorSpecs=[CardinalityAggregatorFactory{name='UV', fields='[DefaultDimensionSpec{dimension='visitor_id', outputName='visitor_id'}]'}, LongSumAggregatorFactory{fieldName='se_lpv_1d_004', name='PV'}, LongSumAggregatorFactory{fieldName='se_ipv_1d_003', name='IPV'}, DoubleSumAggregatorFactory{fieldName='pay_ord_amt_1d_007', name='成交金额'}, LongSumAggregatorFactory{fieldName='pay_ord_cnt_1d_007', name='成交笔数'}, DoubleSumAggregatorFactory{fieldName='pay_ord_amt_1d_019', name='全引导成交金额'}, LongSumAggregatorFactory{fieldName='pay_ord_cnt_1d_021', name='全引导成交笔数'}, LongSumAggregatorFactory{fieldName='clt_itm_cnt_1d_001', name='收藏商品数'}, LongSumAggregatorFactory{fieldName='cart_itm_cnt_1d_001', name='加购商品数'}, LongSumAggregatorFactory{fieldName='pay_ord_cnt_1d_024', name='C宝贝成交笔数'}, DoubleSumAggregatorFactory{fieldName='pay_ord_amt_1d_025', name='C宝贝成交金额'}, LongSumAggregatorFactory{fieldName='se_ipv_1d_005', name='C宝贝IPV'}, LongSumAggregatorFactory{fieldName='c_pv', name='C宝贝PV'}, LongSumAggregatorFactory{fieldName='b_pv', name='B宝贝PV'}, LongSumAggregatorFactory{fieldName='se_ipv_1d_004', name='B宝贝IPV'}], postAggregatorSpecs=}, peer=11.251.156.143}}
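The aggregator being combined when this blows up is the CardinalityAggregatorFactory{name='UV'} on visitor_id: merging per-segment results folds one HyperLogLog sketch into another, which is the fold() at the top of the trace below. Here is a rough repro sketch against the 0.9.2 classes named in the trace; the one-byte payload is purely a hypothetical stand-in for whatever truncated or corrupt sketch reaches fold(), since I have not confirmed how the bad buffer actually arises:

```java
// Rough repro sketch against Druid 0.9.2's io.druid.query.aggregation.hyperloglog;
// the one-byte payload below is an assumption, not the actual on-disk bytes.
import io.druid.query.aggregation.hyperloglog.HyperLogLogCollector;

import java.nio.ByteBuffer;

public class FoldUnderflowSketch
{
  public static void main(String[] args)
  {
    HyperLogLogCollector lhs = HyperLogLogCollector.makeLatestCollector();

    // Wrap a deliberately-too-short buffer as a collector, standing in for
    // a truncated serialized sketch coming back from a segment.
    HyperLogLogCollector rhs =
        HyperLogLogCollector.makeCollector(ByteBuffer.wrap(new byte[]{0x01}));

    // CardinalityAggregatorFactory.combine() does essentially this; with a
    // truncated buffer, the getShort() inside fold() underflows.
    lhs.fold(rhs);
  }
}
```

So the question seems to be why a segment would hand back a sketch too short to fold.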

{feed=alerts, timestamp=2017-04-20T14:16:03.894+08:00, service=historical, host:8083=hadoop0797.et2.tbsite.net, severity=component-failure, description=Exception handling request, data={class=io.druid.server.QueryResource, exceptionType=class java.nio.BufferUnderflowException, exceptionMessage=null, exceptionStackTrace=java.nio.BufferUnderflowException
    at java.nio.Buffer.nextGetIndex(Buffer.java:506)
    at java.nio.HeapByteBuffer.getShort(HeapByteBuffer.java:310)
    at io.druid.query.aggregation.hyperloglog.HyperLogLogCollector.fold(HyperLogLogCollector.java:398)
    at io.druid.query.aggregation.cardinality.CardinalityAggregatorFactory.combine(CardinalityAggregatorFactory.java:198)
    at io.druid.query.topn.TopNBinaryFn.apply(TopNBinaryFn.java:106)
    at io.druid.query.topn.TopNBinaryFn.apply(TopNBinaryFn.java:39)
    at io.druid.common.guava.CombiningSequence$CombiningYieldingAccumulator.accumulate(CombiningSequence.java:212)
    at com.metamx.common.guava.BaseSequence.makeYielder(BaseSequence.java:105)
    at com.metamx.common.guava.BaseSequence.toYielder(BaseSequence.java:82)
    at io.druid.common.guava.CombiningSequence.toYielder(CombiningSequence.java:78)
    at com.metamx.common.guava.MappedSequence.toYielder(MappedSequence.java:46)
    at io.druid.query.CPUTimeMetricQueryRunner$1.toYielder(CPUTimeMetricQueryRunner.java:93)
    at com.metamx.common.guava.Sequences$1.toYielder(Sequences.java:98)
    at io.druid.server.QueryResource.doPost(QueryResource.java:231)
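
The top two frames are plain java.nio mechanics: HeapByteBuffer.getShort() needs two readable bytes, so when fold() (HyperLogLogCollector.java:398) reads a short past the buffer's limit, you get exactly this exception. A self-contained illustration of that underflow, independent of Druid:

```java
import java.nio.BufferUnderflowException;
import java.nio.ByteBuffer;

public class GetShortUnderflow
{
  public static void main(String[] args)
  {
    ByteBuffer buf = ByteBuffer.allocate(1); // only one readable byte

    try {
      buf.getShort(); // needs two bytes -> throws
    }
    catch (BufferUnderflowException e) {
      // Same exception type the historical reports from fold().
      System.out.println("caught " + e);
    }
  }
}
```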