Druid Batch Ingestion fails with an array of JSON objects

Hi,
I am new to Druid and am in the process of learning it by doing. The problem I encountered — and on which I had to spend a good amount of time — is with the JSON data format.
When doing batch data ingestion, providing an array of JSON objects — which is a valid JSON format — leads to failure with the exception message below. Data ingestion works without problem when provided with newline-separated JSON objects. While I do not have enough knowledge to weigh in on standard vs. non-standard formats, shouldn't data ingestion work with an array of JSON objects, or at least provide a better way to let the user know about this problem?
Also, the exception message thrown is not helpful. My JSON file is generated by a Go/Python program; while we could certainly generate a non-array, newline-separated JSON file, these programs by default produce an array of JSON objects.
Similar discussion here: https://groups.google.com/forum/#!topic/nodejs/0ohwx0vF-SY
Exception Message:

java.lang.Exception: io.druid.java.util.common.RE: Failure on row[[]
	at org.apache.hadoop.mapred.LocalJobRunner$Job.runTasks(LocalJobRunner.java:462) ~[hadoop-mapreduce-client-common-2.7.3.jar:?]
	at org.apache.hadoop.mapred.LocalJobRunner$Job.run(LocalJobRunner.java:522) [hadoop-mapreduce-client-common-2.7.3.jar:?]
Caused by: io.druid.java.util.common.RE: Failure on row[[]
	at io.druid.indexer.HadoopDruidIndexerMapper.map(HadoopDruidIndexerMapper.java:91) ~[druid-indexing-hadoop-0.10.1.jar:0.10.1]
	at io.druid.indexer.DetermineHashedPartitionsJob$DetermineCardinalityMapper.run(DetermineHashedPartitionsJob.java:286) ~[druid-indexing-hadoop-0.10.1.jar:0.10.1]
	at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:787) ~[hadoop-mapreduce-client-core-2.7.3.jar:?]
	at org.apache.hadoop.mapred.MapTask.run(MapTask.java:341) ~[hadoop-mapreduce-client-core-2.7.3.jar:?]
	at org.apache.hadoop.mapred.LocalJobRunner$Job$MapTaskRunnable.run(LocalJobRunner.java:243) ~[hadoop-mapreduce-client-common-2.7.3.jar:?]
	at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) ~[?:1.8.0_45]
	at java.util.concurrent.FutureTask.run(FutureTask.java:266) ~[?:1.8.0_45]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) ~[?:1.8.0_45]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) ~[?:1.8.0_45]
	at java.lang.Thread.run(Thread.java:745) ~[?:1.8.0_45]
Caused by: io.druid.java.util.common.parsers.ParseException: Unable to parse row [[]
	at io.druid.java.util.common.parsers.JSONPathParser.parse(JSONPathParser.java:125) ~[java-util-0.10.1.jar:0.10.1]
	at io.druid.data.input.impl.StringInputRowParser.parseString(StringInputRowParser.java:141) ~[druid-api-0.10.1.jar:0.10.1]
	at io.druid.data.input.impl.StringInputRowParser.parse(StringInputRowParser.java:135) ~[druid-api-0.10.1.jar:0.10.1]
	at io.druid.indexer.HadoopyStringInputRowParser.parse(HadoopyStringInputRowParser.java:49) ~[druid-indexing-hadoop-0.10.1.jar:0.10.1]
	at io.druid.indexer.HadoopDruidIndexerMapper.parseInputRow(HadoopDruidIndexerMapper.java:105) ~[druid-indexing-hadoop-0.10.1.jar:0.10.1]
	at io.druid.indexer.HadoopDruidIndexerMapper.map(HadoopDruidIndexerMapper.java:72) ~[druid-indexing-hadoop-0.10.1.jar:0.10.1]
	at io.druid.indexer.DetermineHashedPartitionsJob$DetermineCardinalityMapper.run(DetermineHashedPartitionsJob.java:286) ~[druid-indexing-hadoop-0.10.1.jar:0.10.1]
	at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:787) ~[hadoop-mapreduce-client-core-2.7.3.jar:?]
	at org.apache.hadoop.mapred.MapTask.run(MapTask.java:341) ~[hadoop-mapreduce-client-core-2.7.3.jar:?]
	at org.apache.hadoop.mapred.LocalJobRunner$Job$MapTaskRunnable.run(LocalJobRunner.java:243) ~[hadoop-mapreduce-client-common-2.7.3.jar:?]
	at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) ~[?:1.8.0_45]
	at java.util.concurrent.FutureTask.run(FutureTask.java:266) ~[?:1.8.0_45]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) ~[?:1.8.0_45]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) ~[?:1.8.0_45]
	at java.lang.Thread.run(Thread.java:745) ~[?:1.8.0_45]
Caused by: com.fasterxml.jackson.databind.JsonMappingException: **Can not deserialize instance of java.util.LinkedHashMap out of START_ARRAY token
 at [Source: [; line: 1, column: 1]**
	at com.fasterxml.jackson.databind.JsonMappingException.from(JsonMappingException.java:148) ~[jackson-databind-2.4.6.jar:2.4.6]
	at com.fasterxml.jackson.databind.DeserializationContext.mappingException(DeserializationContext.java:762) ~[jackson-databind-2.4.6.jar:2.4.6]
	at com.fasterxml.jackson.databind.DeserializationContext.mappingException(DeserializationContext.java:758) ~[jackson-databind-2.4.6.jar:2.4.6]
	at com.fasterxml.jackson.databind.deser.std.MapDeserializer.deserialize(MapDeserializer.java:331) ~[jackson-databind-2.4.6.jar:2.4.6]
	at com.fasterxml.jackson.databind.deser.std.MapDeserializer.deserialize(MapDeserializer.java:26) ~[jackson-databind-2.4.6.jar:2.4.6]
	at com.fasterxml.jackson.databind.ObjectMapper._readMapAndClose(ObjectMapper.java:3066) ~[jackson-databind-2.4.6.jar:2.4.6]
	at com.fasterxml.jackson.databind.ObjectMapper.readValue(ObjectMapper.java:2168) ~[jackson-databind-2.4.6.jar:2.4.6]
	at io.druid.java.util.common.parsers.JSONPathParser.parse(JSONPathParser.java:97) ~[java-util-0.10.1.jar:0.10.1]
	at io.druid.data.input.impl.StringInputRowParser.parseString(StringInputRowParser.java:141) ~[druid-api-0.10.1.jar:0.10.1]
	at io.druid.data.input.impl.StringInputRowParser.parse(StringInputRowParser.java:135) ~[druid-api-0.10.1.jar:0.10.1]
	at io.druid.indexer.HadoopyStringInputRowParser.parse(HadoopyStringInputRowParser.java:49) ~[druid-indexing-hadoop-0.10.1.jar:0.10.1]
	at io.druid.indexer.HadoopDruidIndexerMapper.parseInputRow(HadoopDruidIndexerMapper.java:105) ~[druid-indexing-hadoop-0.10.1.jar:0.10.1]
	at io.druid.indexer.HadoopDruidIndexerMapper.map(HadoopDruidIndexerMapper.java:72) ~[druid-indexing-hadoop-0.10.1.jar:0.10.1]
	at io.druid.indexer.DetermineHashedPartitionsJob$DetermineCardinalityMapper.run(DetermineHashedPartitionsJob.java:286) ~[druid-indexing-hadoop-0.10.1.jar:0.10.1]
	at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:787) ~[hadoop-mapreduce-client-core-2.7.3.jar:?]
	at org.apache.hadoop.mapred.MapTask.run(MapTask.java:341) ~[hadoop-mapreduce-client-core-2.7.3.jar:?]
	at org.apache.hadoop.mapred.LocalJobRunner$Job$MapTaskRunnable.run(LocalJobRunner.java:243) ~[hadoop-mapreduce-client-common-2.7.3.jar:?]
	at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) ~[?:1.8.0_45]
	at java.util.concurrent.FutureTask.run(FutureTask.java:266) ~[?:1.8.0_45]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) ~[?:1.8.0_45]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) ~[?:1.8.0_45]
	at java.lang.Thread.run(Thread.java:745) ~[?:1.8.0_45]