Class org.broadinstitute.hellbender.tools.spark.transforms.markduplicates.MarkDuplicatesSparkUtilsUnitTest

5 tests   0 failures   0 ignored   13.415s duration   100% successful

Tests

Test                                        Duration   Result
testChangingContigsOnHeaderlessSAMRecord      0.006s   passed
testHeaderMissingReadGroupFilds               0.007s   passed
testReadsMissingReadGroups                    0.074s   passed
testSortOrderPartitioningCorrectness         13.327s   passed
testSpanningIterator                          0.001s   passed

Standard error

21:25:42.380 ERROR Executor - Exception in task 1.0 in stage 33.0 (TID 22)
org.broadinstitute.hellbender.exceptions.UserException$ReadMissingReadGroup: SAM/BAM/CRAM file (unknown) is malformed: Read READ is missing the read group (RG) tag, which is required by the GATK.  Please use http://gatkforums.broadinstitute.org/discussion/59/companion-utilities-replacereadgroups to fix this problem
	at org.broadinstitute.hellbender.tools.spark.transforms.markduplicates.MarkDuplicatesSparkUtils.getLibraryForRead(MarkDuplicatesSparkUtils.java:64) ~[gatk-package-40db79c-SNAPSHOT-local.jar:4.6.1.0-11-g40db79c-SNAPSHOT]
	at org.broadinstitute.hellbender.utils.read.markduplicates.sparkrecords.Fragment.<init>(Fragment.java:37) ~[gatk-package-40db79c-SNAPSHOT-local.jar:40db79c-SNAPSHOT]
	at org.broadinstitute.hellbender.utils.read.markduplicates.sparkrecords.MarkDuplicatesSparkRecord.newFragment(MarkDuplicatesSparkRecord.java:36) ~[gatk-package-40db79c-SNAPSHOT-local.jar:40db79c-SNAPSHOT]
	at org.broadinstitute.hellbender.tools.spark.transforms.markduplicates.MarkDuplicatesSparkUtils.lambda$transformToDuplicateNames$0(MarkDuplicatesSparkUtils.java:142) ~[gatk-package-40db79c-SNAPSHOT-local.jar:4.6.1.0-11-g40db79c-SNAPSHOT]
	at java.base/java.util.stream.ReferencePipeline$15$1.accept(ReferencePipeline.java:540) ~[?:?]
	at java.base/java.util.ArrayList$ArrayListSpliterator.forEachRemaining(ArrayList.java:1625) ~[?:?]
	at java.base/java.util.stream.AbstractPipeline.copyInto(AbstractPipeline.java:509) ~[?:?]
	at java.base/java.util.stream.AbstractPipeline.wrapAndCopyInto(AbstractPipeline.java:499) ~[?:?]
	at java.base/java.util.stream.ReduceOps$ReduceOp.evaluateSequential(ReduceOps.java:921) ~[?:?]
	at java.base/java.util.stream.AbstractPipeline.evaluate(AbstractPipeline.java:234) ~[?:?]
	at java.base/java.util.stream.ReferencePipeline.collect(ReferencePipeline.java:682) ~[?:?]
	at org.broadinstitute.hellbender.tools.spark.transforms.markduplicates.MarkDuplicatesSparkUtils.lambda$transformToDuplicateNames$5ac2632f$1(MarkDuplicatesSparkUtils.java:150) ~[gatk-package-40db79c-SNAPSHOT-local.jar:4.6.1.0-11-g40db79c-SNAPSHOT]
	at org.apache.spark.api.java.JavaRDDLike.$anonfun$flatMapToPair$1(JavaRDDLike.scala:143) ~[gatk-package-40db79c-SNAPSHOT-local.jar:40db79c-SNAPSHOT]
	at scala.collection.Iterator$$anon$10.nextCur(Iterator.scala:594) ~[gatk-package-40db79c-SNAPSHOT-local.jar:40db79c-SNAPSHOT]
	at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:608) ~[gatk-package-40db79c-SNAPSHOT-local.jar:40db79c-SNAPSHOT]
	at org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriter.write(BypassMergeSortShuffleWriter.java:140) ~[gatk-package-40db79c-SNAPSHOT-local.jar:40db79c-SNAPSHOT]
	at org.apache.spark.shuffle.ShuffleWriteProcessor.write(ShuffleWriteProcessor.scala:59) ~[gatk-package-40db79c-SNAPSHOT-local.jar:40db79c-SNAPSHOT]
	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:104) ~[gatk-package-40db79c-SNAPSHOT-local.jar:40db79c-SNAPSHOT]
	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:54) ~[gatk-package-40db79c-SNAPSHOT-local.jar:40db79c-SNAPSHOT]
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:161) ~[gatk-package-40db79c-SNAPSHOT-local.jar:40db79c-SNAPSHOT]
	at org.apache.spark.scheduler.Task.run(Task.scala:141) ~[gatk-package-40db79c-SNAPSHOT-local.jar:40db79c-SNAPSHOT]
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:620) ~[gatk-package-40db79c-SNAPSHOT-local.jar:40db79c-SNAPSHOT]
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally(SparkErrorUtils.scala:64) ~[gatk-package-40db79c-SNAPSHOT-local.jar:40db79c-SNAPSHOT]
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally$(SparkErrorUtils.scala:61) ~[gatk-package-40db79c-SNAPSHOT-local.jar:40db79c-SNAPSHOT]
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:94) ~[gatk-package-40db79c-SNAPSHOT-local.jar:40db79c-SNAPSHOT]
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:623) [gatk-package-40db79c-SNAPSHOT-local.jar:40db79c-SNAPSHOT]
	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) [?:?]
	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) [?:?]
	at java.base/java.lang.Thread.run(Thread.java:840) [?:?]
21:25:42.403 WARN  TaskSetManager - Lost task 1.0 in stage 33.0 (TID 22) (localhost executor driver): org.broadinstitute.hellbender.exceptions.UserException$ReadMissingReadGroup: SAM/BAM/CRAM file (unknown) is malformed: Read READ is missing the read group (RG) tag, which is required by the GATK.  Please use http://gatkforums.broadinstitute.org/discussion/59/companion-utilities-replacereadgroups to fix this problem
	at org.broadinstitute.hellbender.tools.spark.transforms.markduplicates.MarkDuplicatesSparkUtils.getLibraryForRead(MarkDuplicatesSparkUtils.java:64)
	at org.broadinstitute.hellbender.utils.read.markduplicates.sparkrecords.Fragment.<init>(Fragment.java:37)
	at org.broadinstitute.hellbender.utils.read.markduplicates.sparkrecords.MarkDuplicatesSparkRecord.newFragment(MarkDuplicatesSparkRecord.java:36)
	at org.broadinstitute.hellbender.tools.spark.transforms.markduplicates.MarkDuplicatesSparkUtils.lambda$transformToDuplicateNames$0(MarkDuplicatesSparkUtils.java:142)
	at java.base/java.util.stream.ReferencePipeline$15$1.accept(ReferencePipeline.java:540)
	at java.base/java.util.ArrayList$ArrayListSpliterator.forEachRemaining(ArrayList.java:1625)
	at java.base/java.util.stream.AbstractPipeline.copyInto(AbstractPipeline.java:509)
	at java.base/java.util.stream.AbstractPipeline.wrapAndCopyInto(AbstractPipeline.java:499)
	at java.base/java.util.stream.ReduceOps$ReduceOp.evaluateSequential(ReduceOps.java:921)
	at java.base/java.util.stream.AbstractPipeline.evaluate(AbstractPipeline.java:234)
	at java.base/java.util.stream.ReferencePipeline.collect(ReferencePipeline.java:682)
	at org.broadinstitute.hellbender.tools.spark.transforms.markduplicates.MarkDuplicatesSparkUtils.lambda$transformToDuplicateNames$5ac2632f$1(MarkDuplicatesSparkUtils.java:150)
	at org.apache.spark.api.java.JavaRDDLike.$anonfun$flatMapToPair$1(JavaRDDLike.scala:143)
	at scala.collection.Iterator$$anon$10.nextCur(Iterator.scala:594)
	at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:608)
	at org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriter.write(BypassMergeSortShuffleWriter.java:140)
	at org.apache.spark.shuffle.ShuffleWriteProcessor.write(ShuffleWriteProcessor.scala:59)
	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:104)
	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:54)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:161)
	at org.apache.spark.scheduler.Task.run(Task.scala:141)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:620)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally(SparkErrorUtils.scala:64)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally$(SparkErrorUtils.scala:61)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:94)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:623)
	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
	at java.base/java.lang.Thread.run(Thread.java:840)

21:25:42.405 ERROR TaskSetManager - Task 1 in stage 33.0 failed 1 times; aborting job
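The exception and aborted stage above are the expected failure path, not a test failure: every test in this class passed, and the trace presumably comes from testReadsMissingReadGroups exercising a read with no RG tag through MarkDuplicatesSparkUtils.getLibraryForRead. A minimal htsjdk sketch of the invariant being enforced (the class and variable names below are illustrative, not taken from the test):

import htsjdk.samtools.SAMFileHeader;
import htsjdk.samtools.SAMReadGroupRecord;
import htsjdk.samtools.SAMRecord;
import htsjdk.samtools.SAMTag;

public class ReadGroupInvariantSketch {
    public static void main(String[] args) {
        // Header with one read group; MarkDuplicatesSpark resolves a read's
        // library through the header (getLibraryForRead in the trace above).
        SAMFileHeader header = new SAMFileHeader();
        SAMReadGroupRecord rg = new SAMReadGroupRecord("rg1");
        rg.setLibrary("lib1");
        header.addReadGroup(rg);

        // A record carrying no RG attribute is the condition that triggers
        // UserException$ReadMissingReadGroup in the log above.
        SAMRecord read = new SAMRecord(header);
        read.setReadName("READ");

        if (read.getReadGroup() == null) {
            // The fix the error message points at (the ReplaceReadGroups /
            // AddOrReplaceReadGroups utilities) amounts to tagging each read:
            read.setAttribute(SAMTag.RG.name(), rg.getReadGroupId());
        }
        System.out.println(read.getReadGroup().getLibrary()); // prints: lib1
    }
}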
21:25:49.603 WARN  TaskSetManager - Stage 38 contains a task of very large size (7639 KiB). The maximum recommended task size is 1000 KiB.
21:25:51.034 WARN  TaskSetManager - Stage 40 contains a task of very large size (7640 KiB). The maximum recommended task size is 1000 KiB.
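The last two warnings are Spark's serialized-task-size check firing during testSortOrderPartitioningCorrectness: stages 38 and 40 ship roughly 7.6 MiB per task against the recommended 1000 KiB. In a local test this usually means a large in-memory collection is being parallelized, so partition data travels inside each task. A hedged sketch of the two standard mitigations, assuming a JavaSparkContext ctx, a local List<GATKRead> reads, and a SAMFileHeader hdr (all three names hypothetical):

import java.util.List;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.broadcast.Broadcast;
import htsjdk.samtools.SAMFileHeader;
import org.broadinstitute.hellbender.utils.read.GATKRead;

public class TaskSizeSketch {
    static JavaRDD<GATKRead> distribute(JavaSparkContext ctx, List<GATKRead> reads, SAMFileHeader hdr) {
        // More slices -> each serialized task carries a smaller chunk of the local list.
        JavaRDD<GATKRead> rdd = ctx.parallelize(reads, 64);

        // Ship large shared state once per executor via a broadcast variable,
        // instead of capturing it in every task closure.
        final Broadcast<SAMFileHeader> header = ctx.broadcast(hdr);

        // Keep-all filter that reads through the broadcast, standing in for real work.
        return rdd.filter(read -> header.value().getReadGroups().size() >= 0);
    }
}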