Class org.broadinstitute.hellbender.engine.spark.datasources.VariantsSparkSinkUnitTest

33 tests, 1 failure, 32 ignored — 0.413s duration, 0% successful

Failed tests

setupMiniCluster

org.apache.hadoop.ipc.RemoteException(java.lang.NoClassDefFoundError): Could not initialize class org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos
	at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol.getDescriptor(ClientNamenodeProtocolProtos.java)
	at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.getDescriptorForType(ClientNamenodeProtocolProtos.java)
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:604)
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
	at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
	at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
	at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
	at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3203)

	at app//org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584)
	at app//org.apache.hadoop.ipc.Client.call(Client.java:1529)
	at app//org.apache.hadoop.ipc.Client.call(Client.java:1426)
	at app//org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258)
	at app//org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139)
	at app/jdk.proxy3/jdk.proxy3.$Proxy65.getDatanodeReport(Unknown Source)
	at app//org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$getDatanodeReport$28(ClientNamenodeProtocolTranslatorPB.java:713)
	at app//org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160)
	at app//org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.getDatanodeReport(ClientNamenodeProtocolTranslatorPB.java:713)
	at java.base@17.0.6/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at java.base@17.0.6/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
	at java.base@17.0.6/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.base@17.0.6/java.lang.reflect.Method.invoke(Method.java:568)
	at app//org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437)
	at app//org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170)
	at app//org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162)
	at app//org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100)
	at app//org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366)
	at app/jdk.proxy3/jdk.proxy3.$Proxy66.getDatanodeReport(Unknown Source)
	at app//org.apache.hadoop.hdfs.DFSClient.datanodeReport(DFSClient.java:2134)
	at app//org.apache.hadoop.hdfs.MiniDFSCluster.waitActive(MiniDFSCluster.java:2869)
	at app//org.apache.hadoop.hdfs.MiniDFSCluster.waitActive(MiniDFSCluster.java:2920)
	at app//org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:1848)
	at app//org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:995)
	at app//org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:594)
	at app//org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:533)
	at app//org.broadinstitute.hellbender.testutils.MiniClusterUtils.getMiniCluster(MiniClusterUtils.java:30)
	at app//org.broadinstitute.hellbender.testutils.MiniClusterUtils.getMiniCluster(MiniClusterUtils.java:38)
	at app//org.broadinstitute.hellbender.engine.spark.datasources.VariantsSparkSinkUnitTest.setupMiniCluster(VariantsSparkSinkUnitTest.java:60)
	at java.base@17.0.6/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at java.base@17.0.6/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
	at java.base@17.0.6/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.base@17.0.6/java.lang.reflect.Method.invoke(Method.java:568)
	at app//org.testng.internal.invokers.MethodInvocationHelper.invokeMethod(MethodInvocationHelper.java:139)
	at app//org.testng.internal.invokers.MethodInvocationHelper.invokeMethodConsideringTimeout(MethodInvocationHelper.java:69)
	at app//org.testng.internal.invokers.ConfigInvoker.invokeConfigurationMethod(ConfigInvoker.java:361)
	at app//org.testng.internal.invokers.ConfigInvoker.invokeConfigurations(ConfigInvoker.java:296)
	at app//org.testng.internal.invokers.TestMethodWorker.invokeBeforeClassMethods(TestMethodWorker.java:180)
	at app//org.testng.internal.invokers.TestMethodWorker.run(TestMethodWorker.java:122)
	at java.base@17.0.6/java.util.ArrayList.forEach(ArrayList.java:1511)
	at app//org.testng.TestRunner.privateRun(TestRunner.java:829)
	at app//org.testng.TestRunner.run(TestRunner.java:602)
	at app//org.testng.SuiteRunner.runTest(SuiteRunner.java:437)
	at app//org.testng.SuiteRunner.runSequentially(SuiteRunner.java:431)
	at app//org.testng.SuiteRunner.privateRun(SuiteRunner.java:391)
	at app//org.testng.SuiteRunner.run(SuiteRunner.java:330)
	at app//org.testng.SuiteRunnerWorker.runSuite(SuiteRunnerWorker.java:52)
	at app//org.testng.SuiteRunnerWorker.run(SuiteRunnerWorker.java:95)
	at app//org.testng.TestNG.runSuitesSequentially(TestNG.java:1256)
	at app//org.testng.TestNG.runSuitesLocally(TestNG.java:1176)
	at app//org.testng.TestNG.runSuites(TestNG.java:1099)
	at app//org.testng.TestNG.run(TestNG.java:1067)
	at org.gradle.api.internal.tasks.testing.testng.TestNGTestClassProcessor.runTests(TestNGTestClassProcessor.java:153)
	at org.gradle.api.internal.tasks.testing.testng.TestNGTestClassProcessor.stop(TestNGTestClassProcessor.java:95)
	at org.gradle.api.internal.tasks.testing.SuiteTestClassProcessor.stop(SuiteTestClassProcessor.java:63)
	at java.base@17.0.6/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at java.base@17.0.6/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
	at java.base@17.0.6/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.base@17.0.6/java.lang.reflect.Method.invoke(Method.java:568)
	at org.gradle.internal.dispatch.ReflectionDispatch.dispatch(ReflectionDispatch.java:36)
	at org.gradle.internal.dispatch.ReflectionDispatch.dispatch(ReflectionDispatch.java:24)
	at org.gradle.internal.dispatch.ContextClassLoaderDispatch.dispatch(ContextClassLoaderDispatch.java:33)
	at org.gradle.internal.dispatch.ProxyDispatchAdapter$DispatchingInvocationHandler.invoke(ProxyDispatchAdapter.java:92)
	at jdk.proxy1/jdk.proxy1.$Proxy4.stop(Unknown Source)
	at org.gradle.api.internal.tasks.testing.worker.TestWorker$3.run(TestWorker.java:200)
	at org.gradle.api.internal.tasks.testing.worker.TestWorker.executeAndMaintainThreadName(TestWorker.java:132)
	at org.gradle.api.internal.tasks.testing.worker.TestWorker.execute(TestWorker.java:103)
	at org.gradle.api.internal.tasks.testing.worker.TestWorker.execute(TestWorker.java:63)
	at org.gradle.process.internal.worker.child.ActionExecutionWorker.execute(ActionExecutionWorker.java:56)
	at org.gradle.process.internal.worker.child.SystemApplicationClassLoaderWorker.call(SystemApplicationClassLoaderWorker.java:121)
	at org.gradle.process.internal.worker.child.SystemApplicationClassLoaderWorker.call(SystemApplicationClassLoaderWorker.java:71)
	at app//worker.org.gradle.process.internal.worker.GradleWorkerMain.run(GradleWorkerMain.java:69)
	at app//worker.org.gradle.process.internal.worker.GradleWorkerMain.main(GradleWorkerMain.java:74)

Tests

Test Duration Result
setupMiniCluster 0.413s failed
testBrokenGVCFCasesAreDisallowed[0](false, .bcf) - ignored
testBrokenGVCFCasesAreDisallowed[1](false, .bcf.gz) - ignored
testBrokenGVCFCasesAreDisallowed[2](true, .g.bcf) - ignored
testBrokenGVCFCasesAreDisallowed[3](true, .g.bcf.gz) - ignored
testEnableDisableGVCFWriting[0](false, .vcf) - ignored
testEnableDisableGVCFWriting[1](false, .vcf.gz) - ignored
testEnableDisableGVCFWriting[2](true, .g.vcf) - ignored
testEnableDisableGVCFWriting[3](true, .g.vcf.gz) - ignored
testWritingToAnExistingFileHDFS[0](/home/runner/work/gatk/gatk/src/test/resources/Homo_sapiens_assembly19.dbsnp135.chr1_1M.exome_intervals.vcf, .vcf, true) - ignored
testWritingToAnExistingFileHDFS[1](/home/runner/work/gatk/gatk/src/test/resources/Homo_sapiens_assembly19.dbsnp135.chr1_1M.exome_intervals.vcf, .vcf.bgz, true) - ignored
testWritingToAnExistingFileHDFS[2](/home/runner/work/gatk/gatk/src/test/resources/Homo_sapiens_assembly19.dbsnp135.chr1_1M.exome_intervals.vcf, .vcf.bgz, false) - ignored
testWritingToAnExistingFileHDFS[3](/home/runner/work/gatk/gatk/src/test/resources/Homo_sapiens_assembly19.dbsnp135.chr1_1M.exome_intervals.vcf, .vcf.gz, true) - ignored
testWritingToAnExistingFileHDFS[4](/home/runner/work/gatk/gatk/src/test/resources/Homo_sapiens_assembly19.dbsnp135.chr1_1M.exome_intervals.vcf, .vcf.gz, false) - ignored
testWritingToAnExistingFileHDFS[5](/home/runner/work/gatk/gatk/src/test/resources/HSA19.dbsnp135.chr1_1M.exome_intervals.modified.vcf, .vcf, true) - ignored
testWritingToFileURL[0](/home/runner/work/gatk/gatk/src/test/resources/Homo_sapiens_assembly19.dbsnp135.chr1_1M.exome_intervals.vcf, .vcf, true) - ignored
testWritingToFileURL[1](/home/runner/work/gatk/gatk/src/test/resources/Homo_sapiens_assembly19.dbsnp135.chr1_1M.exome_intervals.vcf, .vcf.bgz, true) - ignored
testWritingToFileURL[2](/home/runner/work/gatk/gatk/src/test/resources/Homo_sapiens_assembly19.dbsnp135.chr1_1M.exome_intervals.vcf, .vcf.bgz, false) - ignored
testWritingToFileURL[3](/home/runner/work/gatk/gatk/src/test/resources/Homo_sapiens_assembly19.dbsnp135.chr1_1M.exome_intervals.vcf, .vcf.gz, true) - ignored
testWritingToFileURL[4](/home/runner/work/gatk/gatk/src/test/resources/Homo_sapiens_assembly19.dbsnp135.chr1_1M.exome_intervals.vcf, .vcf.gz, false) - ignored
testWritingToFileURL[5](/home/runner/work/gatk/gatk/src/test/resources/HSA19.dbsnp135.chr1_1M.exome_intervals.modified.vcf, .vcf, true) - ignored
variantsSinkHDFSTest[0](/home/runner/work/gatk/gatk/src/test/resources/Homo_sapiens_assembly19.dbsnp135.chr1_1M.exome_intervals.vcf, .vcf, true) - ignored
variantsSinkHDFSTest[1](/home/runner/work/gatk/gatk/src/test/resources/Homo_sapiens_assembly19.dbsnp135.chr1_1M.exome_intervals.vcf, .vcf.bgz, true) - ignored
variantsSinkHDFSTest[2](/home/runner/work/gatk/gatk/src/test/resources/Homo_sapiens_assembly19.dbsnp135.chr1_1M.exome_intervals.vcf, .vcf.bgz, false) - ignored
variantsSinkHDFSTest[3](/home/runner/work/gatk/gatk/src/test/resources/Homo_sapiens_assembly19.dbsnp135.chr1_1M.exome_intervals.vcf, .vcf.gz, true) - ignored
variantsSinkHDFSTest[4](/home/runner/work/gatk/gatk/src/test/resources/Homo_sapiens_assembly19.dbsnp135.chr1_1M.exome_intervals.vcf, .vcf.gz, false) - ignored
variantsSinkHDFSTest[5](/home/runner/work/gatk/gatk/src/test/resources/HSA19.dbsnp135.chr1_1M.exome_intervals.modified.vcf, .vcf, true) - ignored
variantsSinkTest[0](/home/runner/work/gatk/gatk/src/test/resources/Homo_sapiens_assembly19.dbsnp135.chr1_1M.exome_intervals.vcf, .vcf, true) - ignored
variantsSinkTest[1](/home/runner/work/gatk/gatk/src/test/resources/Homo_sapiens_assembly19.dbsnp135.chr1_1M.exome_intervals.vcf, .vcf.bgz, true) - ignored
variantsSinkTest[2](/home/runner/work/gatk/gatk/src/test/resources/Homo_sapiens_assembly19.dbsnp135.chr1_1M.exome_intervals.vcf, .vcf.bgz, false) - ignored
variantsSinkTest[3](/home/runner/work/gatk/gatk/src/test/resources/Homo_sapiens_assembly19.dbsnp135.chr1_1M.exome_intervals.vcf, .vcf.gz, true) - ignored
variantsSinkTest[4](/home/runner/work/gatk/gatk/src/test/resources/Homo_sapiens_assembly19.dbsnp135.chr1_1M.exome_intervals.vcf, .vcf.gz, false) - ignored
variantsSinkTest[5](/home/runner/work/gatk/gatk/src/test/resources/HSA19.dbsnp135.chr1_1M.exome_intervals.modified.vcf, .vcf, true) - ignored

Standard error

22:32:12.436 INFO  MiniDFSCluster - starting cluster: numNameNodes=1, numDataNodes=1
22:32:12.437 INFO  NameNode - Formatting using clusterid: testClusterID
22:32:12.437 INFO  FSEditLog - Edit logging is async:true
22:32:12.451 INFO  FSNamesystem - KeyProvider: null
22:32:12.451 INFO  FSNamesystem - fsLock is fair: true
22:32:12.451 INFO  FSNamesystem - Detailed lock hold time metrics enabled: false
22:32:12.451 INFO  FSNamesystem - fsOwner                = runner (auth:SIMPLE)
22:32:12.451 INFO  FSNamesystem - supergroup             = supergroup
22:32:12.451 INFO  FSNamesystem - isPermissionEnabled    = true
22:32:12.451 INFO  FSNamesystem - isStoragePolicyEnabled = true
22:32:12.451 INFO  FSNamesystem - HA Enabled: false
22:32:12.451 INFO  Util - dfs.datanode.fileio.profiling.sampling.percentage set to 0. Disabling file IO profiling
22:32:12.451 INFO  DatanodeManager - Slow peers collection thread shutdown
22:32:12.452 INFO  DatanodeManager - dfs.block.invalidate.limit : configured=1000, counted=60, effected=1000
22:32:12.452 INFO  DatanodeManager - dfs.namenode.datanode.registration.ip-hostname-check=true
22:32:12.452 INFO  BlockManager - dfs.namenode.startup.delay.block.deletion.sec is set to 000:00:00:00.000
22:32:12.452 INFO  BlockManager - The block deletion will start around 2025 Mar 28 22:32:12
22:32:12.452 INFO  GSet - Computing capacity for map BlocksMap
22:32:12.452 INFO  GSet - VM type       = 64-bit
22:32:12.452 INFO  GSet - 2.0% max memory 3.4 GB = 70 MB
22:32:12.452 INFO  GSet - capacity      = 2^23 = 8388608 entries
22:32:12.453 INFO  BlockManager - Storage policy satisfier is disabled
22:32:12.453 INFO  BlockManager - dfs.block.access.token.enable = false
22:32:12.453 INFO  BlockManagerSafeMode - Using 1000 as SafeModeMonitor Interval
22:32:12.453 INFO  BlockManagerSafeMode - dfs.namenode.safemode.threshold-pct = 0.999
22:32:12.453 INFO  BlockManagerSafeMode - dfs.namenode.safemode.min.datanodes = 0
22:32:12.453 INFO  BlockManagerSafeMode - dfs.namenode.safemode.extension = 0
22:32:12.453 INFO  BlockManager - defaultReplication         = 1
22:32:12.453 INFO  BlockManager - maxReplication             = 512
22:32:12.453 INFO  BlockManager - minReplication             = 1
22:32:12.453 INFO  BlockManager - maxReplicationStreams      = 2
22:32:12.453 INFO  BlockManager - redundancyRecheckInterval  = 3000ms
22:32:12.453 INFO  BlockManager - encryptDataTransfer        = false
22:32:12.453 INFO  BlockManager - maxNumBlocksToLog          = 1000
22:32:12.453 INFO  GSet - Computing capacity for map INodeMap
22:32:12.453 INFO  GSet - VM type       = 64-bit
22:32:12.453 INFO  GSet - 1.0% max memory 3.4 GB = 35 MB
22:32:12.453 INFO  GSet - capacity      = 2^22 = 4194304 entries
22:32:12.454 INFO  FSDirectory - ACLs enabled? true
22:32:12.454 INFO  FSDirectory - POSIX ACL inheritance enabled? true
22:32:12.454 INFO  FSDirectory - XAttrs enabled? true
22:32:12.454 INFO  NameNode - Caching file names occurring more than 10 times
22:32:12.454 INFO  SnapshotManager - Loaded config captureOpenFiles: false, skipCaptureAccessTimeOnlyChange: false, snapshotDiffAllowSnapRootDescendant: true, maxSnapshotFSLimit: 65536, maxSnapshotLimit: 65536
22:32:12.454 INFO  SnapshotManager - dfs.namenode.snapshot.deletion.ordered = false
22:32:12.454 INFO  SnapshotManager - SkipList is disabled
22:32:12.454 INFO  GSet - Computing capacity for map cachedBlocks
22:32:12.454 INFO  GSet - VM type       = 64-bit
22:32:12.455 INFO  GSet - 0.25% max memory 3.4 GB = 8.8 MB
22:32:12.455 INFO  GSet - capacity      = 2^20 = 1048576 entries
22:32:12.455 INFO  TopMetrics - NNTop conf: dfs.namenode.top.window.num.buckets = 10
22:32:12.455 INFO  TopMetrics - NNTop conf: dfs.namenode.top.num.users = 10
22:32:12.455 INFO  TopMetrics - NNTop conf: dfs.namenode.top.windows.minutes = 1,5,25
22:32:12.455 INFO  FSNamesystem - Retry cache on namenode is enabled
22:32:12.455 INFO  FSNamesystem - Retry cache will use 0.03 of total heap and retry cache entry expiry time is 600000 millis
22:32:12.455 INFO  GSet - Computing capacity for map NameNodeRetryCache
22:32:12.455 INFO  GSet - VM type       = 64-bit
22:32:12.455 INFO  GSet - 0.029999999329447746% max memory 3.4 GB = 1.0 MB
22:32:12.455 INFO  GSet - capacity      = 2^17 = 131072 entries
22:32:12.456 INFO  FSImage - Allocated new BlockPoolId: BP-120240910-10.1.0.27-1743201132456
22:32:12.458 INFO  Storage - Storage directory /tmp/minicluster_storage3069900156316144227/name-0-1 has been successfully formatted.
22:32:12.460 INFO  Storage - Storage directory /tmp/minicluster_storage3069900156316144227/name-0-2 has been successfully formatted.
22:32:12.471 INFO  FSImageFormatProtobuf - Saving image file /tmp/minicluster_storage3069900156316144227/name-0-2/current/fsimage.ckpt_0000000000000000000 using no compression
22:32:12.471 INFO  FSImageFormatProtobuf - Saving image file /tmp/minicluster_storage3069900156316144227/name-0-1/current/fsimage.ckpt_0000000000000000000 using no compression
22:32:12.475 INFO  FSImageFormatProtobuf - Image file /tmp/minicluster_storage3069900156316144227/name-0-1/current/fsimage.ckpt_0000000000000000000 of size 401 bytes saved in 0 seconds .
22:32:12.476 INFO  FSImageFormatProtobuf - Image file /tmp/minicluster_storage3069900156316144227/name-0-2/current/fsimage.ckpt_0000000000000000000 of size 401 bytes saved in 0 seconds .
22:32:12.478 INFO  NNStorageRetentionManager - Going to retain 1 images with txid >= 0
22:32:12.478 INFO  DatanodeManager - Slow peers collection thread shutdown
22:32:12.483 INFO  FSNamesystem - Stopping services started for active state
22:32:12.484 INFO  FSNamesystem - Stopping services started for standby state
22:32:12.485 INFO  NameNode - createNameNode []
22:32:12.486 WARN  MetricsConfig - Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
22:32:12.487 INFO  MetricsSystemImpl - Scheduled Metric snapshot period at 10 second(s).
22:32:12.487 INFO  MetricsSystemImpl - NameNode metrics system started
22:32:12.488 INFO  NameNodeUtils - fs.defaultFS is hdfs://127.0.0.1:0
22:32:12.504 INFO  JvmPauseMonitor - Starting JVM pause monitor
22:32:12.504 INFO  DFSUtil - Filter initializers set : org.apache.hadoop.http.lib.StaticUserWebFilter,org.apache.hadoop.hdfs.web.AuthFilterInitializer
22:32:12.504 INFO  DFSUtil - Starting Web-server for hdfs at: http://localhost:0
22:32:12.506 WARN  AuthenticationFilter - Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/runner/hadoop-http-auth-signature-secret
22:32:12.508 INFO  HttpServer2 - Added global filter 'safety' (class=org.apache.hadoop.http.HttpServer2$QuotingInputFilter)
22:32:12.508 INFO  HttpServer2 - Added filter static_user_filter (class=org.apache.hadoop.http.lib.StaticUserWebFilter$StaticUserFilter) to context hdfs
22:32:12.508 INFO  HttpServer2 - Added filter static_user_filter (class=org.apache.hadoop.http.lib.StaticUserWebFilter$StaticUserFilter) to context static
22:32:12.510 INFO  HttpServer2 - Added filter AuthFilter (class=org.apache.hadoop.hdfs.web.AuthFilter) to context hdfs
22:32:12.510 INFO  HttpServer2 - Added filter AuthFilter (class=org.apache.hadoop.hdfs.web.AuthFilter) to context static
22:32:12.510 INFO  HttpServer2 - ASYNC_PROFILER_HOME environment variable and async.profiler.home system property not specified. Disabling /prof endpoint.
22:32:12.510 INFO  HttpServer2 - addJerseyResourcePackage: packageName=org.apache.hadoop.hdfs.server.namenode.web.resources;org.apache.hadoop.hdfs.web.resources, pathSpec=/webhdfs/v1/*
22:32:12.510 INFO  HttpServer2 - Jetty bound to port 35293
22:32:12.510 INFO  Server - jetty-9.4.56.v20240826; built: 2024-08-26T17:15:05.868Z; git: ec6782ff5ead824dabdcf47fa98f90a4aedff401; jvm 17.0.6+10
22:32:12.512 INFO  session - DefaultSessionIdManager workerName=node0
22:32:12.512 INFO  session - No SessionScavenger set, using defaults
22:32:12.512 INFO  session - node0 Scavenging every 660000ms
22:32:12.513 WARN  AuthenticationFilter - Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/runner/hadoop-http-auth-signature-secret
22:32:12.513 INFO  ContextHandler - Started o.e.j.s.ServletContextHandler@709bb6a6{static,/static,jar:file:/home/runner/.gradle/caches/modules-2/files-2.1/org.apache.hadoop/hadoop-hdfs/3.4.0/18f9797a908899efbe4e27f6d5b76420f446a695/hadoop-hdfs-3.4.0-tests.jar!/webapps/static,AVAILABLE}
22:32:12.621 INFO  ContextHandler - Started o.e.j.w.WebAppContext@1d8264c9{hdfs,/,file:///tmp/jetty-localhost-35293-hadoop-hdfs-3_4_0-tests_jar-_-any-16924407802277112949/webapp/,AVAILABLE}{jar:file:/home/runner/.gradle/caches/modules-2/files-2.1/org.apache.hadoop/hadoop-hdfs/3.4.0/18f9797a908899efbe4e27f6d5b76420f446a695/hadoop-hdfs-3.4.0-tests.jar!/webapps/hdfs}
22:32:12.622 INFO  AbstractConnector - Started ServerConnector@300f2857{HTTP/1.1, (http/1.1)}{localhost:35293}
22:32:12.622 INFO  Server - Started @34793ms
22:32:12.623 INFO  FSEditLog - Edit logging is async:true
22:32:12.634 INFO  FSNamesystem - KeyProvider: null
22:32:12.634 INFO  FSNamesystem - fsLock is fair: true
22:32:12.634 INFO  FSNamesystem - Detailed lock hold time metrics enabled: false
22:32:12.634 INFO  FSNamesystem - fsOwner                = runner (auth:SIMPLE)
22:32:12.634 INFO  FSNamesystem - supergroup             = supergroup
22:32:12.634 INFO  FSNamesystem - isPermissionEnabled    = true
22:32:12.634 INFO  FSNamesystem - isStoragePolicyEnabled = true
22:32:12.634 INFO  FSNamesystem - HA Enabled: false
22:32:12.635 INFO  Util - dfs.datanode.fileio.profiling.sampling.percentage set to 0. Disabling file IO profiling
22:32:12.635 INFO  DatanodeManager - Slow peers collection thread shutdown
22:32:12.635 INFO  DatanodeManager - dfs.block.invalidate.limit : configured=1000, counted=60, effected=1000
22:32:12.635 INFO  DatanodeManager - dfs.namenode.datanode.registration.ip-hostname-check=true
22:32:12.635 INFO  BlockManager - dfs.namenode.startup.delay.block.deletion.sec is set to 000:00:00:00.000
22:32:12.635 INFO  BlockManager - The block deletion will start around 2025 Mar 28 22:32:12
22:32:12.635 INFO  GSet - Computing capacity for map BlocksMap
22:32:12.635 INFO  GSet - VM type       = 64-bit
22:32:12.635 INFO  GSet - 2.0% max memory 3.4 GB = 70 MB
22:32:12.635 INFO  GSet - capacity      = 2^23 = 8388608 entries
22:32:12.637 INFO  BlockManager - Storage policy satisfier is disabled
22:32:12.637 INFO  BlockManager - dfs.block.access.token.enable = false
22:32:12.637 INFO  BlockManagerSafeMode - Using 1000 as SafeModeMonitor Interval
22:32:12.637 INFO  BlockManagerSafeMode - dfs.namenode.safemode.threshold-pct = 0.999
22:32:12.637 INFO  BlockManagerSafeMode - dfs.namenode.safemode.min.datanodes = 0
22:32:12.637 INFO  BlockManagerSafeMode - dfs.namenode.safemode.extension = 0
22:32:12.637 INFO  BlockManager - defaultReplication         = 1
22:32:12.637 INFO  BlockManager - maxReplication             = 512
22:32:12.637 INFO  BlockManager - minReplication             = 1
22:32:12.637 INFO  BlockManager - maxReplicationStreams      = 2
22:32:12.637 INFO  BlockManager - redundancyRecheckInterval  = 3000ms
22:32:12.637 INFO  BlockManager - encryptDataTransfer        = false
22:32:12.637 INFO  BlockManager - maxNumBlocksToLog          = 1000
22:32:12.638 INFO  GSet - Computing capacity for map INodeMap
22:32:12.638 INFO  GSet - VM type       = 64-bit
22:32:12.638 INFO  GSet - 1.0% max memory 3.4 GB = 35 MB
22:32:12.638 INFO  GSet - capacity      = 2^22 = 4194304 entries
22:32:12.639 INFO  FSDirectory - ACLs enabled? true
22:32:12.639 INFO  FSDirectory - POSIX ACL inheritance enabled? true
22:32:12.639 INFO  FSDirectory - XAttrs enabled? true
22:32:12.639 INFO  NameNode - Caching file names occurring more than 10 times
22:32:12.639 INFO  SnapshotManager - Loaded config captureOpenFiles: false, skipCaptureAccessTimeOnlyChange: false, snapshotDiffAllowSnapRootDescendant: true, maxSnapshotFSLimit: 65536, maxSnapshotLimit: 65536
22:32:12.639 INFO  SnapshotManager - dfs.namenode.snapshot.deletion.ordered = false
22:32:12.639 INFO  SnapshotManager - SkipList is disabled
22:32:12.639 INFO  GSet - Computing capacity for map cachedBlocks
22:32:12.639 INFO  GSet - VM type       = 64-bit
22:32:12.639 INFO  GSet - 0.25% max memory 3.4 GB = 8.8 MB
22:32:12.639 INFO  GSet - capacity      = 2^20 = 1048576 entries
22:32:12.639 INFO  TopMetrics - NNTop conf: dfs.namenode.top.window.num.buckets = 10
22:32:12.639 INFO  TopMetrics - NNTop conf: dfs.namenode.top.num.users = 10
22:32:12.639 INFO  TopMetrics - NNTop conf: dfs.namenode.top.windows.minutes = 1,5,25
22:32:12.639 INFO  FSNamesystem - Retry cache on namenode is enabled
22:32:12.639 INFO  FSNamesystem - Retry cache will use 0.03 of total heap and retry cache entry expiry time is 600000 millis
22:32:12.639 INFO  GSet - Computing capacity for map NameNodeRetryCache
22:32:12.639 INFO  GSet - VM type       = 64-bit
22:32:12.639 INFO  GSet - 0.029999999329447746% max memory 3.4 GB = 1.0 MB
22:32:12.639 INFO  GSet - capacity      = 2^17 = 131072 entries
22:32:12.642 INFO  Storage - Lock on /tmp/minicluster_storage3069900156316144227/name-0-1/in_use.lock acquired by nodename 2817@fv-az1720-291
22:32:12.643 INFO  Storage - Lock on /tmp/minicluster_storage3069900156316144227/name-0-2/in_use.lock acquired by nodename 2817@fv-az1720-291
22:32:12.644 INFO  FileJournalManager - Recovering unfinalized segments in /tmp/minicluster_storage3069900156316144227/name-0-1/current
22:32:12.644 INFO  FileJournalManager - Recovering unfinalized segments in /tmp/minicluster_storage3069900156316144227/name-0-2/current
22:32:12.644 INFO  FSImage - No edit log streams selected.
22:32:12.644 INFO  FSImage - Planning to load image: FSImageFile(file=/tmp/minicluster_storage3069900156316144227/name-0-1/current/fsimage_0000000000000000000, cpktTxId=0000000000000000000)
22:32:12.645 INFO  FSImageFormatPBINode - Loading 1 INodes.
22:32:12.645 INFO  FSImageFormatPBINode - Successfully loaded 1 inodes
22:32:12.645 INFO  FSImageFormatPBINode - Completed update blocks map and name cache, total waiting duration 0ms.
22:32:12.645 INFO  FSImageFormatProtobuf - Loaded FSImage in 0 seconds.
22:32:12.645 INFO  FSImage - Loaded image for txid 0 from /tmp/minicluster_storage3069900156316144227/name-0-1/current/fsimage_0000000000000000000
22:32:12.645 INFO  FSNamesystem - Need to save fs image? false (staleImage=false, haEnabled=false, isRollingUpgrade=false)
22:32:12.646 INFO  FSEditLog - Starting log segment at 1
22:32:12.651 INFO  NameCache - initialized with 0 entries 0 lookups
22:32:12.651 INFO  FSNamesystem - Finished loading FSImage in 10 msecs
22:32:12.651 INFO  NameNode - RPC server is binding to localhost:0
22:32:12.651 INFO  NameNode - Enable NameNode state context:false
22:32:12.651 INFO  CallQueueManager - Using callQueue: class java.util.concurrent.LinkedBlockingQueue, queueCapacity: 1000, scheduler: class org.apache.hadoop.ipc.DefaultRpcScheduler, ipcBackoff: false, ipcFailOver: false.
22:32:12.651 INFO  Server - Listener at localhost:34531
22:32:12.652 INFO  Server - Starting Socket Reader #1 for port 0
22:32:12.654 INFO  NameNode - Clients are to use localhost:34531 to access this namenode/service.
22:32:12.654 INFO  FSNamesystem - Registered FSNamesystemState, ReplicatedBlocksState and ECBlockGroupsState MBeans.
22:32:12.667 INFO  LeaseManager - Number of blocks under construction: 0
22:32:12.667 INFO  DatanodeAdminDefaultMonitor - Initialized the Default Decommission and Maintenance monitor
22:32:12.668 INFO  BlockManager - Start MarkedDeleteBlockScrubber thread
22:32:12.668 INFO  BlockManager - initializing replication queues
22:32:12.668 INFO  StateChange - STATE* Leaving safe mode after 0 secs
22:32:12.668 INFO  StateChange - STATE* Network topology has 0 racks and 0 datanodes
22:32:12.668 INFO  StateChange - STATE* UnderReplicatedBlocks has 0 blocks
22:32:12.672 INFO  Server - IPC Server Responder: starting
22:32:12.672 INFO  Server - IPC Server listener on 0: starting
22:32:12.674 INFO  BlockManager - Total number of blocks            = 0
22:32:12.674 INFO  BlockManager - Number of invalid blocks          = 0
22:32:12.674 INFO  BlockManager - Number of under-replicated blocks = 0
22:32:12.674 INFO  BlockManager - Number of  over-replicated blocks = 0
22:32:12.674 INFO  BlockManager - Number of blocks being written    = 0
22:32:12.674 INFO  StateChange - STATE* Replication Queue initialization scan for invalid, over- and under-replicated blocks completed in 6 msec
22:32:12.674 INFO  BlockManager - Reconstruction queues initialisation progress: 0.0, total number of blocks processed: 0/0
22:32:12.675 INFO  NameNode - NameNode RPC up at: localhost/127.0.0.1:34531.
22:32:12.675 INFO  FSNamesystem - Starting services required for active state
22:32:12.675 INFO  FSDirectory - Initializing quota with 12 thread(s)
22:32:12.676 INFO  FSDirectory - Quota initialization completed in 0 milliseconds
name space=1
storage space=0
storage types=RAM_DISK=0, SSD=0, DISK=0, ARCHIVE=0, PROVIDED=0, NVDIMM=0
22:32:12.677 INFO  CacheReplicationMonitor - Starting CacheReplicationMonitor with interval 30000 milliseconds
22:32:12.678 INFO  MiniDFSCluster - Starting DataNode 0 with dfs.datanode.data.dir: [DISK]file:/tmp/minicluster_storage3069900156316144227/data/data1,[DISK]file:/tmp/minicluster_storage3069900156316144227/data/data2
22:32:12.679 INFO  ThrottledAsyncChecker - Scheduling a check for [DISK]file:/tmp/minicluster_storage3069900156316144227/data/data1
22:32:12.679 INFO  ThrottledAsyncChecker - Scheduling a check for [DISK]file:/tmp/minicluster_storage3069900156316144227/data/data2
22:32:12.689 INFO  MetricsSystemImpl - DataNode metrics system started (again)
22:32:12.689 INFO  Util - dfs.datanode.fileio.profiling.sampling.percentage set to 0. Disabling file IO profiling
22:32:12.689 INFO  BlockScanner - Initialized block scanner with targetBytesPerSec 1048576
22:32:12.690 INFO  DataNode - Configured hostname is 127.0.0.1
22:32:12.690 INFO  Util - dfs.datanode.fileio.profiling.sampling.percentage set to 0. Disabling file IO profiling
22:32:12.690 INFO  DataNode - Starting DataNode with maxLockedMemory = 0
22:32:12.691 INFO  DataNode - Opened streaming server at /127.0.0.1:46057
22:32:12.691 INFO  DataNode - Balancing bandwidth is 104857600 bytes/s
22:32:12.691 INFO  DataNode - Number threads for balancing is 100
22:32:12.692 WARN  AuthenticationFilter - Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/runner/hadoop-http-auth-signature-secret
22:32:12.694 INFO  HttpServer2 - Added global filter 'safety' (class=org.apache.hadoop.http.HttpServer2$QuotingInputFilter)
22:32:12.695 INFO  HttpServer2 - Added filter static_user_filter (class=org.apache.hadoop.http.lib.StaticUserWebFilter$StaticUserFilter) to context datanode
22:32:12.695 INFO  HttpServer2 - Added filter static_user_filter (class=org.apache.hadoop.http.lib.StaticUserWebFilter$StaticUserFilter) to context static
22:32:12.695 INFO  HttpServer2 - ASYNC_PROFILER_HOME environment variable and async.profiler.home system property not specified. Disabling /prof endpoint.
22:32:12.695 INFO  HttpServer2 - Jetty bound to port 46359
22:32:12.695 INFO  Server - jetty-9.4.56.v20240826; built: 2024-08-26T17:15:05.868Z; git: ec6782ff5ead824dabdcf47fa98f90a4aedff401; jvm 17.0.6+10
22:32:12.696 INFO  session - DefaultSessionIdManager workerName=node0
22:32:12.696 INFO  session - No SessionScavenger set, using defaults
22:32:12.696 INFO  session - node0 Scavenging every 660000ms
22:32:12.697 INFO  ContextHandler - Started o.e.j.s.ServletContextHandler@6096b636{static,/static,jar:file:/home/runner/.gradle/caches/modules-2/files-2.1/org.apache.hadoop/hadoop-hdfs/3.4.0/18f9797a908899efbe4e27f6d5b76420f446a695/hadoop-hdfs-3.4.0-tests.jar!/webapps/static,AVAILABLE}
22:32:12.786 INFO  ContextHandler - Started o.e.j.w.WebAppContext@3561a0f7{datanode,/,file:///tmp/jetty-localhost-46359-hadoop-hdfs-3_4_0-tests_jar-_-any-17653970251020758176/webapp/,AVAILABLE}{jar:file:/home/runner/.gradle/caches/modules-2/files-2.1/org.apache.hadoop/hadoop-hdfs/3.4.0/18f9797a908899efbe4e27f6d5b76420f446a695/hadoop-hdfs-3.4.0-tests.jar!/webapps/datanode}
22:32:12.786 INFO  AbstractConnector - Started ServerConnector@6754608e{HTTP/1.1, (http/1.1)}{localhost:46359}
22:32:12.786 INFO  Server - Started @34957ms
22:32:12.788 WARN  DatanodeHttpServer - Got null for restCsrfPreventionFilter - will not do any filtering.
22:32:12.789 INFO  DatanodeHttpServer - Listening HTTP traffic on /127.0.0.1:39759
22:32:12.789 INFO  DataNode - dnUserName = runner
22:32:12.789 INFO  DataNode - supergroup = supergroup
22:32:12.789 INFO  JvmPauseMonitor - Starting JVM pause monitor
22:32:12.789 INFO  CallQueueManager - Using callQueue: class java.util.concurrent.LinkedBlockingQueue, queueCapacity: 1000, scheduler: class org.apache.hadoop.ipc.DefaultRpcScheduler, ipcBackoff: false, ipcFailOver: false.
22:32:12.790 INFO  Server - Listener at localhost:39269
22:32:12.790 INFO  Server - Starting Socket Reader #1 for port 0
22:32:12.792 INFO  DataNode - Opened IPC server at /127.0.0.1:39269
22:32:12.803 INFO  DataNode - Refresh request received for nameservices: null
22:32:12.804 INFO  DataNode - Starting BPOfferServices for nameservices: <default>
22:32:12.805 INFO  DataNode - Block pool <registering> (Datanode Uuid unassigned) service to localhost/127.0.0.1:34531 starting to offer service
22:32:12.805 INFO  Server - IPC Server Responder: starting
22:32:12.805 INFO  Server - IPC Server listener on 0: starting
22:32:12.807 WARN  Server - IPC Server handler 0 on default port 34531, call Call#9 Retry#0 org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol.versionRequest from localhost:44120 / 127.0.0.1:44120
java.lang.NoClassDefFoundError: Could not initialize class org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos
	at org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos$DatanodeProtocolService.getDescriptor(DatanodeProtocolProtos.java:34977) ~[hadoop-hdfs-3.4.0.jar:?]
	at org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos$DatanodeProtocolService$2.getDescriptorForType(DatanodeProtocolProtos.java:34750) ~[hadoop-hdfs-3.4.0.jar:?]
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:604) ~[hadoop-common-3.4.0.jar:?]
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) ~[hadoop-common-3.4.0.jar:?]
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) ~[hadoop-common-3.4.0.jar:?]
	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) ~[hadoop-common-3.4.0.jar:?]
	at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) ~[hadoop-common-3.4.0.jar:?]
	at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) ~[hadoop-common-3.4.0.jar:?]
	at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) [?:?]
	at java.base/javax.security.auth.Subject.doAs(Subject.java:439) [?:?]
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) [hadoop-common-3.4.0.jar:?]
	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3203) [hadoop-common-3.4.0.jar:?]
22:32:12.808 WARN  DataNode - Problem connecting to server: localhost/127.0.0.1:34531
22:32:12.810 WARN  Server - IPC Server handler 1 on default port 34531, call Call#10 Retry#0 org.apache.hadoop.hdfs.protocol.ClientProtocol.getDatanodeReport from localhost:44136 / 127.0.0.1:44136
java.lang.NoClassDefFoundError: Could not initialize class org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos
	at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol.getDescriptor(ClientNamenodeProtocolProtos.java) ~[hadoop-hdfs-client-3.4.0.jar:?]
	at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.getDescriptorForType(ClientNamenodeProtocolProtos.java) ~[hadoop-hdfs-client-3.4.0.jar:?]
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:604) ~[hadoop-common-3.4.0.jar:?]
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) ~[hadoop-common-3.4.0.jar:?]
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) ~[hadoop-common-3.4.0.jar:?]
	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) ~[hadoop-common-3.4.0.jar:?]
	at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) ~[hadoop-common-3.4.0.jar:?]
	at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) ~[hadoop-common-3.4.0.jar:?]
	at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) [?:?]
	at java.base/javax.security.auth.Subject.doAs(Subject.java:439) [?:?]
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) [hadoop-common-3.4.0.jar:?]
	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3203) [hadoop-common-3.4.0.jar:?]
22:32:12.811 WARN  Server - IPC Server handler 2 on default port 34531, call Call#11 Retry#0 org.apache.hadoop.hdfs.protocol.ClientProtocol.getDatanodeReport from localhost:44136 / 127.0.0.1:44136
java.lang.NoClassDefFoundError: Could not initialize class org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos
	at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol.getDescriptor(ClientNamenodeProtocolProtos.java) ~[hadoop-hdfs-client-3.4.0.jar:?]
	at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.getDescriptorForType(ClientNamenodeProtocolProtos.java) ~[hadoop-hdfs-client-3.4.0.jar:?]
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:604) ~[hadoop-common-3.4.0.jar:?]
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) ~[hadoop-common-3.4.0.jar:?]
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) ~[hadoop-common-3.4.0.jar:?]
	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) ~[hadoop-common-3.4.0.jar:?]
	at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) ~[hadoop-common-3.4.0.jar:?]
	at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) ~[hadoop-common-3.4.0.jar:?]
	at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) [?:?]
	at java.base/javax.security.auth.Subject.doAs(Subject.java:439) [?:?]
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) [hadoop-common-3.4.0.jar:?]
	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3203) [hadoop-common-3.4.0.jar:?]
22:32:12.812 WARN  MiniDFSCluster - Tried waitActive() 2 time(s) and failed, giving up.  org.apache.hadoop.ipc.RemoteException(java.lang.NoClassDefFoundError): Could not initialize class org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos
	at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol.getDescriptor(ClientNamenodeProtocolProtos.java)
	at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.getDescriptorForType(ClientNamenodeProtocolProtos.java)
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:604)
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
	at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
	at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
	at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
	at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3203)

	at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584)
	at org.apache.hadoop.ipc.Client.call(Client.java:1529)
	at org.apache.hadoop.ipc.Client.call(Client.java:1426)
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258)
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139)
	at jdk.proxy3/jdk.proxy3.$Proxy65.getDatanodeReport(Unknown Source)
	at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$getDatanodeReport$28(ClientNamenodeProtocolTranslatorPB.java:713)
	at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160)
	at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.getDatanodeReport(ClientNamenodeProtocolTranslatorPB.java:713)
	at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
	at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.base/java.lang.reflect.Method.invoke(Method.java:568)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437)
	at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170)
	at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162)
	at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366)
	at jdk.proxy3/jdk.proxy3.$Proxy66.getDatanodeReport(Unknown Source)
	at org.apache.hadoop.hdfs.DFSClient.datanodeReport(DFSClient.java:2134)
	at org.apache.hadoop.hdfs.MiniDFSCluster.waitActive(MiniDFSCluster.java:2869)
	at org.apache.hadoop.hdfs.MiniDFSCluster.waitActive(MiniDFSCluster.java:2920)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:1848)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:995)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:594)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:533)
	at org.broadinstitute.hellbender.testutils.MiniClusterUtils.getMiniCluster(MiniClusterUtils.java:30)
	at org.broadinstitute.hellbender.testutils.MiniClusterUtils.getMiniCluster(MiniClusterUtils.java:38)
	at org.broadinstitute.hellbender.engine.spark.datasources.VariantsSparkSinkUnitTest.setupMiniCluster(VariantsSparkSinkUnitTest.java:60)
	at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
	at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.base/java.lang.reflect.Method.invoke(Method.java:568)
	at org.testng.internal.invokers.MethodInvocationHelper.invokeMethod(MethodInvocationHelper.java:139)
	at org.testng.internal.invokers.MethodInvocationHelper.invokeMethodConsideringTimeout(MethodInvocationHelper.java:69)
	at org.testng.internal.invokers.ConfigInvoker.invokeConfigurationMethod(ConfigInvoker.java:361)
	at org.testng.internal.invokers.ConfigInvoker.invokeConfigurations(ConfigInvoker.java:296)
	at org.testng.internal.invokers.TestMethodWorker.invokeBeforeClassMethods(TestMethodWorker.java:180)
	at org.testng.internal.invokers.TestMethodWorker.run(TestMethodWorker.java:122)
	at java.base/java.util.ArrayList.forEach(ArrayList.java:1511)
	at org.testng.TestRunner.privateRun(TestRunner.java:829)
	at org.testng.TestRunner.run(TestRunner.java:602)
	at org.testng.SuiteRunner.runTest(SuiteRunner.java:437)
	at org.testng.SuiteRunner.runSequentially(SuiteRunner.java:431)
	at org.testng.SuiteRunner.privateRun(SuiteRunner.java:391)
	at org.testng.SuiteRunner.run(SuiteRunner.java:330)
	at org.testng.SuiteRunnerWorker.runSuite(SuiteRunnerWorker.java:52)
	at org.testng.SuiteRunnerWorker.run(SuiteRunnerWorker.java:95)
	at org.testng.TestNG.runSuitesSequentially(TestNG.java:1256)
	at org.testng.TestNG.runSuitesLocally(TestNG.java:1176)
	at org.testng.TestNG.runSuites(TestNG.java:1099)
	at org.testng.TestNG.run(TestNG.java:1067)
	at org.gradle.api.internal.tasks.testing.testng.TestNGTestClassProcessor.runTests(TestNGTestClassProcessor.java:153)
	at org.gradle.api.internal.tasks.testing.testng.TestNGTestClassProcessor.stop(TestNGTestClassProcessor.java:95)
	at org.gradle.api.internal.tasks.testing.SuiteTestClassProcessor.stop(SuiteTestClassProcessor.java:63)
	at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
	at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.base/java.lang.reflect.Method.invoke(Method.java:568)
	at org.gradle.internal.dispatch.ReflectionDispatch.dispatch(ReflectionDispatch.java:36)
	at org.gradle.internal.dispatch.ReflectionDispatch.dispatch(ReflectionDispatch.java:24)
	at org.gradle.internal.dispatch.ContextClassLoaderDispatch.dispatch(ContextClassLoaderDispatch.java:33)
	at org.gradle.internal.dispatch.ProxyDispatchAdapter$DispatchingInvocationHandler.invoke(ProxyDispatchAdapter.java:92)
	at jdk.proxy1/jdk.proxy1.$Proxy4.stop(Unknown Source)
	at org.gradle.api.internal.tasks.testing.worker.TestWorker$3.run(TestWorker.java:200)
	at org.gradle.api.internal.tasks.testing.worker.TestWorker.executeAndMaintainThreadName(TestWorker.java:132)
	at org.gradle.api.internal.tasks.testing.worker.TestWorker.execute(TestWorker.java:103)
	at org.gradle.api.internal.tasks.testing.worker.TestWorker.execute(TestWorker.java:63)
	at org.gradle.process.internal.worker.child.ActionExecutionWorker.execute(ActionExecutionWorker.java:56)
	at org.gradle.process.internal.worker.child.SystemApplicationClassLoaderWorker.call(SystemApplicationClassLoaderWorker.java:121)
	at org.gradle.process.internal.worker.child.SystemApplicationClassLoaderWorker.call(SystemApplicationClassLoaderWorker.java:71)
	at worker.org.gradle.process.internal.worker.GradleWorkerMain.run(GradleWorkerMain.java:69)
	at worker.org.gradle.process.internal.worker.GradleWorkerMain.main(GradleWorkerMain.java:74)

22:32:12.812 INFO  MiniDFSCluster - Shutting down the Mini HDFS Cluster
22:32:12.812 INFO  MiniDFSCluster - Shutting down DataNode 0
22:32:12.812 INFO  DataNode - Closing all peers.
22:32:12.815 INFO  ContextHandler - Stopped o.e.j.w.WebAppContext@3561a0f7{datanode,/,null,STOPPED}{jar:file:/home/runner/.gradle/caches/modules-2/files-2.1/org.apache.hadoop/hadoop-hdfs/3.4.0/18f9797a908899efbe4e27f6d5b76420f446a695/hadoop-hdfs-3.4.0-tests.jar!/webapps/datanode}
22:32:12.816 INFO  AbstractConnector - Stopped ServerConnector@6754608e{HTTP/1.1, (http/1.1)}{localhost:0}
22:32:12.816 INFO  session - node0 Stopped scavenging
22:32:12.816 INFO  ContextHandler - Stopped o.e.j.s.ServletContextHandler@6096b636{static,/static,jar:file:/home/runner/.gradle/caches/modules-2/files-2.1/org.apache.hadoop/hadoop-hdfs/3.4.0/18f9797a908899efbe4e27f6d5b76420f446a695/hadoop-hdfs-3.4.0-tests.jar!/webapps/static,STOPPED}
22:32:12.817 INFO  DataNode - Waiting up to 30 seconds for transfer threads to complete
22:32:12.817 INFO  Server - Stopping server on 39269
22:32:12.818 INFO  Server - Stopping IPC Server Responder
22:32:12.818 INFO  Server - Stopping IPC Server listener on 0
22:32:12.818 ERROR DataNode - Command processor encountered interrupt and exit.
22:32:12.818 WARN  DataNode - Ending command processor service for: Thread[Command processor,5,main]
22:32:12.818 INFO  DataNode - BPOfferService Block pool <registering> (Datanode Uuid unassigned) service to localhost/127.0.0.1:34531 interrupted while requesting version info from NN
22:32:12.818 ERROR DataNode - Initialization failed for Block pool <registering> (Datanode Uuid unassigned) service to localhost/127.0.0.1:34531. Exiting. 
java.io.IOException: DN shut down before block pool connected
	at org.apache.hadoop.hdfs.server.datanode.BPServiceActor.retrieveNamespaceInfo(BPServiceActor.java:276) ~[hadoop-hdfs-3.4.0.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BPServiceActor.connectToNNAndHandshake(BPServiceActor.java:306) ~[hadoop-hdfs-3.4.0.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:889) [hadoop-hdfs-3.4.0.jar:?]
	at java.base/java.lang.Thread.run(Thread.java:833) [?:?]
22:32:12.819 WARN  DataNode - Ending block pool service for: Block pool <registering> (Datanode Uuid unassigned) service to localhost/127.0.0.1:34531
22:32:12.819 INFO  DataNode - Removed Block pool <registering> (Datanode Uuid unassigned)
22:32:12.819 INFO  DataNode - Shutdown complete.
22:32:12.819 WARN  DataSetLockManager - not open lock leak check func
22:32:12.819 INFO  MiniDFSCluster - Shutting down the namenode
22:32:12.819 INFO  FSNamesystem - Stopping services started for active state
22:32:12.819 INFO  FSEditLog - Ending log segment 1, 1
22:32:12.819 INFO  FSNamesystem - LazyPersistFileScrubber was interrupted, exiting
22:32:12.820 INFO  FSEditLog - Number of transactions: 2 Total time for transactions(ms): 0 Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 1 1 
22:32:12.820 INFO  FSNamesystem - NameNodeEditLogRoller was interrupted, exiting
22:32:12.820 INFO  FileJournalManager - Finalizing edits file /tmp/minicluster_storage3069900156316144227/name-0-1/current/edits_inprogress_0000000000000000001 -> /tmp/minicluster_storage3069900156316144227/name-0-1/current/edits_0000000000000000001-0000000000000000002
22:32:12.821 INFO  FileJournalManager - Finalizing edits file /tmp/minicluster_storage3069900156316144227/name-0-2/current/edits_inprogress_0000000000000000001 -> /tmp/minicluster_storage3069900156316144227/name-0-2/current/edits_0000000000000000001-0000000000000000002
22:32:12.821 INFO  FSEditLog - FSEditLogAsync was interrupted, exiting
22:32:12.821 INFO  Server - Stopping server on 34531
22:32:12.821 INFO  Server - Stopping IPC Server listener on 0
22:32:12.822 INFO  CacheReplicationMonitor - Shutting down CacheReplicationMonitor
22:32:12.822 INFO  BlockManager - Stopping MarkedDeleteBlockScrubber.
22:32:12.823 INFO  Server - Stopping IPC Server Responder
22:32:12.823 INFO  BlockManager - Stopping RedundancyMonitor.
22:32:12.823 INFO  DatanodeManager - Slow peers collection thread shutdown
22:32:12.828 INFO  FSNamesystem - Stopping services started for active state
22:32:12.829 INFO  FSNamesystem - Stopping services started for standby state
22:32:12.830 INFO  ContextHandler - Stopped o.e.j.w.WebAppContext@1d8264c9{hdfs,/,null,STOPPED}{jar:file:/home/runner/.gradle/caches/modules-2/files-2.1/org.apache.hadoop/hadoop-hdfs/3.4.0/18f9797a908899efbe4e27f6d5b76420f446a695/hadoop-hdfs-3.4.0-tests.jar!/webapps/hdfs}
22:32:12.830 INFO  AbstractConnector - Stopped ServerConnector@300f2857{HTTP/1.1, (http/1.1)}{localhost:0}
22:32:12.830 INFO  session - node0 Stopped scavenging
22:32:12.831 INFO  ContextHandler - Stopped o.e.j.s.ServletContextHandler@709bb6a6{static,/static,jar:file:/home/runner/.gradle/caches/modules-2/files-2.1/org.apache.hadoop/hadoop-hdfs/3.4.0/18f9797a908899efbe4e27f6d5b76420f446a695/hadoop-hdfs-3.4.0-tests.jar!/webapps/static,STOPPED}
22:32:12.832 INFO  MetricsSystemImpl - Stopping DataNode metrics system...
22:32:12.832 INFO  MetricsSystemImpl - DataNode metrics system stopped.
22:32:12.832 INFO  MetricsSystemImpl - DataNode metrics system shutdown complete.