trino
trino copied to clipboard
TestIcebergV2.testOptimizeDuringWriteOperations test failure
2023-08-16T11:39:56.1182633Z [ERROR] io.trino.plugin.iceberg.TestIcebergV2.testOptimizeDuringWriteOperations -- Time elapsed: 12.72 s <<< FAILURE!
2023-08-16T11:39:56.1186317Z java.util.concurrent.ExecutionException: java.lang.RuntimeException: io.trino.testing.QueryFailedException: Cannot commit, found new delete for replaced data file: GenericDataFile{content=data, file_path=/tmp/test_iceberg_v28051858108317803431/iceberg_data/tpch/test_optimize_during_write_operations53gnv6bc1x-69fb48fd17a849928476a56d77cd3aad/data/20230816_113550_00047_hm4ja-37719a6f-27d3-455e-b575-806c77528a8d.parquet, file_format=PARQUET, spec_id=0, partition=PartitionData{}, record_count=1, file_size_in_bytes=215, column_sizes=null, value_counts=null, null_value_counts=null, nan_value_counts=null, lower_bounds=null, upper_bounds=null, key_metadata=null, split_offsets=null, equality_ids=null, sort_order_id=null, data_sequence_number=7, file_sequence_number=7}
2023-08-16T11:39:56.1188284Z at java.base/java.util.concurrent.FutureTask.report(FutureTask.java:122)
2023-08-16T11:39:56.1189449Z at java.base/java.util.concurrent.FutureTask.get(FutureTask.java:191)
2023-08-16T11:39:56.1190255Z at io.trino.plugin.iceberg.TestIcebergV2.runOptimizeDuringWriteOperations(TestIcebergV2.java:447)
2023-08-16T11:39:56.1191190Z at io.trino.plugin.iceberg.TestIcebergV2.testOptimizeDuringWriteOperations(TestIcebergV2.java:369)
2023-08-16T11:39:56.1192141Z at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
2023-08-16T11:39:56.1192908Z at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
2023-08-16T11:39:56.1194005Z at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
2023-08-16T11:39:56.1194752Z at java.base/java.lang.reflect.Method.invoke(Method.java:568)
2023-08-16T11:39:56.1195413Z at org.testng.internal.MethodInvocationHelper.invokeMethod(MethodInvocationHelper.java:104)
2023-08-16T11:39:56.1196078Z at org.testng.internal.Invoker.invokeMethod(Invoker.java:645)
2023-08-16T11:39:56.1196638Z at org.testng.internal.Invoker.invokeTestMethod(Invoker.java:851)
2023-08-16T11:39:56.1197213Z at org.testng.internal.Invoker.invokeTestMethods(Invoker.java:1177)
2023-08-16T11:39:56.1198262Z at org.testng.internal.TestMethodWorker.invokeTestMethods(TestMethodWorker.java:129)
2023-08-16T11:39:56.1198936Z at org.testng.internal.TestMethodWorker.run(TestMethodWorker.java:112)
2023-08-16T11:39:56.1245508Z at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
2023-08-16T11:39:56.1256615Z at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
2023-08-16T11:39:56.1257661Z at java.base/java.lang.Thread.run(Thread.java:833)
2023-08-16T11:39:56.1260529Z Caused by: java.lang.RuntimeException: io.trino.testing.QueryFailedException: Cannot commit, found new delete for replaced data file: GenericDataFile{content=data, file_path=/tmp/test_iceberg_v28051858108317803431/iceberg_data/tpch/test_optimize_during_write_operations53gnv6bc1x-69fb48fd17a849928476a56d77cd3aad/data/20230816_113550_00047_hm4ja-37719a6f-27d3-455e-b575-806c77528a8d.parquet, file_format=PARQUET, spec_id=0, partition=PartitionData{}, record_count=1, file_size_in_bytes=215, column_sizes=null, value_counts=null, null_value_counts=null, nan_value_counts=null, lower_bounds=null, upper_bounds=null, key_metadata=null, split_offsets=null, equality_ids=null, sort_order_id=null, data_sequence_number=7, file_sequence_number=7}
2023-08-16T11:39:56.1262562Z at io.trino.plugin.iceberg.TestIcebergV2.lambda$runOptimizeDuringWriteOperations$6(TestIcebergV2.java:431)
2023-08-16T11:39:56.1263280Z at java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539)
2023-08-16T11:39:56.1265080Z at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
2023-08-16T11:39:56.1265687Z ... 3 more
2023-08-16T11:39:56.1268303Z Caused by: io.trino.testing.QueryFailedException: Cannot commit, found new delete for replaced data file: GenericDataFile{content=data, file_path=/tmp/test_iceberg_v28051858108317803431/iceberg_data/tpch/test_optimize_during_write_operations53gnv6bc1x-69fb48fd17a849928476a56d77cd3aad/data/20230816_113550_00047_hm4ja-37719a6f-27d3-455e-b575-806c77528a8d.parquet, file_format=PARQUET, spec_id=0, partition=PartitionData{}, record_count=1, file_size_in_bytes=215, column_sizes=null, value_counts=null, null_value_counts=null, nan_value_counts=null, lower_bounds=null, upper_bounds=null, key_metadata=null, split_offsets=null, equality_ids=null, sort_order_id=null, data_sequence_number=7, file_sequence_number=7}
2023-08-16T11:39:56.1270256Z at io.trino.testing.AbstractTestingTrinoClient.execute(AbstractTestingTrinoClient.java:122)
2023-08-16T11:39:56.1271079Z at io.trino.testing.DistributedQueryRunner.executeWithQueryId(DistributedQueryRunner.java:509)
2023-08-16T11:39:56.1271877Z at io.trino.testing.QueryAssertions.assertDistributedUpdate(QueryAssertions.java:107)
2023-08-16T11:39:56.1272578Z at io.trino.testing.QueryAssertions.assertUpdate(QueryAssertions.java:63)
2023-08-16T11:39:56.1273332Z at io.trino.testing.AbstractTestQueryFramework.assertUpdate(AbstractTestQueryFramework.java:400)
2023-08-16T11:39:56.1274163Z at io.trino.testing.AbstractTestQueryFramework.assertUpdate(AbstractTestQueryFramework.java:395)
2023-08-16T11:39:56.1275004Z at io.trino.plugin.iceberg.TestIcebergV2.lambda$runOptimizeDuringWriteOperations$6(TestIcebergV2.java:428)
2023-08-16T11:39:56.1275496Z ... 5 more
2023-08-16T11:39:56.1276063Z Suppressed: java.lang.Exception: SQL: ALTER TABLE test_optimize_during_write_operations53gnv6bc1x EXECUTE optimize
2023-08-16T11:39:56.1276827Z at io.trino.testing.DistributedQueryRunner.executeWithQueryId(DistributedQueryRunner.java:513)
2023-08-16T11:39:56.1277342Z ... 10 more
2023-08-16T11:39:56.1280959Z Caused by: org.apache.iceberg.exceptions.ValidationException: Cannot commit, found new delete for replaced data file: GenericDataFile{content=data, file_path=/tmp/test_iceberg_v28051858108317803431/iceberg_data/tpch/test_optimize_during_write_operations53gnv6bc1x-69fb48fd17a849928476a56d77cd3aad/data/20230816_113550_00047_hm4ja-37719a6f-27d3-455e-b575-806c77528a8d.parquet, file_format=PARQUET, spec_id=0, partition=PartitionData{}, record_count=1, file_size_in_bytes=215, column_sizes=null, value_counts=null, null_value_counts=null, nan_value_counts=null, lower_bounds=null, upper_bounds=null, key_metadata=null, split_offsets=null, equality_ids=null, sort_order_id=null, data_sequence_number=7, file_sequence_number=7}
2023-08-16T11:39:56.1282865Z at org.apache.iceberg.exceptions.ValidationException.check(ValidationException.java:49)
2023-08-16T11:39:56.1283796Z at org.apache.iceberg.MergingSnapshotProducer.validateNoNewDeletesForDataFiles(MergingSnapshotProducer.java:499)
2023-08-16T11:39:56.1284852Z at org.apache.iceberg.MergingSnapshotProducer.validateNoNewDeletesForDataFiles(MergingSnapshotProducer.java:431)
2023-08-16T11:39:56.1285691Z at org.apache.iceberg.BaseRewriteFiles.validate(BaseRewriteFiles.java:140)
2023-08-16T11:39:56.1286330Z at org.apache.iceberg.SnapshotProducer.apply(SnapshotProducer.java:216)
2023-08-16T11:39:56.1286963Z at org.apache.iceberg.SnapshotProducer.lambda$commit$2(SnapshotProducer.java:366)
2023-08-16T11:39:56.1309296Z at org.apache.iceberg.util.Tasks$Builder.runTaskWithRetry(Tasks.java:413)
2023-08-16T11:39:56.1310022Z at org.apache.iceberg.util.Tasks$Builder.runSingleThreaded(Tasks.java:219)
2023-08-16T11:39:56.1310573Z at org.apache.iceberg.util.Tasks$Builder.run(Tasks.java:203)
2023-08-16T11:39:56.1311276Z at org.apache.iceberg.util.Tasks$Builder.run(Tasks.java:196)
2023-08-16T11:39:56.1311851Z at org.apache.iceberg.SnapshotProducer.commit(SnapshotProducer.java:364)
2023-08-16T11:39:56.1312508Z at org.apache.iceberg.BaseTransaction.applyUpdates(BaseTransaction.java:497)
2023-08-16T11:39:56.1313218Z at org.apache.iceberg.BaseTransaction.lambda$commitSimpleTransaction$5(BaseTransaction.java:420)
2023-08-16T11:39:56.1313876Z at org.apache.iceberg.util.Tasks$Builder.runTaskWithRetry(Tasks.java:413)
2023-08-16T11:39:56.1314461Z at org.apache.iceberg.util.Tasks$Builder.runSingleThreaded(Tasks.java:219)
2023-08-16T11:39:56.1314978Z at org.apache.iceberg.util.Tasks$Builder.run(Tasks.java:203)
2023-08-16T11:39:56.1315472Z at org.apache.iceberg.util.Tasks$Builder.run(Tasks.java:196)
2023-08-16T11:39:56.1316116Z at org.apache.iceberg.BaseTransaction.commitSimpleTransaction(BaseTransaction.java:418)
2023-08-16T11:39:56.1316862Z at org.apache.iceberg.BaseTransaction.commitTransaction(BaseTransaction.java:302)
2023-08-16T11:39:56.1317947Z at io.trino.plugin.iceberg.IcebergMetadata.finishOptimize(IcebergMetadata.java:1316)
2023-08-16T11:39:56.1318708Z at io.trino.plugin.iceberg.IcebergMetadata.finishTableExecute(IcebergMetadata.java:1243)
2023-08-16T11:39:56.1319713Z at io.trino.plugin.base.classloader.ClassLoaderSafeConnectorMetadata.finishTableExecute(ClassLoaderSafeConnectorMetadata.java:233)
2023-08-16T11:39:56.1320732Z at io.trino.tracing.TracingConnectorMetadata.finishTableExecute(TracingConnectorMetadata.java:181)
2023-08-16T11:39:56.1321511Z at io.trino.metadata.MetadataManager.finishTableExecute(MetadataManager.java:357)
2023-08-16T11:39:56.1322215Z at io.trino.tracing.TracingMetadata.finishTableExecute(TracingMetadata.java:214)
2023-08-16T11:39:56.1322994Z at io.trino.sql.planner.LocalExecutionPlanner.lambda$createTableFinisher$4(LocalExecutionPlanner.java:4151)
2023-08-16T11:39:56.1323746Z at io.trino.operator.TableFinishOperator.getOutput(TableFinishOperator.java:319)
2023-08-16T11:39:56.1324358Z at io.trino.operator.Driver.processInternal(Driver.java:395)
2023-08-16T11:39:56.1324882Z at io.trino.operator.Driver.lambda$process$8(Driver.java:298)
2023-08-16T11:39:56.1325438Z at io.trino.operator.Driver.tryWithLock(Driver.java:694)
2023-08-16T11:39:56.1325927Z at io.trino.operator.Driver.process(Driver.java:290)
2023-08-16T11:39:56.1326502Z at io.trino.operator.Driver.processForDuration(Driver.java:261)
2023-08-16T11:39:56.1327151Z at io.trino.execution.SqlTaskExecution$DriverSplitRunner.processFor(SqlTaskExecution.java:887)
2023-08-16T11:39:56.1327998Z at io.trino.execution.executor.timesharing.PrioritizedSplitRunner.process(PrioritizedSplitRunner.java:187)
2023-08-16T11:39:56.1329122Z at io.trino.execution.executor.timesharing.TimeSharingTaskExecutor$TaskRunner.run(TimeSharingTaskExecutor.java:565)
2023-08-16T11:39:56.1329811Z at io.trino.$gen.Trino_testversion____20230816_113531_12064.run(Unknown Source)
2023-08-16T11:39:56.1330423Z at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
2023-08-16T11:39:56.1331138Z at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
2023-08-16T11:39:56.1331681Z at java.base/java.lang.Thread.run(Thread.java:833)
2023-08-16T11:39:56.1331924Z
2023-08-16T11:39:56.4796511Z [INFO]
2023-08-16T11:39:56.4797351Z [INFO] Results:
2023-08-16T11:39:56.4798204Z [INFO]
2023-08-16T11:39:56.4798676Z [ERROR] Failures:
2023-08-16T11:39:56.4853975Z [ERROR] TestIcebergV2.testOptimizeDuringWriteOperations:369->runOptimizeDuringWriteOperations:447 » Execution java.lang.RuntimeException: io.trino.testing.QueryFailedException: Cannot commit, found new delete for replaced data file: GenericDataFile{content=data, file_path=/tmp/test_iceberg_v28051858108317803431/iceberg_data/tpch/test_optimize_during_write_operations53gnv6bc1x-69fb48fd17a849928476a56d77cd3aad/data/20230816_113550_00047_hm4ja-37719a6f-27d3-455e-b575-806c77528a8d.parquet, file_format=PARQUET, spec_id=0, partition=PartitionData{}, record_count=1, file_size_in_bytes=215, column_sizes=null, value_counts=null, null_value_counts=null, nan_value_counts=null, lower_bounds=null, upper_bounds=null, key_metadata=null, split_offsets=null, equality_ids=null, sort_order_id=null, data_sequence_number=7, file_sequence_number=7}
https://github.com/trinodb/trino/actions/runs/5877054553/job/15937683383?pr=18692
fyi @findinpath @findepi
cc @alexjo2144
We could try increasing the blackhole table's delay if this test continues to be flaky.
https://github.com/trinodb/trino/actions/runs/7817020709/job/21324789666?pr=20617
Error: Errors:
Error: TestIcebergV2.testOptimizeDuringWriteOperations:473->runOptimizeDuringWriteOperations:551 » Execution java.lang.RuntimeException: io.trino.testing.QueryFailedException: Cannot commit, found new delete for replaced data file: GenericDataFile{content=data, file_path=local:///tpch/test_optimize_during_write_operations0vws99bfd9-a99713841d5846b7bc2a318eb4702217/data/20240207_155021_00099_uj8qk-ffd837ac-b636-42a2-81a2-d7f31cd0ea62.parquet, file_format=PARQUET, spec_id=0, partition=PartitionData{}, record_count=1, file_size_in_bytes=215, column_sizes=null, value_counts=null, null_value_counts=null, nan_value_counts=null, lower_bounds=null, upper_bounds=null, key_metadata=null, split_offsets=null, equality_ids=null, sort_order_id=null, data_sequence_number=8, file_sequence_number=8}
[INFO]
Error: Tests run: 1192, Failures: 0, Errors: 1, Skipped: 20
Was https://github.com/trinodb/trino/pull/18717 not sufficient to fix this?
https://github.com/trinodb/trino/actions/runs/7842991784/job/21402572815?pr=20641
java.util.concurrent.ExecutionException: java.lang.RuntimeException: io.trino.testing.QueryFailedException: Cannot commit, found new delete for replaced data file: GenericDataFile{content=data, file_path=local:///tpch/test_optimize_during_write_operationse13wjz4l5x-66fc1418e99c4b4380d3b3ec4563d0ba/data/20240209_111555_00093_3hwjc-aae44dd7-a783-42e2-84ec-b7435a1ac602.parquet, file_format=PARQUET, spec_id=0, partition=PartitionData{}, record_count=1, file_size_in_bytes=215, column_sizes=null, value_counts=null, null_value_counts=null, nan_value_counts=null, lower_bounds=null, upper_bounds=null, key_metadata=null, split_offsets=null, equality_ids=null, sort_order_id=null, data_sequence_number=2, file_sequence_number=2}
at java.base/java.util.concurrent.FutureTask.report(FutureTask.java:122)
at java.base/java.util.concurrent.FutureTask.get(FutureTask.java:191)
at io.trino.plugin.iceberg.TestIcebergV2.runOptimizeDuringWriteOperations(TestIcebergV2.java:551)
at io.trino.plugin.iceberg.TestIcebergV2.testOptimizeDuringWriteOperations(TestIcebergV2.java:473)
at java.base/java.lang.reflect.Method.invoke(Method.java:580)
at java.base/java.util.concurrent.RecursiveAction.exec(RecursiveAction.java:194)
at java.base/java.util.concurrent.ForkJoinTask.doExec(ForkJoinTask.java:387)
at java.base/java.util.concurrent.ForkJoinPool$WorkQueue.tryRemoveAndExec(ForkJoinPool.java:1351)
at java.base/java.util.concurrent.ForkJoinTask.awaitDone(ForkJoinTask.java:422)
at java.base/java.util.concurrent.ForkJoinTask.join(ForkJoinTask.java:651)
at java.base/java.util.concurrent.RecursiveAction.exec(RecursiveAction.java:194)
at java.base/java.util.concurrent.ForkJoinTask.doExec(ForkJoinTask.java:387)
at java.base/java.util.concurrent.ForkJoinPool$WorkQueue.topLevelExec(ForkJoinPool.java:1312)
at java.base/java.util.concurrent.ForkJoinPool.scan(ForkJoinPool.java:1843)
at java.base/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1808)
at java.base/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:188)
Caused by: java.lang.RuntimeException: io.trino.testing.QueryFailedException: Cannot commit, found new delete for replaced data file: GenericDataFile{content=data, file_path=local:///tpch/test_optimize_during_write_operationse13wjz4l5x-66fc1418e99c4b4380d3b3ec4563d0ba/data/20240209_111555_00093_3hwjc-aae44dd7-a783-42e2-84ec-b7435a1ac602.parquet, file_format=PARQUET, spec_id=0, partition=PartitionData{}, record_count=1, file_size_in_bytes=215, column_sizes=null, value_counts=null, null_value_counts=null, nan_value_counts=null, lower_bounds=null, upper_bounds=null, key_metadata=null, split_offsets=null, equality_ids=null, sort_order_id=null, data_sequence_number=2, file_sequence_number=2}
at io.trino.plugin.iceberg.TestIcebergV2.lambda$runOptimizeDuringWriteOperations$10(TestIcebergV2.java:535)
at java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:572)
at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:317)
at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1144)
at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:642)
at java.base/java.lang.Thread.run(Thread.java:1583)
Caused by: io.trino.testing.QueryFailedException: Cannot commit, found new delete for replaced data file: GenericDataFile{content=data, file_path=local:///tpch/test_optimize_during_write_operationse13wjz4l5x-66fc1418e99c4b4380d3b3ec4563d0ba/data/20240209_111555_00093_3hwjc-aae44dd7-a783-42e2-84ec-b7435a1ac602.parquet, file_format=PARQUET, spec_id=0, partition=PartitionData{}, record_count=1, file_size_in_bytes=215, column_sizes=null, value_counts=null, null_value_counts=null, nan_value_counts=null, lower_bounds=null, upper_bounds=null, key_metadata=null, split_offsets=null, equality_ids=null, sort_order_id=null, data_sequence_number=2, file_sequence_number=2}
at io.trino.testing.AbstractTestingTrinoClient.execute(AbstractTestingTrinoClient.java:133)
at io.trino.testing.DistributedQueryRunner.executeInternal(DistributedQueryRunner.java:496)
at io.trino.testing.DistributedQueryRunner.executeWithPlan(DistributedQueryRunner.java:487)
at io.trino.testing.QueryAssertions.assertDistributedUpdate(QueryAssertions.java:108)
at io.trino.testing.QueryAssertions.assertUpdate(QueryAssertions.java:62)
at io.trino.testing.AbstractTestQueryFramework.assertUpdate(AbstractTestQueryFramework.java:411)
at io.trino.testing.AbstractTestQueryFramework.assertUpdate(AbstractTestQueryFramework.java:406)
at io.trino.plugin.iceberg.TestIcebergV2.lambda$runOptimizeDuringWriteOperations$10(TestIcebergV2.java:532)
... 5 more
Suppressed: java.lang.Exception: SQL: ALTER TABLE test_optimize_during_write_operationse13wjz4l5x EXECUTE optimize
2024-02-09T05:16:15.664-0600 INFO ForkJoinPool-1-worker-6 io.trino.testing.services.junit.LogTestDurationListener Test TestIcebergV2 took 56.06s
at io.trino.testing.DistributedQueryRunner.executeInternal(DistributedQueryRunner.java:499)
... 11 more
Caused by: org.apache.iceberg.exceptions.ValidationException: Cannot commit, found new delete for replaced data file: GenericDataFile{content=data, file_path=local:///tpch/test_optimize_during_write_operationse13wjz4l5x-66fc1418e99c4b4380d3b3ec4563d0ba/data/20240209_111555_00093_3hwjc-aae44dd7-a783-42e2-84ec-b7435a1ac602.parquet, file_format=PARQUET, spec_id=0, partition=PartitionData{}, record_count=1, file_size_in_bytes=215, column_sizes=null, value_counts=null, null_value_counts=null, nan_value_counts=null, lower_bounds=null, upper_bounds=null, key_metadata=null, split_offsets=null, equality_ids=null, sort_order_id=null, data_sequence_number=2, file_sequence_number=2}
at org.apache.iceberg.exceptions.ValidationException.check(ValidationException.java:49)
at org.apache.iceberg.MergingSnapshotProducer.validateNoNewDeletesForDataFiles(MergingSnapshotProducer.java:488)
at org.apache.iceberg.MergingSnapshotProducer.validateNoNewDeletesForDataFiles(MergingSnapshotProducer.java:420)
at org.apache.iceberg.BaseRewriteFiles.validate(BaseRewriteFiles.java:140)
at org.apache.iceberg.SnapshotProducer.apply(SnapshotProducer.java:225)
at org.apache.iceberg.SnapshotProducer.lambda$commit$2(SnapshotProducer.java:376)
at org.apache.iceberg.util.Tasks$Builder.runTaskWithRetry(Tasks.java:413)
at org.apache.iceberg.util.Tasks$Builder.runSingleThreaded(Tasks.java:219)
at org.apache.iceberg.util.Tasks$Builder.run(Tasks.java:203)
at org.apache.iceberg.util.Tasks$Builder.run(Tasks.java:196)
at org.apache.iceberg.SnapshotProducer.commit(SnapshotProducer.java:374)
at org.apache.iceberg.BaseTransaction.applyUpdates(BaseTransaction.java:499)
at org.apache.iceberg.BaseTransaction.lambda$commitSimpleTransaction$3(BaseTransaction.java:415)
at org.apache.iceberg.util.Tasks$Builder.runTaskWithRetry(Tasks.java:413)
at org.apache.iceberg.util.Tasks$Builder.runSingleThreaded(Tasks.java:219)
at org.apache.iceberg.util.Tasks$Builder.run(Tasks.java:203)
at org.apache.iceberg.util.Tasks$Builder.run(Tasks.java:196)
at org.apache.iceberg.BaseTransaction.commitSimpleTransaction(BaseTransaction.java:413)
at org.apache.iceberg.BaseTransaction.commitTransaction(BaseTransaction.java:308)
at io.trino.plugin.iceberg.IcebergMetadata.finishOptimize(IcebergMetadata.java:1496)
at io.trino.plugin.iceberg.IcebergMetadata.finishTableExecute(IcebergMetadata.java:1423)
at io.trino.plugin.base.classloader.ClassLoaderSafeConnectorMetadata.finishTableExecute(ClassLoaderSafeConnectorMetadata.java:246)
at io.trino.tracing.TracingConnectorMetadata.finishTableExecute(TracingConnectorMetadata.java:185)
at io.trino.metadata.MetadataManager.finishTableExecute(MetadataManager.java:354)
at io.trino.tracing.TracingMetadata.finishTableExecute(TracingMetadata.java:226)
at io.trino.sql.planner.LocalExecutionPlanner.lambda$createTableFinisher$4(LocalExecutionPlanner.java:4172)
at io.trino.operator.TableFinishOperator.getOutput(TableFinishOperator.java:319)
at io.trino.operator.Driver.processInternal(Driver.java:398)
at io.trino.operator.Driver.lambda$process$8(Driver.java:301)
at io.trino.operator.Driver.tryWithLock(Driver.java:704)
at io.trino.operator.Driver.process(Driver.java:293)
at io.trino.operator.Driver.processForDuration(Driver.java:264)
at io.trino.execution.SqlTaskExecution$DriverSplitRunner.processFor(SqlTaskExecution.java:887)
at io.trino.execution.executor.dedicated.SplitProcessor.run(SplitProcessor.java:76)
at io.trino.execution.executor.dedicated.TaskEntry$VersionEmbedderBridge.lambda$run$0(TaskEntry.java:177)
at io.trino.$gen.Trino_testversion____20240209_111527_6958.run(Unknown Source)
at io.trino.execution.executor.dedicated.TaskEntry$VersionEmbedderBridge.run(TaskEntry.java:178)
at io.trino.execution.executor.scheduler.FairScheduler.runTask(FairScheduler.java:174)
at io.trino.execution.executor.scheduler.FairScheduler.lambda$submit$0(FairScheduler.java:161)
at java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:572)
at com.google.common.util.concurrent.TrustedListenableFutureTask$TrustedFutureInterruptibleTask.runInterruptibly(TrustedListenableFutureTask.java:131)
at com.google.common.util.concurrent.InterruptibleTask.run(InterruptibleTask.java:76)
at com.google.common.util.concurrent.TrustedListenableFutureTask.run(TrustedListenableFutureTask.java:82)
at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1144)
at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:642)
at java.base/java.lang.Thread.run(Thread.java:1583)
https://github.com/trinodb/trino/actions/runs/8314106684/job/22750883124
Error: io.trino.plugin.iceberg.TestIcebergV2.testOptimizeDuringWriteOperations -- Time elapsed: 16.13 s <<< ERROR!
java.util.concurrent.ExecutionException: java.lang.RuntimeException: io.trino.testing.QueryFailedException: Cannot commit, found new delete for replaced data file: GenericDataFile{content=data, file_path=local:///tpch/test_optimize_during_write_operationsxlw5way5vl-4c005beeb6844e9a8014293da36bb17c/data/20240317_083102_00093_3rigf-857abce5-d456-4ede-8ad1-cfd11e063d1b.parquet, file_format=PARQUET, spec_id=0, partition=PartitionData{}, record_count=1, file_size_in_bytes=215, column_sizes=null, value_counts=null, null_value_counts=null, nan_value_counts=null, lower_bounds=null, upper_bounds=null, key_metadata=null, split_offsets=null, equality_ids=null, sort_order_id=null, data_sequence_number=2, file_sequence_number=2}
at java.base/java.util.concurrent.FutureTask.report(FutureTask.java:122)
at java.base/java.util.concurrent.FutureTask.get(FutureTask.java:191)
at io.trino.plugin.iceberg.TestIcebergV2.runOptimizeDuringWriteOperations(TestIcebergV2.java:551)
at io.trino.plugin.iceberg.TestIcebergV2.testOptimizeDuringWriteOperations(TestIcebergV2.java:473)
at java.base/java.lang.reflect.Method.invoke(Method.java:580)
at java.base/java.util.concurrent.RecursiveAction.exec(RecursiveAction.java:194)
at java.base/java.util.concurrent.ForkJoinTask.doExec(ForkJoinTask.java:387)
at java.base/java.util.concurrent.ForkJoinPool$WorkQueue.tryRemoveAndExec(ForkJoinPool.java:1351)
at java.base/java.util.concurrent.ForkJoinTask.awaitDone(ForkJoinTask.java:422)
at java.base/java.util.concurrent.ForkJoinTask.join(ForkJoinTask.java:651)
at java.base/java.util.concurrent.RecursiveAction.exec(RecursiveAction.java:194)
at java.base/java.util.concurrent.ForkJoinTask.doExec(ForkJoinTask.java:387)
at java.base/java.util.concurrent.ForkJoinPool$WorkQueue.topLevelExec(ForkJoinPool.java:1312)
at java.base/java.util.concurrent.ForkJoinPool.scan(ForkJoinPool.java:1843)
at java.base/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1808)
at java.base/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:188)
Caused by: java.lang.RuntimeException: io.trino.testing.QueryFailedException: Cannot commit, found new delete for replaced data file: GenericDataFile{content=data, file_path=local:///tpch/test_optimize_during_write_operationsxlw5way5vl-4c005beeb6844e9a8014293da36bb17c/data/20240317_083102_00093_3rigf-857abce5-d456-4ede-8ad1-cfd11e063d1b.parquet, file_format=PARQUET, spec_id=0, partition=PartitionData{}, record_count=1, file_size_in_bytes=215, column_sizes=null, value_counts=null, null_value_counts=null, nan_value_counts=null, lower_bounds=null, upper_bounds=null, key_metadata=null, split_offsets=null, equality_ids=null, sort_order_id=null, data_sequence_number=2, file_sequence_number=2}
at io.trino.plugin.iceberg.TestIcebergV2.lambda$runOptimizeDuringWriteOperations$10(TestIcebergV2.java:535)
https://github.com/trinodb/trino/actions/runs/8754264429/job/24026031792?pr=21632
https://github.com/trinodb/trino/actions/runs/8992967220/job/24703922643
https://github.com/trinodb/trino/actions/runs/9276045576/job/25522303916?pr=21727
Error: io.trino.plugin.iceberg.TestIcebergV2.testOptimizeDuringWriteOperations -- Time elapsed: 14.73 s <<< ERROR!
java.util.concurrent.ExecutionException: java.lang.RuntimeException: io.trino.testing.QueryFailedException: Cannot commit, found new delete for replaced data file: GenericDataFile{content=data, file_path=local:///tpch/test_optimize_during_write_operations2auk2iu2vi-7b4f89b051bf4e19af9c55be008ff16d/data/20240528_205544_00102_cu2tt-ea1359de-4bc4-43d6-8312-cd2992e802f8.parquet, file_format=PARQUET, spec_id=0, partition=PartitionData{}, record_count=1, file_size_in_bytes=217, column_sizes=null, value_counts=null, null_value_counts=null, nan_value_counts=null, lower_bounds=null, upper_bounds=null, key_metadata=null, split_offsets=null, equality_ids=null, sort_order_id=0, data_sequence_number=2, file_sequence_number=2}
at java.base/java.util.concurrent.FutureTask.report(FutureTask.java:122)
at java.base/java.util.concurrent.FutureTask.get(FutureTask.java:191)
at io.trino.plugin.iceberg.TestIcebergV2.runOptimizeDuringWriteOperations(TestIcebergV2.java:603)
at io.trino.plugin.iceberg.TestIcebergV2.testOptimizeDuringWriteOperations(TestIcebergV2.java:525)
https://github.com/trinodb/trino/actions/runs/10145286959/job/28050850161?pr=22857
Error: Errors:
Error: TestIcebergV2.testOptimizeDuringWriteOperations:589->runOptimizeDuringWriteOperations:667 » Execution java.lang.RuntimeException: io.trino.testing.QueryFailedException: Cannot commit, found new delete for replaced data file: GenericDataFile{content=data, file_path=local:///tpch/test_optimize_during_write_operationsur6q25qh2j-bd4672c124c4483cb206b597f4e5aa05/data/20240729_134243_00161_pn7wt-8d088f13-e25c-4630-9495-ba3100e38813.parquet, file_format=PARQUET, spec_id=0, partition=PartitionData{}, record_count=1, file_size_in_bytes=217, column_sizes=null, value_counts=null, null_value_counts=null, nan_value_counts=null, lower_bounds=null, upper_bounds=null, key_metadata=null, split_offsets=[4], equality_ids=null, sort_order_id=0, data_sequence_number=11, file_sequence_number=11}
https://github.com/trinodb/trino/actions/runs/10355655497/job/28663846963?pr=23007
https://github.com/trinodb/trino/actions/runs/10382444062/job/28745569325