openj9 icon indicating copy to clipboard operation
openj9 copied to clipboard

Vm frequent global gc due to infinite **thresholdFreeBytes** long values

Open ianguo opened this issue 2 years ago • 16 comments

Java -version output

JRE 1.8.0 Linux amd64-64 (build 1.8.0_292-ea-b01)

Summary of problem

The VM has been running for 120 days, and the CPU usage remains normal. On May 3rd, suddenly the CPU usage exceeded 90%, and then checking the GC logs revealed the issue.

Vm frequent global gc due to infinite thresholdFreeBytes long values (gc log kickoff xml element). gcpolicy:gencon Xms506m Xmx2027m

Execute full gc every 300 milliseconds, and the gc takes about 40 milliseconds.

Diagnostic files

<concurrent-kickoff id="31327720" timestamp="2023-05-04T03:47:19.347"> <kickoff reason="threshold reached" targetBytes="9223372036854951703" thresholdFreeBytes="1268213682555322368" remainingFree="330856944" tenureFreeBytes="1242924648" nurseryFreeBytes="37579056" /> </concurrent-kickoff>

ianguo avatar May 05 '23 14:05 ianguo

gclog fragment:

<?xml version="1.0" ?> `<verbosegc xmlns="http://www.ibm.com/j9/verbosegc" version="e777338e2_CMPRSS">` <initialized id="31327719" timestamp="2023-05-04T03:47:19.347"> <attribute name="gcPolicy" value="-Xgcpolicy:gencon" /> <attribute name="maxHeapSize" value="0x7eb00000" /> <attribute name="initialHeapSize" value="0x1fa00000" /> <attribute name="compressedRefs" value="true" /> <attribute name="compressedRefsDisplacement" value="0x0" /> <attribute name="compressedRefsShift" value="0x3" /> <attribute name="pageSize" value="0x1000" /> <attribute name="pageType" value="not used" /> <attribute name="requestedPageSize" value="0x1000" /> <attribute name="requestedPageType" value="not used" /> <attribute name="gcthreads" value="4" /> <attribute name="gcthreads Concurrent Mark" value="1" /> <attribute name="packetListSplit" value="1" /> <attribute name="cacheListSplit" value="1" /> <attribute name="splitFreeListSplitAmount" value="1" /> <attribute name="numaNodes" value="0" /> <system> <attribute name="physicalMemory" value="3221225472" /> <attribute name="numCPUs" value="8" /> <attribute name="architecture" value="amd64" /> <attribute name="os" value="Linux" /> <attribute name="osVersion" value="3.10.0-693.21.1.el7.x86_64" /> </system> <vmargs> <vmarg name="-Xjcl:jclse29" /> <vmarg name="-Djava.class.path=." 
/> <vmarg name="-Xms506m" /> <vmarg name="-Xmx2027m" /> <vmarg name="-Xquickstart" /> <vmarg name="-Dfile.encoding=UTF-8" /> <vmarg name="-Xlp:objectheap:pagesize=4K" /> <vmarg name="-Xlp:codecache:pagesize=4K" /> <vmarg name="-XX:+UseContainerSupport" /> <vmarg name="-Xgcpolicy:gencon" /> <vmarg name="-Xgcthreads4" /> <vmarg name="-Dsun.java.launcher=SUN_STANDARD" /> <vmarg name="-Dsun.java.launcher.pid=20" /> </vmargs> </initialized> `<concurrent-kickoff id="31327720" timestamp="2023-05-04T03:47:19.347">` ` <kickoff reason="threshold reached" targetBytes="9223372036854951703" thresholdFreeBytes="1268213682555322368" remainingFree="330856944" tenureFreeBytes="1242924648" nurseryFreeBytes="37579056" />` `</concurrent-kickoff>` `<exclusive-start id="31327721" timestamp="2023-05-04T03:47:19.353" intervalms="100.518">` ` <response-info timems="0.720" idlems="0.720" threads="0" lastid="00000000019FC900" lastname="data_source_spout:14-MultipleThreadSpoutExecutors" />` `</exclusive-start>` `<cycle-start id="31327722" type="global" contextid="0" timestamp="2023-05-04T03:47:19.353" intervalms="302.800" />` `<exclusive-end id="31327723" timestamp="2023-05-04T03:47:19.353" durationms="0.253" />` <exclusive-start id="31327724" timestamp="2023-05-04T03:47:19.554" intervalms="200.881"> <response-info timems="0.184" idlems="0.184" threads="0" lastid="0000000001A2AB00" lastname="Thread-121" /> </exclusive-start> <concurrent-global-final id="31327725" timestamp="2023-05-04T03:47:19.554" contextid="31327722" intervalms="301.359" > <concurrent-trace-info reason="card cleaning threshold reached" tracedByMutators="30405080" tracedByHelpers="144054356" cardsCleaned="51" workStackOverflowCount="0" /> </concurrent-global-final> <gc-start id="31327726" type="global" contextid="31327722" timestamp="2023-05-04T03:47:19.555"> <mem-info id="31327727" free="1277837456" total="1969946624" percent="64"> <mem type="nursery" free="34913968" total="531365888" percent="6"> <mem type="allocate" 
free="34913968" total="265682944" percent="13" /> <mem type="survivor" free="0" total="265682944" percent="0" /> </mem> <mem type="tenure" free="1242923488" total="1438580736" percent="86"> <mem type="soa" free="1228537312" total="1424194560" percent="86" /> <mem type="loa" free="14386176" total="14386176" percent="100" /> </mem> <remembered-set count="3285" /> </mem-info> </gc-start> <allocation-stats totalBytes="2568032" > <allocated-bytes non-tlh="360856" tlh="2207176" /> <largest-consumer threadName="EventManagerImp" threadId="0000000001317C00" bytes="854744" /> </allocation-stats> <gc-op id="31327728" type="rs-scan" timems="36.540" contextid="31327722" timestamp="2023-05-04T03:47:19.591"> <scan objectsFound="14914" bytesTraced="227784600" workStackOverflowCount="0" /> </gc-op> <gc-op id="31327729" type="card-cleaning" timems="0.978" contextid="31327722" timestamp="2023-05-04T03:47:19.592"> <card-cleaning cardsCleaned="315" bytesTraced="207984" workStackOverflowCount="0" /> </gc-op> <gc-op id="31327730" type="mark" timems="37.953" contextid="31327722" timestamp="2023-05-04T03:47:19.630"> <trace-info objectcount="693" scancount="619" scanbytes="14360" /> <finalization candidates="1166" enqueued="15" /> <ownableSynchronizers candidates="3874" cleared="15" /> <references type="soft" candidates="8724" cleared="0" enqueued="0" dynamicThreshold="27" maxThreshold="32" /> <references type="weak" candidates="15064" cleared="0" enqueued="0" /> <references type="phantom" candidates="124" cleared="0" enqueued="0" /> <stringconstants candidates="28626" cleared="6" /> <object-monitors candidates="229" cleared="3" /> </gc-op> <gc-op id="31327731" type="sweep" timems="3.341" contextid="31327722" timestamp="2023-05-04T03:47:19.634" /> <heap-resize id="31327732" type="contract" space="tenure" amount="71958528" count="1" timems="0.352" reason="excess free space following gc" timestamp="2023-05-04T03:47:19.635" /> <gc-end id="31327733" type="global" contextid="31327722" 
durationms="80.049" usertimems="182.135" systemtimems="0.000" stalltimems="62.760" timestamp="2023-05-04T03:47:19.635" activeThreads="4"> <mem-info id="31327734" free="1208525856" total="1897988096" percent="63"> <mem type="nursery" free="37559960" total="531365888" percent="7"> <mem type="allocate" free="37559960" total="265682944" percent="14" /> <mem type="survivor" free="0" total="265682944" percent="0" /> </mem> <mem type="tenure" free="1170965896" total="1366622208" percent="85" micro-fragmented="1280544" macro-fragmented="9288619"> <mem type="soa" free="1157299592" total="1352955904" percent="85" /> <mem type="loa" free="13666304" total="13666304" percent="100" /> </mem> <pending-finalizers system="15" default="0" reference="0" classloader="0" /> <remembered-set count="3278" /> </mem-info> </gc-end> <cycle-end id="31327735" type="global" contextid="31327722" timestamp="2023-05-04T03:47:19.635" /> <exclusive-end id="31327736" timestamp="2023-05-04T03:47:19.635" durationms="81.464" /> `<concurrent-kickoff id="31327737" timestamp="2023-05-04T03:47:19.636">` ` <kickoff reason="threshold reached" targetBytes="9223372036854894530" thresholdFreeBytes="1268213682555322368" remainingFree="330835656" tenureFreeBytes="1170965896" nurseryFreeBytes="37557768" />` `</concurrent-kickoff>` `<exclusive-start id="31327738" timestamp="2023-05-04T03:47:19.641" intervalms="87.174">` ` <response-info timems="1.003" idlems="1.003" threads="0" lastid="000000000101E000" lastname="common-kafka-thread-string" />` `</exclusive-start>` `<cycle-start id="31327739" type="global" contextid="0" timestamp="2023-05-04T03:47:19.641" intervalms="288.032" />` `<exclusive-end id="31327740" timestamp="2023-05-04T03:47:19.641" durationms="0.239" />` <exclusive-start id="31327741" timestamp="2023-05-04T03:47:19.835" intervalms="194.254"> <response-info timems="0.180" idlems="0.180" threads="0" lastid="0000000001979900" lastname="WorkerHeartbeatRunable" /> </exclusive-start> <concurrent-global-final 
id="31327742" timestamp="2023-05-04T03:47:19.836" contextid="31327739" intervalms="281.448" > <concurrent-trace-info reason="card cleaning threshold reached" tracedByMutators="36065092" tracedByHelpers="138445180" cardsCleaned="130" workStackOverflowCount="0" /> </concurrent-global-final> <gc-start id="31327743" type="global" contextid="31327739" timestamp="2023-05-04T03:47:19.836"> <mem-info id="31327744" free="1205607192" total="1897988096" percent="63"> <mem type="nursery" free="34641296" total="531365888" percent="6"> <mem type="allocate" free="34641296" total="265682944" percent="13" /> <mem type="survivor" free="0" total="265682944" percent="0" /> </mem> <mem type="tenure" free="1170965896" total="1366622208" percent="85"> <mem type="soa" free="1157299592" total="1352955904" percent="85" /> <mem type="loa" free="13666304" total="13666304" percent="100" /> </mem> <remembered-set count="3287" /> </mem-info> </gc-start> <allocation-stats totalBytes="2836864" > <allocated-bytes non-tlh="94600" tlh="2742264" /> <largest-consumer threadName="common-kafka-thread-string" threadId="000000000101E000" bytes="681552" /> </allocation-stats> <gc-op id="31327745" type="rs-scan" timems="36.116" contextid="31327739" timestamp="2023-05-04T03:47:19.872"> <scan objectsFound="11539" bytesTraced="226309908" workStackOverflowCount="0" /> </gc-op> <gc-op id="31327746" type="card-cleaning" timems="1.456" contextid="31327739" timestamp="2023-05-04T03:47:19.874"> <card-cleaning cardsCleaned="546" bytesTraced="1780372" workStackOverflowCount="0" /> </gc-op> <gc-op id="31327747" type="mark" timems="5.887" contextid="31327739" timestamp="2023-05-04T03:47:19.880"> <trace-info objectcount="583" scancount="527" scanbytes="11460" /> <finalization candidates="1153" enqueued="2" /> <ownableSynchronizers candidates="3917" cleared="60" /> <references type="soft" candidates="8724" cleared="0" enqueued="0" dynamicThreshold="27" maxThreshold="32" /> <references type="weak" candidates="15064" 
cleared="0" enqueued="0" /> <references type="phantom" candidates="124" cleared="0" enqueued="0" /> <stringconstants candidates="28620" cleared="0" /> <object-monitors candidates="226" cleared="0" /> </gc-op> <gc-op id="31327748" type="sweep" timems="4.046" contextid="31327739" timestamp="2023-05-04T03:47:19.884" /> <heap-resize id="31327749" type="expand" space="tenure" amount="227475456" count="1" timems="0.176" reason="excessive time being spent in gc" timestamp="2023-05-04T03:47:19.885" /> <gc-end id="31327750" type="global" contextid="31327739" durationms="48.498" usertimems="181.813" systemtimems="0.000" stalltimems="3.272" timestamp="2023-05-04T03:47:19.885" activeThreads="4"> <mem-info id="31327751" free="1436013728" total="2125463552" percent="67"> <mem type="nursery" free="37572376" total="531365888" percent="7"> <mem type="allocate" free="37572376" total="265682944" percent="14" /> <mem type="survivor" free="0" total="265682944" percent="0" /> </mem> <mem type="tenure" free="1398441352" total="1594097664" percent="87" micro-fragmented="1280544" macro-fragmented="9288619"> <mem type="soa" free="1382499720" total="1578156032" percent="87" /> <mem type="loa" free="15941632" total="15941632" percent="100" /> </mem> <pending-finalizers system="2" default="0" reference="0" classloader="0" /> <remembered-set count="3287" /> </mem-info> </gc-end> <cycle-end id="31327752" type="global" contextid="31327739" timestamp="2023-05-04T03:47:19.885" /> <exclusive-end id="31327753" timestamp="2023-05-04T03:47:19.885" durationms="49.848" />

ianguo avatar May 05 '23 14:05 ianguo

@amicic @dmitripivkine

pshipton avatar May 05 '23 15:05 pshipton

I noticed that the ugly number targetBytes="9223372036854951703" is actually a reasonable value, 175895, with the high bit set: 0x800000000002AF17

dmitripivkine avatar May 09 '23 12:05 dmitripivkine

@ianguo, thanks for reporting this problem. Would you be able to attach a wider sequence of VGC logs, when things turned from normal to abnormal?

amicic avatar May 09 '23 15:05 amicic

Unfortunately, there are only frequent GC logs and no normal GC logs. Is there any reference value for the gc logs after VM restart? @amicic

ianguo avatar May 09 '23 23:05 ianguo

Is there any reference value for the gc logs after VM restart?

Sorry, I don't understand this question (what you mean by reference value)

amicic avatar May 10 '23 16:05 amicic

What I mean is "Is it beneficial to find the problem with the gc logs after restarting the VM?".

ianguo avatar May 10 '23 22:05 ianguo

Ah, I see. Unfortunately no, those logs would not be helpful.

amicic avatar May 10 '23 23:05 amicic

This is a hand-drawn graph (unable to provide the original image due to information security reasons) of the sudden change in CPU usage at that time. image

ianguo avatar May 10 '23 23:05 ianguo

A hand-drawn graph of the heap usage. image

ianguo avatar May 10 '23 23:05 ianguo

@ianguo, thanks for reporting this problem. Would you be able to attach a wider sequence of VGC logs, when things turned from normal to abnormal?

@amicic @ianguo @pshipton @dmitripivkine we met this problem. this is the gc log: (gc-2024-09-18-19-05-22.log)

The last good kickoff log, at "2024-09-21T08:16:40.434", is as follows:

<concurrent-kickoff id="2312513" timestamp="2024-09-21T08:16:40.434"> <kickoff reason="threshold reached" targetBytes="42331709" thresholdFreeBytes="5846272" remainingFree="5776168" tenureFreeBytes="107556888" nurseryFreeBytes="5645096" /> </concurrent-kickoff>

Additional Info: we run in k8s and the pod memory limit is as below. The GC targetBytes is even greater than hierarchical_memsw_limit. HOW can that be?

cache 0 rss 0 rss_huge 0 shmem 0 mapped_file 0 dirty 0 writeback 0 swap 0 pgpgin 0 pgpgout 0 pgfault 0 pgmajfault 0 inactive_anon 0 active_anon 0 inactive_file 0 active_file 0 unevictable 0 hierarchical_memory_limit 2147483648 hierarchical_memsw_limit 9223372036854771712 total_cache 38928384 total_rss 1830211584 total_rss_huge 1522532352 total_shmem 0 total_mapped_file 0 total_dirty 0 total_writeback 0 total_swap 0 total_pgpgin 127802 total_pgpgout 91907 total_pgfault 132401 total_pgmajfault 0 total_inactive_anon 1830318080 total_active_anon 0 total_inactive_file 31088640 total_active_file 7704576 total_unevictable 0

ShanshLai avatar Sep 21 '24 08:09 ShanshLai

Value targetBytes="9223372036855215670" is 0x800000000006B636, so high bit is set in addition to value 439862

dmitripivkine avatar Sep 23 '24 15:09 dmitripivkine

Previous Global GC performed contraction:

<heap-resize id="2312600" type="contract" space="tenure" amount="3538944" count="1" timems="0.224" reason="insufficient time being spent in gc" timestamp="2024-09-21T08:16:43.044" />

dmitripivkine avatar Sep 23 '24 15:09 dmitripivkine

objectheap:page

@dmitripivkine Thanks for your reply. But I still can't get it. Is there something wrong with the system, or is this a bug in openjdk8u373 with openj9 0.38.0?

ShanshLai avatar Sep 24 '24 02:09 ShanshLai

Previous Global GC performed contraction:

<heap-resize id="2312600" type="contract" space="tenure" amount="3538944" count="1" timems="0.224" reason="insufficient time being spent in gc" timestamp="2024-09-21T08:16:43.044" />

@dmitripivkine we noticed that after the LOA is allocated, the kickoff calculation goes wrong. We tried setting -Xnoloa and this problem has not happened since. So is there any relation to the LOA?

<mem type="tenure" free="154607760" total="285343744" percent="54" micro-fragmented="2745485" macro-fragmented="0">
  <mem type="soa" free="154607760" total="282489856" percent="54" />
  <mem type="loa" free="0" total="2853888" percent="0" />
</mem>
<pending-finalizers system="1" default="0" reference="1" classloader="0" />
<remembered-set count="1320" />

ShanshLai avatar Oct 08 '24 08:10 ShanshLai

@ShanshLai Thank you for this information. Unfortunately we cannot reproduce the problem reliably enough to narrow it down to the incorrect calculation. We would appreciate help finding a test case we can work with. Our guess is that the bad calculation can happen after object heap resizing. Yes, LOA resizing can be one of the possible triggers.

dmitripivkine avatar Oct 08 '24 14:10 dmitripivkine

@dmitripivkine we tried the optavgpause gc policy; this policy also does resizing, but it did NOT reproduce the problem. Is this info useful?

image

ShanshLai avatar Nov 28 '24 08:11 ShanshLai

@dmitripivkine we tried the optavgpause gc policy; this policy also does resizing, but it did NOT reproduce the problem. Is this info useful?

@ShanshLai Thank you! Any piece of information is useful. We do see many cases when LOA resizing doesn't lead to the problem in our testing with various GC policies. So, LOA resizing can be related to the problem but not directly. Unfortunately we still can't reproduce the problem. We really need test case with reasonable level of reproducibility to conduct investigation.

dmitripivkine avatar Nov 28 '24 11:11 dmitripivkine

@dmitripivkine @pshipton @amicic After running the application in a Docker container for a few days, the same problem occurred. After turning on the debug logs and analyzing the JVM source code, it seems that it might be related to the "bytes traced in Pass 1 Factor" ("_bytesTracedInPass1Factor" in the source code). Under normal circumstances it should be a value between 0 and 1, but now it is "-nan".

  Really appreciate it if you could help me to determine where the problem lies. Here is the details.

Debug log of JVM

Update tuning statistics: Total Traced="116765828" (Pass 2 KO="2044")  Total Cleaned="3816" (Pass 2 KO="0")
                          Tenure Live object Factor="0.509" Tenure non-leaf object factor="0.216" 
                          Card Cleaning Factors: Pass1="0.000" Pass2="0.000"
                          Bytes traced in Pass 1 Factor="-nan"

Tune to heap : Trace target Pass 1="3777893365736055024" (Trace="3777893365736013824" Clean="41200")
               Trace target Pass 2="60543298" (Trace="60513972" Clean="29326")
               kickoffThreshold="472236670721427964" concurrentSlack="0"
               KO threshold="519460351532597248" KO threshold buffer="47223680811169284"
               Card Cleaning Threshold="204635898661306368" 
               Init Work Required="40776320" 

Analysis

  With debugging and printing info of GDB, I walked through the source code ConcurrentGCIncrementalUpdate.cpp.

  In function MM_ConcurrentGCIncrementalUpdate::tuneToHeap(MM_EnvironmentBase *env), source path where outliers of "kickoffThreshold's" comes from:   kickoffThreshold -> _traceTargetPass1 -> _bytesToTracePass1

  In MM_ConcurrentGCIncrementalUpdate::adjustTraceTarget():    _bytesToTracePass1 -> _bytesTracedInPass1Factor

  Then in MM_ConcurrentGCIncrementalUpdate::updateTuningStatisticsInternal(MM_EnvironmentBase *env), at line 768, it shows that the value of "_bytesTracedInPass1Factor" is related to "totalTracedPass1" and "totalTraced". As shown in the debug log above, the value of _bytesTracedInPass1Factor "-nan" is abnormal, and once an abnormality occurs, subsequent weighted calculations will always be incorrect.

  At this point, it's difficult to trace the values of "totalTracedPass1" and "totalTraced" any further. Hope this will help in further investigating the problem.

715 /**
716  *  Also, reset the card cleaning factors based on cards we cleaned this cycle.
717  */
718 void
719 MM_ConcurrentGCIncrementalUpdate::updateTuningStatisticsInternal(MM_EnvironmentBase *env)
720 {

749   /* Calculate new average if we got to end  */
750   case CONCURRENT_EXHAUSTED:
751   case CONCURRENT_FINAL_COLLECTION:
752    totalTraced = _stats.getTraceSizeCount() + _stats.getConHelperTraceSizeCount();
753    totalCleaned = _stats.getCardCleanCount() + _stats.getConHelperCardCleanCount();
754
755    if (_secondCardCleanPass) {
756     assume0((HIGH_VALUES != _totalTracedAtPass2KO)  && (HIGH_VALUES != _totalCleanedAtPass2KO));
757     totalTracedPass1 = _totalTracedAtPass2KO;
758     totalCleanedPass1 = _totalCleanedAtPass2KO;
759     totalCleanedPass2 = totalCleaned - _totalCleanedAtPass2KO;
760    } else {
761     assume0((HIGH_VALUES == _totalTracedAtPass2KO) && (HIGH_VALUES == _totalCleanedAtPass2KO));
762     totalTracedPass1 = totalTraced;
763     totalCleanedPass1 = totalCleaned;
764     totalCleanedPass2 = 0;
765    }
766
767    /* What factor of the tracing work was done beofre we started 2nd pass of card cleaning ?*/
768    newBytesTracedInPass1Factor = ((float)totalTracedPass1) / ((float)totalTraced);

772    _bytesTracedInPass1Factor = MM_Math::weightedAverage(_bytesTracedInPass1Factor, newBytesTracedInPass1Factor, BYTES_TRACED_IN_PASS_1_HISTORY_WEIGHT);

808
809   omrtty_printf("Update tuning statistics: Total Traced=\"%zu\" (Pass 2 KO=\"%zu\")  Total Cleaned=\"%zu\" (Pass 2 KO=\"%zu\")\n",
810       totalTraced, _totalTracedAtPass2KO, totalCleaned, _totalCleanedAtPass2KO);
811   omrtty_printf("                          Tenure Live object Factor=\"%.3f\" Tenure non-leaf object factor=\"%.3f\" \n",
812       _tenureLiveObjectFactor, _tenureNonLeafObjectFactor);
813   omrtty_printf("                          Card Cleaning Factors: Pass1=\"%s\" Pass2=\"%s\"\n",
814       pass1Factor, pass2Factor);
815   omrtty_printf("                          Bytes traced in Pass 1 Factor=\"%.3f\"\n",
816       _bytesTracedInPass1Factor);
817  }
818 }

Others

System Info and VM Options

<system>
    <attribute name="physicalMemory" value="6442450944" />
    <attribute name="addressablePhysicalMemory" value="6442450944" />
    <attribute name="container memory limit set" value="true" />
    <attribute name="numCPUs" value="128" />
    <attribute name="numCPUs active" value="6" />
    <attribute name="architecture" value="amd64" />
    <attribute name="os" value="Linux" />
  </system>
  <vmargs>
    <vmarg name="-Xms1305m" />
    <vmarg name="-Xmx5222m" />
    <vmarg name="-Xgcpolicy:gencon" />
    <vmarg name="-Xgcthreads8" />
    <vmarg name="-XX:+IdleTuningGcOnIdle" />
    <vmarg name="-XX:+IdleTuningCompactOnIdle" />
    <vmarg name="-Xgc:debugConcurrentMark" />
  </vmargs>

GC Log

<concurrent-kickoff id="1045412" timestamp="2025-11-16T17:48:06.693">
  <kickoff reason="threshold reached" targetBytes="84250040149727" thresholdFreeBytes="11584374046720"  remainingFree="15789802480" tenureFreeBytes="678401736" nurseryFreeBytes="605936632" />
</concurrent-kickoff>
<exclusive-start id="1045413" timestamp="2025-11-16T17:48:06.770" intervalms="101.413">
  <response-info timems="13.842" idlems="6.004" threads="15" lastid="000000000180B800" lastname="CLISessionTask 24" />
</exclusive-start>
<cycle-start id="1045414" type="global" contextid="0" timestamp="2025-11-16T17:48:06.770" intervalms="470.252" />
<concurrent-start id="1045415" type="global mark" contextid="1045414" timestamp="2025-11-16T17:48:06.771">
</concurrent-start>
<exclusive-end id="1045416" timestamp="2025-11-16T17:48:06.772" durationms="1.808" />

<concurrent-end id="1045417" type="global mark" contextid="1045414" timestamp="2025-11-16T17:48:07.154">
<gc-op id="1045418" type="trace" timems="383.813" contextid="1045414" timestamp="2025-11-16T17:48:07.155">
  <trace bytesTraced="99729820" workStackOverflowCount="0" />
  <card-cleaning reason="card cleaning threshold reached" bytesTraced="0" cardsCleaned="30" />
</gc-op>
</concurrent-end>

<exclusive-start id="1045419" timestamp="2025-11-16T17:48:07.173" intervalms="402.801">
  <response-info timems="17.390" idlems="9.288" threads="64" lastid="0000000001987000" lastname="CLISessionTask 56" />
</exclusive-start>
<concurrent-global-final id="1045420" timestamp="2025-11-16T17:48:07.173" contextid="1045414" intervalms="504.409" >
  <concurrent-trace-info reason="card cleaning threshold reached" tracedByMutators="98357412" tracedByHelpers="1372408" cardsCleaned="30" workStackOverflowCount="0" />
</concurrent-global-final>
<gc-start id="1045421" type="global" contextid="1045414" timestamp="2025-11-16T17:48:07.175">
  <mem-info id="1045422" free="1254123288" total="2395209728" percent="52">
    <mem type="nursery" free="575722720" total="1368915968" percent="42">
      <mem type="allocate" free="575722720" total="919273472" percent="62" />
      <mem type="survivor" free="0" total="449642496" percent="0" />
    </mem>
    <mem type="tenure" free="678400568" total="1026293760" percent="66">
      <mem type="soa" free="668137016" total="1016030208" percent="65" />
      <mem type="loa" free="10263552" total="10263552" percent="100" />
    </mem>
    <remembered-set count="8236" />
  </mem-info>
</gc-start>
<allocation-stats totalBytes="29742584" discardedBytes="432776" >
  <allocated-bytes non-tlh="29274448" tlh="468136" />
  <largest-consumer threadName="CLISessionTask 59" threadId="0000000001992600" bytes="4631232" />
</allocation-stats>
<gc-op id="1045423" type="rs-scan" timems="5.782" contextid="1045414" timestamp="2025-11-16T17:48:07.181">
  <scan objectsFound="17004" bytesTraced="936144" workStackOverflowCount="0" />
</gc-op>
<gc-op id="1045424" type="card-cleaning" timems="1.226" contextid="1045414" timestamp="2025-11-16T17:48:07.183">
  <card-cleaning cardsCleaned="368" bytesTraced="479792" workStackOverflowCount="0" />
</gc-op>
<gc-op id="1045425" type="mark" timems="12.281" contextid="1045414" timestamp="2025-11-16T17:48:07.195">
  <trace-info objectcount="245094" scancount="188974" scanbytes="6259568" />
  <finalization candidates="1781" enqueued="3" />
  <ownableSynchronizers candidates="2118" cleared="3" />
  <references type="soft" candidates="7914" cleared="11" enqueued="0" dynamicThreshold="21" maxThreshold="32" />
  <references type="weak" candidates="12441" cleared="0" enqueued="0" />
  <references type="phantom" candidates="7418" cleared="0" enqueued="0" />
  <stringconstants candidates="26560" cleared="5"  />
  <object-monitors candidates="270" cleared="0"  />
</gc-op>
<gc-op id="1045426" type="sweep" timems="5.398" contextid="1045414" timestamp="2025-11-16T17:48:07.201" />
<gc-end id="1045427" type="global" contextid="1045414" durationms="28.127" usertimems="138.149" systemtimems="5.092" stalltimems="16.924" timestamp="2025-11-16T17:48:07.203" activeThreads="8">
  <mem-info id="1045428" free="1291703224" total="2395209728" percent="53">
    <mem type="nursery" free="610613600" total="1368915968" percent="44">
      <mem type="allocate" free="610613600" total="919273472" percent="66" />
      <mem type="survivor" free="0" total="449642496" percent="0" />
    </mem>
    <mem type="tenure" free="681089624" total="1026293760" percent="66" micro-fragmented="8258" macro-fragmented="356103648">
      <mem type="soa" free="670826072" total="1016030208" percent="66" />
      <mem type="loa" free="10263552" total="10263552" percent="100" />
    </mem>
    <pending-finalizers system="3" default="0" reference="0" classloader="0" />
    <remembered-set count="8231" />
  </mem-info>
</gc-end>
<cycle-end id="1045429" type="global" contextid="1045414" timestamp="2025-11-16T17:48:07.204" />
<exclusive-end id="1045430" timestamp="2025-11-16T17:48:07.204" durationms="30.723" />

tigerhsu avatar Nov 19 '25 08:11 tigerhsu