[Node stopped syncing after reset] (Version: [2.1.1])
Issue Description
Hi, I am running rippled testnet network nodes, and they stopped syncing after I performed a reset on the nodes because of the testnet reset described at https://xrpl.org/blog/2024/testnet-reset/.
Steps to Reproduce
I executed the following command on the rippled testnet network pods.
rm -r /var/lib/rippled/db/*
Expected Result
The rippled testnet network nodes should start syncing and catching up with the latest block/ledger height.
Actual Result
The rippled testnet network nodes have stopped syncing. I can see the following error message in the logs:
2024-Apr-20 13:24:29.074603826 UTC LedgerConsensus:WRN View of consensus changed during open status=open, mode=observing
2024-Apr-20 13:24:29.074648741 UTC LedgerConsensus:WRN F8B6B30308C0E4A41CE01822EE944ABAC1370626C015DDA78CB26B48E6304F90 to DB11E69578E710B42A686602B6207174BB330561DD1F24BBBB201BB87D759E88
2024-Apr-20 13:24:29.074699167 UTC LedgerConsensus:WRN {"account_hash":"7C60720487E78881BB1B940E770EC75EA59993144290710BFCA7DB36955AD680","close_flags":0,"close_time":766934670,"close_time_human":"2024-Apr-20 13:24:30.000000000 UTC","close_time_iso":"2024-04-20T13:24:30Z","close_time_resolution":10,"closed":true,"ledger_hash":"F8B6B30308C0E4A41CE01822EE944ABAC1370626C015DDA78CB26B48E6304F90","ledger_index":9537,"parent_close_time":766934662,"parent_hash":"10A468741A321313E9769CB8F5A06329F316A8269396D62DD3ADCDB0CC311FFB","total_coins":"99999999999950345","transaction_hash":"0000000000000000000000000000000000000000000000000000000000000000"}
2024-Apr-20 13:24:34.304580806 UTC LedgerHistory:ERR MISMATCH: seq=9539 built:CAC1C35672E673DBBBB263C4DB68114298B980DFCF97E7FAB0632C22CC0D244F then:2E6C5257FAA8ACA5E448E1CC99C5C57B313C7FAECA37E0A7CF3D872F7CCF72CE
2024-Apr-20 13:24:34.304628597 UTC LedgerHistory:ERR MISMATCH with same consensus transaction set: 1EDBC98082F609134C0CA4E18C0E7EA4E809DA8DB34AD26EF135F068F90E34A5
2024-Apr-20 13:24:34.304640088 UTC LedgerHistory:ERR MISMATCH with 0 built and 1 valid transactions.
2024-Apr-20 13:24:34.304721787 UTC LedgerHistory:ERR built
{
"account_hash" : "9B928F05EDEFBF9FD50DE483810E3CF1CB5242F1F5B01FA17C21C83B74B4A112",
"close_flags" : 0,
"close_time" : 766934672,
"close_time_human" : "2024-Apr-20 13:24:32.000000000 UTC",
"close_time_iso" : "2024-04-20T13:24:32Z",
"close_time_resolution" : 10,
"closed" : true,
"ledger_hash" : "CAC1C35672E673DBBBB263C4DB68114298B980DFCF97E7FAB0632C22CC0D244F",
"ledger_index" : 9539,
"parent_close_time" : 766934671,
"parent_hash" : "CE527ED7672B89ECEC14C75E8940DF6C1EA353A73F47C9F0AC22A318EF32A402",
"total_coins" : "99999999999950333",
"transaction_hash" : "0000000000000000000000000000000000000000000000000000000000000000"
}
2024-Apr-20 13:24:34.304762158 UTC LedgerHistory:ERR valid
Environment
I am running my node with the Docker image xrpllabsofficial/xrpld:2.1.1.
Supporting Files
dispatch_group_enter/dispatch_group_leave is your App code, not SDWebImage's code.
You cannot (currently) assume each sd_setImage call will always call back, so the enter and leave may not be balanced. I guess this is a bug, but we need more info to investigate.
这个错误我也捕捉不到,我也是根据崩溃反馈,所拿到的崩溃在sebwebimg里面了,求大佬指点
#import "NK_MicIconsView.h"
@implementation NK_MicIconsView
// Configures the badge/icon row for one chat message and asynchronously
// reports the total row width (via `finished`) once all badge images that
// were requested have finished loading.
//
// NOTE(review): each async image load is bracketed by dispatch_group_enter
// before the request and dispatch_group_leave inside the SDWebImage-style
// completion block. SDWebImage does not guarantee that the completion block
// is always invoked (e.g. when a later nk_setImageWithURL: call on the same
// image view cancels the in-flight load during cell reuse), so the group can
// be left unbalanced; dispatch_group_notify would then never fire and
// `finished` would never be called. Confirm the cancellation behavior of the
// nk_setImageWithURL: wrapper before relying on this pairing.
-
(void)setCellData:(NK_ChatMsgModel *)nk_model finished:(void (^)(CGFloat totalW))finished{
// Pick the role badge for the entering user. Presumably 1/2/3 map to the
// "fang"/"guan"/"chao" role artwork — confirm against the model's docs.
// Any other value clears the badge.
if (nk_model.enter_user_type == 1) { _userTypeImgV.image = kImageNamed(@"room_mic_fang"); }else if (nk_model.enter_user_type == 2) { _userTypeImgV.image = kImageNamed(@"room_mic_guan"); }else if (nk_model.enter_user_type == 3) { _userTypeImgV.image = kImageNamed(@"room_mic_chao"); }else { _userTypeImgV.image = nil; }
// Collapse every badge (zero width / zero leading gap) before deciding
// which ones this message actually shows. Required for correct layout on
// cell reuse, since this view instance is reconfigured in place.
_jwImgVWC.constant = 0;
_vipImgVLeftCon.constant = 0; _vipImgVWC.constant = 0;
_gxImgVLeftCon.constant = 0; _gxImgVWC.constant = 0;
_mlImgVLeftCon.constant = 0; _mlImgVWC.constant = 0;
_expLevlImgVLeftCon.constant = 0; _expLevlImgVWC.constant = 0;
_userImgVLeftCon.constant = 0;
// Group used to join all the asynchronous badge-image loads below.
dispatch_group_t group = dispatch_group_create();
// totalW accumulates the computed row width across the completion blocks.
// NOTE(review): the completion blocks mutate totalW without synchronization;
// this assumes the image-load completions are delivered on a single queue
// (SDWebImage normally calls back on the main queue) — verify the nk_
// wrapper preserves that, otherwise this is a data race.
// Nobility badge: scale to a 24pt height, widen its constraint to the
// aspect-corrected width, and add that width plus 5pt spacing to totalW.
// The height guard (h = 1 when height <= 0) avoids division by zero.
WEAK_SELF __block CGFloat totalW = 0; if (nk_model.nobility_image.length > 0) { dispatch_group_enter(group); [_jueweiImgV nk_setImageWithURL:kURL(nk_model.nobility_image) completed:^(UIImage * _Nullable image, NSError * _Nullable error, SDImageCacheType cacheType, NSURL * _Nullable imageURL) { if (image){ CGSize size = image.size; CGFloat h = size.height>0 ? size.height : 1; CGFloat newW = size.width/h * 24; nk_weakSelf.jwImgVWC.constant = newW; totalW += (newW+5); } dispatch_group_leave(group); }]; }
// VIP-level badge: same pattern, scaled to a 20pt height, with a 5pt
// leading gap enabled up front.
if (nk_model.vip_level_image.length > 0) { _vipImgVLeftCon.constant = 5; dispatch_group_enter(group); [_vipImgV nk_setImageWithURL:kURL(nk_model.vip_level_image) completed:^(UIImage * _Nullable image, NSError * _Nullable error, SDImageCacheType cacheType, NSURL * _Nullable imageURL) { if (image){ CGSize size = image.size; CGFloat h = size.height>0 ? size.height : 1; CGFloat newW = size.width/h * 20; nk_weakSelf.vipImgVWC.constant = newW; totalW += (newW+5); } dispatch_group_leave(group); }];
}
// Contribution-level badge: like the VIP badge, but also pre-sets a 20pt
// placeholder width before the image arrives (the only badge that does so).
if (nk_model.contribution_level_image.length > 0) { _gxImgVLeftCon.constant = 5; _gxImgVWC.constant = 20; dispatch_group_enter(group); [_gxImgV nk_setImageWithURL:kURL(nk_model.contribution_level_image) completed:^(UIImage * _Nullable image, NSError * _Nullable error, SDImageCacheType cacheType, NSURL * _Nullable imageURL) { if (image){ CGSize size = image.size; CGFloat h = size.height>0 ? size.height : 1; CGFloat newW = size.width/h * 20; nk_weakSelf.gxImgVWC.constant = newW; totalW += (newW+5); } dispatch_group_leave(group); }];
}
// Charm-level badge: 20pt height, 5pt leading gap.
if (nk_model.charm_level_image.length > 0) { _mlImgVLeftCon.constant = 5; dispatch_group_enter(group);
[_mlImgV nk_setImageWithURL:kURL(nk_model.charm_level_image) completed:^(UIImage * _Nullable image, NSError * _Nullable error, SDImageCacheType cacheType, NSURL * _Nullable imageURL) { if (image){ CGSize size = image.size; CGFloat h = size.height>0 ? size.height : 1; CGFloat newW = size.width/h * 20; nk_weakSelf.mlImgVWC.constant = newW; totalW += (newW+5); } dispatch_group_leave(group); }];}
// Experience-level badge: 20pt height, 5pt leading gap.
if (nk_model.exp_level_image.length > 0) { _expLevlImgVLeftCon.constant = 5; dispatch_group_enter(group);
[_expLevlImgV nk_setImageWithURL:kURL(nk_model.exp_level_image) completed:^(UIImage * _Nullable image, NSError * _Nullable error, SDImageCacheType cacheType, NSURL * _Nullable imageURL) { if (image){ CGSize size = image.size; CGFloat h = size.height>0 ? size.height : 1; CGFloat newW = size.width/h * 20; nk_weakSelf.expLevlImgVWC.constant = newW; totalW += (newW+5); } dispatch_group_leave(group); }];}
// The role badge set at the top is a fixed 20pt wide with a 5pt gap, hence
// the constant 25pt contribution; it loads synchronously, so no group entry.
if (nk_model.enter_user_type == 1 || nk_model.enter_user_type == 2 || nk_model.enter_user_type == 3) { totalW += 25; _userImgVLeftCon.constant = 5; _userImgVWC.constant = 20; }
// Once every entered load has left the group: report the final width to the
// caller and update this view's own width constraint via Masonry.
// `self` is captured strongly here, but the group is a local — the block is
// released after it runs once, so this is not a retain cycle.
// NOTE(review): if any completion above never fires (see the method-level
// note), this notify block never executes.
dispatch_group_notify(group, dispatch_get_main_queue(), ^{ if (finished){ finished(totalW); } // MYLog(@"--------totalW--------%.2f",totalW); [self mas_updateConstraints:^(MASConstraintMaker *nk_make) { nk_make.width.mas_equalTo(totalW); }]; }); }
@end
dispatch_group_enter/dispatch_group_leave is your App code, not SDWebImage's code. You cannot (currently) assume each
sd_setImage call will always call back, so the enter and leave may not be balanced. I guess this is a bug, but we need more info to investigate.
If dispatch_group_enter/dispatch_group_leave cannot be used, how to achieve similar requirements?
Maybe you can try dispatch_semaphore to limit the number of concurrent tasks, like this: dispatch_semaphore_t semaphore = dispatch_semaphore_create(0); When an asynchronous task completes, add: dispatch_semaphore_signal(semaphore); Before the last task, add: dispatch_semaphore_wait(semaphore, DISPATCH_TIME_FOREVER);
dispatch_group_enter/dispatch_group_leave is your App code, not SDWebImage's code. You cannot (currently) assume each sd_setImage call will always call back, so the enter and leave may not be balanced. I guess this is a bug, but we need more info to investigate. If dispatch_group_enter/dispatch_group_leave cannot be used, how can similar requirements be achieved?