datastore grows in size; StorageMax not respected since 0.39.0
Checklist
- [x] This is a bug report, not a question. Ask questions on discuss.ipfs.tech.
- [x] I have searched on the issue tracker for my bug.
- [x] I am running the latest kubo version or have an issue updating.
Installation method
dist.ipfs.tech or ipfs-update
Version
Kubo version: 0.39.0
Repo version: 18
System version: amd64/linux
Golang version: go1.25.4
Config
{
"API": {
"HTTPHeaders": {
"Access-Control-Allow-Methods": [
"PUT",
"POST"
],
"Access-Control-Allow-Origin": [
"http://localhost:5001",
"http://127.0.0.1:5001"
]
}
},
"Addresses": {
"API": [
"/ip4/0.0.0.0/tcp/5001",
"/ip6/::/tcp/5001"
],
"Announce": [
"/ip4/185.40.199.126/tcp/4001",
"/ip4/185.40.199.126/udp/4001/webrtc-direct",
"/ip4/185.40.199.126/udp/4001/quic-v1",
"/ip4/185.40.199.126/udp/4001/quic-v1/webtransport",
"/ip6/2a00:c90::56:2/tcp/4001",
"/ip6/2a00:c90::56:2/udp/4001/webrtc-direct",
"/ip6/2a00:c90::56:2/udp/4001/quic-v1",
"/ip6/2a00:c90::56:2/udp/4001/quic-v1/webtransport"
],
"AppendAnnounce": null,
"Gateway": [
"/ip4/0.0.0.0/tcp/8080",
"/ip6/::/tcp/8080"
],
"NoAnnounce": [
"/ip4/10.0.0.0/ipcidr/8",
"/ip4/100.64.0.0/ipcidr/10",
"/ip4/169.254.0.0/ipcidr/16",
"/ip4/172.16.0.0/ipcidr/12",
"/ip4/192.0.0.0/ipcidr/24",
"/ip4/192.0.2.0/ipcidr/24",
"/ip4/192.168.0.0/ipcidr/16",
"/ip4/198.18.0.0/ipcidr/15",
"/ip4/198.51.100.0/ipcidr/24",
"/ip4/203.0.113.0/ipcidr/24",
"/ip4/240.0.0.0/ipcidr/4",
"/ip6/100::/ipcidr/64",
"/ip6/2001:2::/ipcidr/48",
"/ip6/2001:db8::/ipcidr/32",
"/ip6/fc00::/ipcidr/7",
"/ip6/fe80::/ipcidr/10"
],
"Swarm": [
"/ip4/0.0.0.0/tcp/4001",
"/ip4/0.0.0.0/udp/4001/quic-v1",
"/ip4/0.0.0.0/udp/4001/quic-v1/webtransport",
"/ip6/::/tcp/4001",
"/ip6/::/udp/4001/quic-v1",
"/ip6/::/udp/4001/quic-v1/webtransport"
]
},
"AutoConf": {},
"AutoNAT": {},
"Bootstrap": [
"auto"
],
"DNS": {
"Resolvers": {
".": "auto"
}
},
"Datastore": {
"BloomFilterSize": 0,
"GCPeriod": "1h",
"HashOnRead": false,
"Spec": {
"mounts": [
{
"child": {
"path": "blocks",
"shardFunc": "/repo/flatfs/shard/v1/next-to-last/2",
"sync": true,
"type": "flatfs"
},
"mountpoint": "/blocks",
"prefix": "flatfs.datastore",
"type": "measure"
},
{
"child": {
"compression": "none",
"path": "datastore",
"type": "levelds"
},
"mountpoint": "/",
"prefix": "leveldb.datastore",
"type": "measure"
}
],
"type": "mount"
},
"StorageGCWatermark": 90,
"StorageMax": "5GB"
},
"Discovery": {
"MDNS": {
"Enabled": false
}
},
"Experimental": {
"FilestoreEnabled": false,
"GraphsyncEnabled": false,
"Libp2pStreamMounting": false,
"P2pHttpProxy": false,
"StrategicProviding": false,
"UrlstoreEnabled": false
},
"Gateway": {
"APICommands": [],
"HTTPHeaders": {},
"NoDNSLink": false,
"NoFetch": false,
"PathPrefixes": [],
"PublicGateways": {
"localhost": null
},
"RootRedirect": "",
"Writable": false
},
"Identity": {
"PeerID": "<redacted>",
"PrivKey": "<redacted>"
},
"Internal": {},
"Ipns": {
"DelegatedPublishers": [
"auto"
],
"RecordLifetime": "",
"RepublishPeriod": "",
"ResolveCacheSize": 128
},
"Migration": {
"DownloadSources": [],
"Keep": ""
},
"Mounts": {
"FuseAllowOther": false,
"IPFS": "/ipfs",
"IPNS": "/ipns"
},
"Peering": {
"Peers": [
{
"Addrs": [
"/ip4/146.59.1.158/tcp/4001",
"/ip4/146.59.1.158/udp/4001/webrtc-direct",
"/ip4/146.59.1.158/udp/4001/quic-v1",
"/ip4/146.59.1.158/udp/4001/quic-v1/webtransport",
"/ip6/2001:41d0:602:3d9e::1/tcp/4001",
"/ip6/2001:41d0:602:3d9e::1/udp/4001/webrtc-direct",
"/ip6/2001:41d0:602:3d9e::1/udp/4001/quic-v1",
"/ip6/2001:41d0:602:3d9e::1/udp/4001/quic-v1/webtransport",
"/ip4/10.35.17.9/tcp/4001",
"/ip4/10.35.17.9/udp/4001/webrtc-direct",
"/ip4/10.35.17.9/udp/4001/quic-v1",
"/ip4/10.35.17.9/udp/4001/quic-v1/webtransport",
"/ip6/fde8:87d3:9415:9832::9/tcp/4001",
"/ip6/fde8:87d3:9415:9832::9/udp/4001/webrtc-direct",
"/ip6/fde8:87d3:9415:9832::9/udp/4001/quic-v1",
"/ip6/fde8:87d3:9415:9832::9/udp/4001/quic-v1/webtransport"
],
"ID": "12D3KooWSMyfy3sqRyd6Q1VA85DSedLrkaoQKSx551BEZHjwkx1k"
},
{
"Addrs": [
"/ip4/185.236.108.204/tcp/4001",
"/ip4/185.236.108.204/udp/4001/webrtc-direct",
"/ip4/185.236.108.204/udp/4001/quic-v1",
"/ip4/185.236.108.204/udp/4001/quic-v1/webtransport",
"/ip4/10.35.18.2/tcp/4001",
"/ip4/10.35.18.2/udp/4001/webrtc-direct",
"/ip4/10.35.18.2/udp/4001/quic-v1",
"/ip4/10.35.18.2/udp/4001/quic-v1/webtransport",
"/ip6/fde8:87d3:9415:9833::2/tcp/4001",
"/ip6/fde8:87d3:9415:9833::2/udp/4001/webrtc-direct",
"/ip6/fde8:87d3:9415:9833::2/udp/4001/quic-v1",
"/ip6/fde8:87d3:9415:9833::2/udp/4001/quic-v1/webtransport"
],
"ID": "12D3KooWMB9trMK6nwymDYZiTh2niKncoZydSf5A9AkKyeDcdws9"
}
]
},
"Pinning": {
"RemoteServices": {}
},
"Plugins": {
"Plugins": null
},
"Pubsub": {
"DisableSigning": false,
"Router": ""
},
"Routing": {
"AcceleratedDHTClient": false,
"DelegatedRouters": [
"auto"
],
"Routers": null
},
"Swarm": {
"AddrFilters": [
"/ip4/10.0.0.0/ipcidr/8",
"/ip4/100.64.0.0/ipcidr/10",
"/ip4/169.254.0.0/ipcidr/16",
"/ip4/172.16.0.0/ipcidr/12",
"/ip4/192.0.0.0/ipcidr/24",
"/ip4/192.0.2.0/ipcidr/24",
"/ip4/192.168.0.0/ipcidr/16",
"/ip4/198.18.0.0/ipcidr/15",
"/ip4/198.51.100.0/ipcidr/24",
"/ip4/203.0.113.0/ipcidr/24",
"/ip4/240.0.0.0/ipcidr/4",
"/ip6/100::/ipcidr/64",
"/ip6/2001:2::/ipcidr/48",
"/ip6/2001:db8::/ipcidr/32",
"/ip6/fc00::/ipcidr/7",
"/ip6/fe80::/ipcidr/10"
],
"ConnMgr": {},
"DisableBandwidthMetrics": false,
"DisableNatPortMap": true,
"RelayClient": {},
"RelayService": {},
"ResourceMgr": {},
"Transports": {
"Multiplexers": {},
"Network": {},
"Security": {}
}
}
}
Description
After update to 0.39.0 the datastore started to gain weight quite quickly.
Initially I had Datastore.StorageMax set to default 10G. I changed it to 5GB and restarted kubo. A restart is purging the datastore. It grew again to 8.4G, which is way beyond 5GB. I'm running "ipfs repo gc" daily from cron. Even when run manually, it changes nothing.
Triage:
- StorageMax is not a hard limit; it is only used to decide whether GC should be triggered when --enable-gc is used (false by default). See: https://github.com/ipfs/kubo/blob/master/docs/config.md#datastorestoragemax
- The increased disk usage rate might come from Provide.Sweep (https://github.com/ipfs/kubo/blob/master/docs/config.md#providedhtsweepenabled), but that seems like too much. So:
- You can disable it and see if the rate of storage growth slows down (let us know)
- Otherwise, we are assuming you have not changed your regular usage (i.e. fetching more things into the repo).
I set Provide.DHT.SweepEnabled: false. Let's wait for results...
Yep. The disk usage stays flat.
Oops, seems like we needed more information for this issue, please comment with more details or this issue will be closed in 7 days.