js-libp2p
ERR_ENCRYPTION_FAILED trying to connect to go-ipfs
I’m getting ERR_ENCRYPTION_FAILED when trying to connect from my Node app using js-libp2p 1.2.1 to a go-ipfs node v0.4.18. See below for detailed information, code and config settings.
My Node app code:
libp2p settings
// imports assumed from the package names used below (they were not shown in my original post)
import { webSockets } from '@libp2p/websockets';
import { all } from '@libp2p/websockets/filters';
import { circuitRelayTransport } from '@libp2p/circuit-relay-v2';
import { tcp } from '@libp2p/tcp';
import { noise } from '@chainsafe/libp2p-noise';
import { yamux } from '@chainsafe/libp2p-yamux';
import { identify } from '@libp2p/identify';
import { gossipsub } from '@chainsafe/libp2p-gossipsub';
import { mdns } from '@libp2p/mdns';
import { bootstrap } from '@libp2p/bootstrap';

// listen (listen multiaddrs) and list (bootstrap peers) are defined elsewhere, see the note below the config
export const libp2pOptions = {
  addresses: {
    listen,
  },
  transports: [
    webSockets({
      filter: all,
    }),
    circuitRelayTransport({
      discoverRelays: 1,
    }),
    tcp(),
  ],
  connectionEncryption: [noise()],
  streamMuxers: [yamux()],
  connectionGater: {
    denyDialMultiaddr: () => false,
  },
  services: {
    identify: identify(),
    pubsub: gossipsub({ allowPublishToZeroPeers: true }),
  },
  peerDiscovery: [
    mdns({ interval: 1000 }),
    bootstrap({
      list,
      timeout: 1000, // in ms
      tagName: 'bootstrap',
      tagValue: 50,
      tagTTL: 120000, // in ms
    }),
  ],
};
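For reference, listen and list are placeholders for values defined elsewhere in my project; roughly they look like this (the addresses below are examples only, the bootstrap entry mirrors the go-ipfs node's own address from its config):

// example values only, not my real setup
const listen = [
  '/ip4/0.0.0.0/tcp/0',
  '/ip4/0.0.0.0/tcp/0/ws'
];
const list = [
  '/ip4/10.128.0.2/tcp/4001/p2p/QmVKuLVFyBX2ZSyB2w7YLpH4AM7YCdaqdNRjwZa9cR335M'
];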
connection routines:
import { createLibp2p } from 'libp2p'; // import the libp2p module
import { LevelBlockstore } from 'blockstore-level';
import { createHelia } from 'helia';
import { multiaddr } from '@multiformats/multiaddr'; // import assumed: used below to parse the remote address
import { libp2pOptions } from './libp2pConfig.js';

const blockstore = new LevelBlockstore(blockstorePath);
const libp2p = await createLibp2p(libp2pOptions);
const ipfs = await createHelia({ libp2p, blockstore });
const node = ipfs.libp2p;

// PEER_IP, PEER_PORT and PEER_ID are defined elsewhere and point at the remote go-ipfs node
const remoteMultiaddr = `/ip4/${PEER_IP}/tcp/${PEER_PORT}/ws/p2p/${PEER_ID}`;
const multiAddr = multiaddr(remoteMultiaddr);

try {
  const { status, remoteAddr, remotePeer, direction } = await node.dial(multiAddr);
  console.log({ status, remoteAddr, remotePeer, direction });
} catch (error) {
  console.log(error);
}

node.addEventListener('peer:discovery', (evt) => {
  logger.info(`discovered peer ${evt.detail.id.toString()}`);
});

node.addEventListener('peer:connect', (evt) => {
  logger.info(`connected to ${evt.detail.toString()}`);
});
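When a dial does succeed, the returned connection can also be inspected to confirm what was actually negotiated; a minimal sketch, assuming the optional encryption and multiplexer fields that @libp2p/interface declares on Connection:

// sketch: log which security and muxer protocols the upgrader negotiated
const connection = await node.dial(multiAddr);
console.log('remote peer:', connection.remotePeer.toString());
console.log('encryption:', connection.encryption);   // e.g. '/noise'
console.log('multiplexer:', connection.multiplexer); // e.g. '/yamux/1.0.0'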
go-ipfs config:
{
"API": {
"HTTPHeaders": {
"Access-Control-Allow-Headers": [
"X-Requested-With",
"Access-Control-Expose-Headers",
"Range"
],
"Access-Control-Allow-Methods": [
"PUT",
"GET",
"POST"
],
"Access-Control-Allow-Origin": [
"*"
],
"Access-Control-Expose-Headers": [
"Location",
"Ipfs-Hash"
],
"X-Special-Header": [
"Access-Control-Expose-Headers: Ipfs-Hash"
]
}
},
"Addresses": {
"API": "/ip4/127.0.0.1/tcp/5001",
"Announce": [],
"Gateway": "/ip4/127.0.0.1/tcp/8080",
"NoAnnounce": [],
"Swarm": [
"/ip4/0.0.0.0/tcp/4001",
"/ip6/::/tcp/4001"
]
},
"Bootstrap": [
"/ip4/10.128.0.2/tcp/4001/ipfs/QmVKuLVFyBX2ZSyB2w7YLpH4AM7YCdaqdNRjwZa9cR335M"
],
"Datastore": {
"BloomFilterSize": 0,
"GCPeriod": "1h",
"HashOnRead": false,
"Spec": {
"mounts": [
{
"child": {
"path": "blocks",
"shardFunc": "/repo/flatfs/shard/v1/next-to-last/2",
"sync": true,
"type": "flatfs"
},
"mountpoint": "/blocks",
"prefix": "flatfs.datastore",
"type": "measure"
},
{
"child": {
"compression": "none",
"path": "datastore",
"type": "levelds"
},
"mountpoint": "/",
"prefix": "leveldb.datastore",
"type": "measure"
}
],
"type": "mount"
},
"StorageGCWatermark": 90,
"StorageMax": "5GB"
},
"Discovery": {
"MDNS": {
"Enabled": true,
"Interval": 10
}
},
"Experimental": {
"FilestoreEnabled": false,
"Libp2pStreamMounting": false,
"P2pHttpProxy": false,
"QUIC": false,
"ShardingEnabled": false,
"UrlstoreEnabled": false
},
"Gateway": {
"APICommands": [],
"HTTPHeaders": {
"Access-Control-Allow-Headers": [
"X-Requested-With",
"Range"
],
"Access-Control-Allow-Methods": [
"GET",
"POST"
],
"Access-Control-Allow-Origin": [
"*"
]
},
"PathPrefixes": [],
"RootRedirect": "",
"Writable": true
},
"Identity": {
"PeerID": "QmVKuLVFyBX2ZSyB2w7YLpH4AM7YCdaqdNRjwZa9cR335M"
},
"Ipns": {
"RecordLifetime": "",
"RepublishPeriod": "",
"ResolveCacheSize": 128
},
"Mounts": {
"FuseAllowOther": false,
"IPFS": "/ipfs",
"IPNS": "/ipns"
},
"Pubsub": {
"DisableSigning": false,
"Router": "",
"StrictSignatureVerification": false
},
"Reprovider": {
"Interval": "12h",
"Strategy": "all"
},
"Routing": {
"Type": "dht"
},
"Swarm": {
"AddrFilters": null,
"ConnMgr": {
"GracePeriod": "20s",
"HighWater": 900,
"LowWater": 600,
"Type": "basic"
},
"DisableBandwidthMetrics": false,
"DisableNatPortMap": false,
"DisableRelay": false,
"EnableRelayHop": false
}
}
Oh, wow - that version of go-ipfs/kubo was released five years ago. I'm not sure it's still supported - is it not possible for you to upgrade?
Yeah, it’s really old. I thought that could be the problem. I will ask if the upgrade is possible.
@achingbrain I tested against a newer version, Kubo 0.27, but I get the exact same error. Could it be that I’m setting Noise as the encryption in libp2p, but no encryption is set up on the remote node? At least none that I’m aware of. This is the config of the remote node:
developer@ipfs-node-1beta:/home/msalimbene$ ipfs version
ipfs version 0.27.0
developer@ipfs-node-1beta:/home/msalimbene$ ipfs config show
{
"API": {
"HTTPHeaders": {
"Access-Control-Allow-Methods": [
"PUT",
"POST"
],
"Access-Control-Allow-Origin": [
"http://localhost:3000",
"http://127.0.0.1:5001"
]
}
},
"Addresses": {
"API": "/ip4/127.0.0.1/tcp/5001",
"Announce": null,
"AppendAnnounce": null,
"Gateway": "/ip4/127.0.0.1/tcp/8080",
"NoAnnounce": null,
"Swarm": [
"/ip4/0.0.0.0/tcp/4001",
"/ip6/::/tcp/4001",
"/ip4/0.0.0.0/udp/4001/quic-v1",
"/ip4/0.0.0.0/udp/4001/quic-v1/webtransport",
"/ip6/::/udp/4001/quic-v1",
"/ip6/::/udp/4001/quic-v1/webtransport"
]
},
"AutoNAT": {},
"Bootstrap": [
"/ip4/10.128.0.2/tcp/4001/p2p/QmVKuLVFyBX2ZSyB2w7YLpH4AM7YCdaqdNRjwZa9cR335M"
],
"DNS": {
"Resolvers": null
},
"Datastore": {
"BloomFilterSize": 0,
"GCPeriod": "1h",
"HashOnRead": false,
"Spec": {
"mounts": [
{
"child": {
"path": "blocks",
"shardFunc": "/repo/flatfs/shard/v1/next-to-last/2",
"sync": true,
"type": "flatfs"
},
"mountpoint": "/blocks",
"prefix": "flatfs.datastore",
"type": "measure"
},
{
"child": {
"compression": "none",
"path": "datastore",
"type": "levelds"
},
"mountpoint": "/",
"prefix": "leveldb.datastore",
"type": "measure"
}
],
"type": "mount"
},
"StorageGCWatermark": 90,
"StorageMax": "10GB"
},
"Discovery": {
"MDNS": {
"Enabled": true,
"Interval": 10
}
},
"Experimental": {
"FilestoreEnabled": false,
"Libp2pStreamMounting": false,
"OptimisticProvide": false,
"OptimisticProvideJobsPoolSize": 0,
"P2pHttpProxy": false,
"QUIC": false,
"ShardingEnabled": false,
"StrategicProviding": false,
"UrlstoreEnabled": false
},
"Gateway": {
"APICommands": [],
"DeserializedResponses": null,
"DisableHTMLErrors": null,
"ExposeRoutingAPI": null,
"HTTPHeaders": {
"Access-Control-Allow-Headers": [
"X-Requested-With",
"Range"
]
},
"NoDNSLink": false,
"NoFetch": false,
"PathPrefixes": [],
"PublicGateways": null,
"RootRedirect": "",
"Writable": false
},
"Identity": {
"PeerID": "12D3KooWGnC5m82ccmXayxe5qYtH1PupxuXtZpWEe2swTPfbn4Bk"
},
"Internal": {},
"Ipns": {
"RecordLifetime": "",
"RepublishPeriod": "",
"ResolveCacheSize": 128
},
"Migration": {
"DownloadSources": null,
"Keep": ""
},
"Mounts": {
"FuseAllowOther": false,
"IPFS": "/ipfs",
"IPNS": "/ipns"
},
"Peering": {
"Peers": null
},
"Pinning": {},
"Plugins": {
"Plugins": null
},
"Provider": {
"Strategy": ""
},
"Pubsub": {
"DisableSigning": false,
"Router": "",
"StrictSignatureVerification": false
},
"Reprovider": {},
"Routing": {
"AcceleratedDHTClient": false,
"Methods": null,
"Routers": null
},
"Swarm": {
"AddrFilters": null,
"ConnMgr": {},
"DisableBandwidthMetrics": false,
"DisableNatPortMap": false,
"DisableRelay": false,
"EnableRelayHop": false,
"RelayClient": {},
"RelayService": {},
"ResourceMgr": {},
"Transports": {
"Multiplexers": {},
"Network": {},
"Security": {}
}
}
}
This is the exact error message:
at TCP.onStreamRead (node:internal/stream_base_commons:217:20) {
errno: -54,
code: 'ECONNRESET',
syscall: 'read'
} +14s
CodeError: read ECONNRESET
at DefaultUpgrader._encryptOutbound (file:///Users/salimbene/dev/ipfs/node_modules/libp2p/dist/src/upgrader.js:520:19)
at process.processTicksAndRejections (node:internal/process/task_queues:95:5)
at async DefaultUpgrader.upgradeOutbound (file:///Users/salimbene/dev/ipfs/node_modules/libp2p/dist/src/upgrader.js:199:21)
at async TCP.dial (file:///Users/salimbene/dev/ipfs/node_modules/@libp2p/tcp/dist/src/index.js:79:22)
at async DefaultTransportManager.dial (file:///Users/salimbene/dev/ipfs/node_modules/libp2p/dist/src/transport-manager.js:81:20)
at async queue.add.peerId.peerId [as fn] (file:///Users/salimbene/dev/ipfs/node_modules/libp2p/dist/src/connection-manager/dial-queue.js:154:38)
at async raceSignal (file:///Users/salimbene/dev/ipfs/node_modules/race-signal/dist/src/index.js:28:16)
at async Job.run (file:///Users/salimbene/dev/ipfs/node_modules/@libp2p/utils/dist/src/queue/job.js:56:28) {
code: 'ERR_ENCRYPTION_FAILED',
props: {}
}
I’ve tried different encryption settings on my side (libp2p) and I get different errors, all under code ERR_ENCRYPTION_FAILED; see below:
- Trying with no encryption method I get CodeError: At least one protocol must be specified, which makes sense:
CodeError: At least one protocol must be specified
at DefaultUpgrader._encryptOutbound (file:///Users/salimbene/dev/ipfs/node_modules/libp2p/dist/src/upgrader.js:520:19)
at async DefaultUpgrader.upgradeOutbound (file:///Users/salimbene/dev/ipfs/node_modules/libp2p/dist/src/upgrader.js:199:21)
at async TCP.dial (file:///Users/salimbene/dev/ipfs/node_modules/@libp2p/tcp/dist/src/index.js:79:22)
at async DefaultTransportManager.dial (file:///Users/salimbene/dev/ipfs/node_modules/libp2p/dist/src/transport-manager.js:81:20)
at async queue.add.peerId.peerId [as fn] (file:///Users/salimbene/dev/ipfs/node_modules/libp2p/dist/src/connection-manager/dial-queue.js:154:38)
at async raceSignal (file:///Users/salimbene/dev/ipfs/node_modules/race-signal/dist/src/index.js:28:16)
at async Job.run (file:///Users/salimbene/dev/ipfs/node_modules/@libp2p/utils/dist/src/queue/job.js:56:28) {
code: 'ERR_ENCRYPTION_FAILED',
props: {}
}
- Using connectionEncryption: [noise()] I get CodeError: read ECONNRESET:
CodeError: read ECONNRESET
at DefaultUpgrader._encryptOutbound (file:///Users/salimbene/dev/ipfs/node_modules/libp2p/dist/src/upgrader.js:520:19)
at process.processTicksAndRejections (node:internal/process/task_queues:95:5)
at async DefaultUpgrader.upgradeOutbound (file:///Users/salimbene/dev/ipfs/node_modules/libp2p/dist/src/upgrader.js:199:21)
at async TCP.dial (file:///Users/salimbene/dev/ipfs/node_modules/@libp2p/tcp/dist/src/index.js:79:22)
at async DefaultTransportManager.dial (file:///Users/salimbene/dev/ipfs/node_modules/libp2p/dist/src/transport-manager.js:81:20)
at async queue.add.peerId.peerId [as fn] (file:///Users/salimbene/dev/ipfs/node_modules/libp2p/dist/src/connection-manager/dial-queue.js:154:38)
at async raceSignal (file:///Users/salimbene/dev/ipfs/node_modules/race-signal/dist/src/index.js:28:16)
at async Job.run (file:///Users/salimbene/dev/ipfs/node_modules/@libp2p/utils/dist/src/queue/job.js:56:28) {
code: 'ERR_ENCRYPTION_FAILED',
props: {}
}
- Using connectionEncryption: [plaintext()] I get CodeError: message length length too long (a sketch that combines both encrypters follows this trace):
CodeError: message length length too long
at DefaultUpgrader._encryptOutbound (file:///Users/salimbene/dev/ipfs/node_modules/libp2p/dist/src/upgrader.js:520:19)
at process.processTicksAndRejections (node:internal/process/task_queues:95:5)
at async DefaultUpgrader.upgradeOutbound (file:///Users/salimbene/dev/ipfs/node_modules/libp2p/dist/src/upgrader.js:199:21)
at async TCP.dial (file:///Users/salimbene/dev/ipfs/node_modules/@libp2p/tcp/dist/src/index.js:79:22)
at async DefaultTransportManager.dial (file:///Users/salimbene/dev/ipfs/node_modules/libp2p/dist/src/transport-manager.js:81:20)
at async queue.add.peerId.peerId [as fn] (file:///Users/salimbene/dev/ipfs/node_modules/libp2p/dist/src/connection-manager/dial-queue.js:154:38)
at async raceSignal (file:///Users/salimbene/dev/ipfs/node_modules/race-signal/dist/src/index.js:28:16)
at async Job.run (file:///Users/salimbene/dev/ipfs/node_modules/@libp2p/utils/dist/src/queue/job.js:56:28) {
code: 'ERR_ENCRYPTION_FAILED',
props: {}
}
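For completeness, both encrypters can also be offered at once, so multistream-select uses whichever the remote accepts; a minimal sketch (plaintext is only meant for testing, never production):

import { noise } from '@chainsafe/libp2p-noise';
import { plaintext } from '@libp2p/plaintext';

// multistream-select offers each encrypter in order and keeps the first
// one the remote agrees to
export const encryption = [noise(), plaintext()];
// used in the libp2p config as: connectionEncryption: encryption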
@achingbrain It appears that the version is actually the problem here. I’ve got Kubo 0.27 deployed and I was able to connect to it (I also needed to pass a swarm.key in the libp2p config).
I’m unable to connect to the older version (go-ipfs 0.4.18); I get the error "protocol selection failed". Perhaps that older version doesn’t support Noise (go-ipfs of that era used SECIO for connection encryption).
Long story short, I can connect libp2p to kubo-ipfs 0.27, but not to go-ipfs 0.4.18.
I'm getting the same error, though not with every remote node.
I have two nodes running Kubo v0.32.1 with the same configuration, exposing these endpoints:
/dns/websocket.datapod.coinduf.eu/tcp/443/wss/p2p/12D3KooWFp4JsoDo5FX8CFLtyJjaWWRZ8q3gr8uT2s9To2GYzRNA
/dns/websocket.datapod.gyroi.de/tcp/443/wss/p2p/12D3KooWAHf2cyDysXXP1xaAt75dNviRhF2T9QfnQGGZ6kSXvMwK
My browser app using libp2p v2.3 and @libp2p/websockets v9.0 can connect to the first one but gets the following error for the second one (a simplified sketch of my setup follows the trace):
Uncaught (in promise) EncryptionFailedError: unexpected end of input
EncryptionFailedError errors.ts:92
_encryptOutbound upgrader.ts:693
_performUpgrade upgrader.ts:284
upgradeOutbound upgrader.ts:231
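Roughly, this is the shape of the setup, simplified to the relevant parts (connectionEncrypters is, as far as I understand, the libp2p v2.x name for what used to be connectionEncryption):

import { createLibp2p } from 'libp2p';
import { webSockets } from '@libp2p/websockets';
import { noise } from '@chainsafe/libp2p-noise';
import { yamux } from '@chainsafe/libp2p-yamux';
import { identify } from '@libp2p/identify';
import { multiaddr } from '@multiformats/multiaddr';

const node = await createLibp2p({
  transports: [webSockets()],
  connectionEncrypters: [noise()], // libp2p v2.x option name
  streamMuxers: [yamux()],
  services: { identify: identify() }
});

// the first endpoint connects fine; the second fails inside _encryptOutbound
await node.dial(multiaddr('/dns/websocket.datapod.coinduf.eu/tcp/443/wss/p2p/12D3KooWFp4JsoDo5FX8CFLtyJjaWWRZ8q3gr8uT2s9To2GYzRNA'));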
I do not know how to debug this.
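A minimal sketch of how more detail could be pulled out of the client, assuming the debug-style enable() helper exported by @libp2p/logger:

import { enable } from '@libp2p/logger';

// assumption: enable() takes debug-style namespaces; 'libp2p:*' turns on
// all component logs, including the connection upgrader where the
// encryption handshake is failing
enable('libp2p:*');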
[edit]
I have news on this!! I reinitialized my second Kubo node from scratch (I had no data to keep), and the new endpoint started working:
/dns/websocket.datapod.gyroi.de/tcp/443/wss/p2p/12D3KooWQdw3ptcSk1exiBTBDGbFzLEhoVT3zyeoU1zrpswb3qyL
There might have been data corruption somewhere during a Kubo upgrade, which was leading to an uncaught client-side error.
Closing as this issue appears to have been resolved.