Output a lot of information and exit automatically
Issue Details
The configuration is as follows:
{
admin off
servers {
protocols h1 h2
}
}
http://127.0.0.1:80 {
reverse_proxy 127.0.0.1:8080 {
header_up Host {host}
header_up X-Real-IP {remote_host}
transport http {
dial_timeout 5s
response_header_timeout 60s
read_buffer 128k
write_buffer 128k
}
# WebSocket support
flush_interval -1
}
}
When an error occurs, a large amount of the following information will be output:
net/dial.go:686 +0x248 fp=0xc019b71460 sp=0xc019b71358 pc=0x7ff798b66208
net.(*sysDialer).dialParallel(0x0?, {0x7ff79a85bba0?, 0xc016176700?}, {0xc01178de70?, 0x7ff79a04c440?, 0x7ff79a2f2564?}, {0x0?, 0x7ff79a2f19b0?, 0x7ff79a325363?})
net/dial.go:587 +0x30a fp=0xc019b71678 sp=0xc019b71460 pc=0x7ff798b658ea
net.(*Dialer).DialContext(0xc000642630, {0x7ff79a85bb30, 0xc0120ec0a0}, {0x7ff79a2f19b0, 0x3}, {0xc012fdcc90, 0xe})
net/dial.go:578 +0x6a9 fp=0xc019b717e8 sp=0xc019b71678 pc=0x7ff798b651c9
github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy.(*HTTPTransport).NewTransport.func2({0x7ff79a85bb30, 0xc0120ec0a0}, {0x7ff79a2f19b0, 0x3}, {0xc012fdcc90, 0xe})
github.com/caddyserver/caddy/[email protected]/modules/caddyhttp/reverseproxy/httptransport.go:284 +0x1cf fp=0xc019b719c0 sp=0xc019b717e8 pc=0x7ff799ca050f
net/http.(*Transport).dial(0x7ff798a6fa19?, {0x7ff79a85bb30?, 0xc0120ec0a0?}, {0x7ff79a2f19b0?, 0x0?}, {0xc012fdcc90?, 0xc000086008?})
net/http/transport.go:1278 +0xd2 fp=0xc019b71a28 sp=0xc019b719c0 pc=0x7ff798dd4672
net/http.(*Transport).dialConn(0xc00062e680, {0x7ff79a85bb30, 0xc0120ec0a0}, {{}, 0x0, {0x7ff79a2f2554, 0x4}, {0xc012fdcc90, 0xe}, 0x0})
net/http/transport.go:1780 +0x7e5 fp=0xc019b71ee0 sp=0xc019b71a28 pc=0x7ff798dd7625
net/http.(*Transport).dialConnFor(0xc00062e680, 0xc011ae7760)
net/http/transport.go:1615 +0xb8 fp=0xc019b71f90 sp=0xc019b71ee0 pc=0x7ff798dd6138
net/http.(*Transport).startDialConnForLocked.func1()
net/http/transport.go:1597 +0x35 fp=0xc019b71fe0 sp=0xc019b71f90 pc=0x7ff798dd5f75
runtime.goexit({})
runtime/asm_amd64.s:1693 +0x1 fp=0xc019b71fe8 sp=0xc019b71fe0 pc=0x7ff798a77be1
created by net/http.(*Transport).startDialConnForLocked in goroutine 5963650
net/http/transport.go:1596 +0x112
goroutine 7322900 gp=0xc019b4ddc0 m=nil [runnable]:
runtime.cgocall(0x7ff798a79400, 0xc019b55b58)
runtime/cgocall.go:167 +0x3e fp=0xc019b7ed70 sp=0xc019b7ed08 pc=0x7ff798a6bf9e
runtime.syscall_syscalln(0x10?, 0x7ff798a0acbb?, {0xc019b7edb8?, 0x0?, 0x0?})
runtime/syscall_windows.go:521 +0x4e fp=0xc019b7ed90 sp=0xc019b7ed70 pc=0x7ff798a5844e
syscall.Syscall(0xc0158e6020?, 0xc019b7ee18?, 0x7ff798a6cb05?, 0x1?, 0xc019b7ee40?)
runtime/syscall_windows.go:457 +0x29 fp=0xc019b7ede0 sp=0xc019b7ed90 pc=0x7ff798a735c9
syscall.bind(0x4a6a4, 0x7ff79a14b200?, 0x10)
syscall/zsyscall_windows.go:1344 +0x65 fp=0xc019b7ee28 sp=0xc019b7ede0 pc=0x7ff798ad8105
syscall.Bind(0x4a6a4, {0x7ff79a841e00?, 0xc0158e6020?})
syscall/syscall_windows.go:981 +0x46 fp=0xc019b7ee50 sp=0xc019b7ee28 pc=0x7ff798ad04a6
net.(*netFD).connect(0xc019b51688, {0x7ff79a85bba0, 0xc01612f960}, {0x0, 0x0?}, {0x7ff79a841e00, 0xc0158e6000})
net/fd_windows.go:137 +0x389 fp=0xc019b7ef90 sp=0xc019b7ee50 pc=0x7ff798b708c9
net.(*netFD).dial(0xc019b51688, {0x7ff79a85bba0, 0xc01612f960}, {0x7ff79a8637e0?, 0x0?}, {0x7ff79a8637e0, 0xc015714210}, 0x7ff798b7486b?)
net/sock_posix.go:124 +0x3c5 fp=0xc019b7f068 sp=0xc019b7ef90 pc=0x7ff798b89ca5
net.socket({0x7ff79a85bba0, 0xc01612f960}, {0x7ff79a2f19b0, 0x3}, 0x2, 0x1, 0x0?, 0x0, {0x7ff79a8637e0, 0x0}, ...)
net/sock_posix.go:70 +0x2af fp=0xc019b7f110 sp=0xc019b7f068 pc=0x7ff798b897ef
net.internetSocket({0x7ff79a85bba0, 0xc01612f960}, {0x7ff79a2f19b0, 0x3}, {0x7ff79a8637e0, 0x0}, {0x7ff79a8637e0?, 0xc015714210?}, 0x1, 0x0, ...)
net/ipsock_posix.go:167 +0x1e5 fp=0xc019b7f198 sp=0xc019b7f110 pc=0x7ff798b7a545
net.(*sysDialer).doDialTCPProto(0xc0162bed80, {0x7ff79a85bba0, 0xc01612f960}, 0x0, 0xc015714210, 0x0)
net/tcpsock_posix.go:85 +0xe7 fp=0xc019b7f248 sp=0xc019b7f198 pc=0x7ff798b8d487
net.(*sysDialer).doDialTCP(...)
net/tcpsock_posix.go:75
net.(*sysDialer).dialTCP(0x7ff798a6beb9?, {0x7ff79a85bba0?, 0xc01612f960?}, 0x7ff799f49320?, 0xc019b7f320?)
net/tcpsock_posix.go:71 +0x69 fp=0xc019b7f288 sp=0xc019b7f248 pc=0x7ff798b8d329
net.(*sysDialer).dialSingle(0xc0162bed80, {0x7ff79a85bba0, 0xc01612f960}, {0x7ff79a850350, 0xc015714210})
net/dial.go:721 +0x3ce fp=0xc019b7f358 sp=0xc019b7f288 pc=0x7ff798b66a2e
net.(*sysDialer).dialSerial(0xc0162bed80, {0x7ff79a85bba0, 0xc01612f960}, {0xc011c37af0?, 0x1, 0x7ff798b79bbe?})
net/dial.go:686 +0x248 fp=0xc019b7f460 sp=0xc019b7f358 pc=0x7ff798b66208
net.(*sysDialer).dialParallel(0x0?, {0x7ff79a85bba0?, 0xc01612f960?}, {0xc011c37af0?, 0x7ff79a04c440?, 0x7ff79a2f2564?}, {0x0?, 0x7ff79a2f19b0?, 0x7ff79a325363?})
net/dial.go:587 +0x30a fp=0xc019b7f678 sp=0xc019b7f460 pc=0x7ff798b658ea
net.(*Dialer).DialContext(0xc000642630, {0x7ff79a85bb30, 0xc012867ef0}, {0x7ff79a2f19b0, 0x3}, {0xc0137a33b0, 0xe})
net/dial.go:578 +0x6a9 fp=0xc019b7f7e8 sp=0xc019b7f678 pc=0x7ff798b651c9
github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy.(*HTTPTransport).NewTransport.func2({0x7ff79a85bb30, 0xc012867ef0}, {0x7ff79a2f19b0, 0x3}, {0xc0137a33b0, 0xe})
github.com/caddyserver/caddy/[email protected]/modules/caddyhttp/reverseproxy/httptransport.go:284 +0x1cf fp=0xc019b7f9c0 sp=0xc019b7f7e8 pc=0x7ff799ca050f
net/http.(*Transport).dial(0x0?, {0x7ff79a85bb30?, 0xc012867ef0?}, {0x7ff79a2f19b0?, 0x7ff79a8500d0?}, {0xc0137a33b0?, 0x7ff79a841720?})
net/http/transport.go:1278 +0xd2 fp=0xc019b7fa28 sp=0xc019b7f9c0 pc=0x7ff798dd4672
net/http.(*Transport).dialConn(0xc00062e680, {0x7ff79a85bb30, 0xc012867ef0}, {{}, 0x0, {0x7ff79a2f2554, 0x4}, {0xc0137a33b0, 0xe}, 0x0})
net/http/transport.go:1780 +0x7e5 fp=0xc019b7fee0 sp=0xc019b7fa28 pc=0x7ff798dd7625
net/http.(*Transport).dialConnFor(0xc00062e680, 0xc01563f550)
net/http/transport.go:1615 +0xb8 fp=0xc019b7ff90 sp=0xc019b7fee0 pc=0x7ff798dd6138
net/http.(*Transport).startDialConnForLocked.func1()
net/http/transport.go:1597 +0x35 fp=0xc019b7ffe0 sp=0xc019b7ff90 pc=0x7ff798dd5f75
runtime.goexit({})
runtime/asm_amd64.s:1693 +0x1 fp=0xc019b7ffe8 sp=0xc019b7ffe0 pc=0x7ff798a77be1
created by net/http.(*Transport).startDialConnForLocked in goroutine 5963593
net/http/transport.go:1596 +0x112
Assistance Disclosure
AI not used
If AI was used, describe the extent to which it was used.
No response
The crash appears after about 1 minute of stress testing
Thanks for opening an issue! We'll look into this.
It's not immediately clear to me what is going on, so I'll need your help to understand it better.
Ideally, we need to be able to reproduce the bug in the most minimal way possible using the latest version of Caddy. This allows us to write regression tests to verify the fix is working. If we can't reproduce it, then you'll have to test our changes for us until it's fixed -- and then we can't add test cases, either.
I've attached a template below that will help make this easier and faster! This will require some effort on your part -- please understand that we will be dedicating time to fix the bug you are reporting if you can just help us understand it and reproduce it easily.
This template will ask for some information you've already provided; that's OK, just fill it out the best you can. :+1: I've also included some helpful tips below the template. Feel free to let me know if you have any questions!
Thank you again for your report, we look forward to resolving it!
Template
## 1. Environment
### 1a. Operating system and version
```
paste here
```
### 1b. Caddy version (run `caddy version` or paste commit SHA)
This should be the latest version of Caddy:
```
paste here
```
## 2. Description
### 2a. What happens (briefly explain what is wrong)
### 2b. Why it's a bug (if it's not obvious)
### 2c. Log output
```
paste terminal output or logs here
```
### 2d. Workaround(s)
### 2e. Relevant links
## 3. Tutorial (minimal steps to reproduce the bug)
Instructions -- please heed otherwise we cannot help you (help us help you!)
- Environment: Please fill out your OS and Caddy versions, even if you don't think they are relevant. (They are always relevant.) If you built Caddy from source, provide the commit SHA and specify your exact Go version.
- Description: Describe at a high level what the bug is. What happens? Why is it a bug? Not all bugs are obvious, so convince readers that it's actually a bug.
- 2c) Log output: Paste terminal output and/or complete logs in a code block. DO NOT REDACT INFORMATION except for credentials. Please enable debug and access logs.
- 2d) Workaround: What are you doing to work around the problem in the meantime? This can help others who encounter the same problem, until we implement a fix.
- 2e) Relevant links: Please link to any related issues, pull requests, docs, and/or discussion. This can add crucial context to your report.
- Tutorial: What are the minimum required specific steps someone needs to take in order to experience the same bug? Your goal here is to make sure that anyone else can have the same experience with the bug as you do. You are writing a tutorial, so make sure to carry it out yourself before posting it. Please:
- Start with an empty config. Add only the lines/parameters that are absolutely required to reproduce the bug.
- Do not run Caddy inside containers.
- Run Caddy manually in your terminal; do not use systemd or other init systems.
- If making HTTP requests, avoid web browsers. Use a simpler HTTP client instead, like curl.
- Do not redact any information from your config (except credentials). Domain names are public knowledge and often necessary for quick resolution of an issue!
- Note that ignoring this advice may result in delays, or even in your issue being closed. 😞 Only actionable issues are kept open, and if there is not enough information or clarity to reproduce the bug, then the report is not actionable.
Example of a tutorial:
Create a config file: { ... }
Open a terminal and run Caddy: $ caddy ...
Make an HTTP request: $ curl ...
Notice that the result is ___ but it should be ___.
1. Environment
1a. Operating system and version
Windows 11 Enterprise / 24H2 / 26100.6584
1b. Caddy version (run caddy version or paste commit SHA)
This should be the latest version of Caddy:
2.10.2
2. Description
2a. What happens (briefly explain what is wrong)
During stress testing, a large amount of logs are output, and it exits automatically.
2b. Why it's a bug (if it's not obvious)
Testing directly with the original port works fine, but when using a reverse proxy, errors occur quickly. The original port has no issues, and performance is much worse when going through the reverse proxy.
2c. Log output
net/dial.go:686 +0x248 fp=0xc019b71460 sp=0xc019b71358 pc=0x7ff798b66208
net.(*sysDialer).dialParallel(0x0?, {0x7ff79a85bba0?, 0xc016176700?}, {0xc01178de70?, 0x7ff79a04c440?, 0x7ff79a2f2564?}, {0x0?, 0x7ff79a2f19b0?, 0x7ff79a325363?})
net/dial.go:587 +0x30a fp=0xc019b71678 sp=0xc019b71460 pc=0x7ff798b658ea
net.(*Dialer).DialContext(0xc000642630, {0x7ff79a85bb30, 0xc0120ec0a0}, {0x7ff79a2f19b0, 0x3}, {0xc012fdcc90, 0xe})
net/dial.go:578 +0x6a9 fp=0xc019b717e8 sp=0xc019b71678 pc=0x7ff798b651c9
github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy.(*HTTPTransport).NewTransport.func2({0x7ff79a85bb30, 0xc0120ec0a0}, {0x7ff79a2f19b0, 0x3}, {0xc012fdcc90, 0xe})
github.com/caddyserver/caddy/[email protected]/modules/caddyhttp/reverseproxy/httptransport.go:284 +0x1cf fp=0xc019b719c0 sp=0xc019b717e8 pc=0x7ff799ca050f
net/http.(*Transport).dial(0x7ff798a6fa19?, {0x7ff79a85bb30?, 0xc0120ec0a0?}, {0x7ff79a2f19b0?, 0x0?}, {0xc012fdcc90?, 0xc000086008?})
net/http/transport.go:1278 +0xd2 fp=0xc019b71a28 sp=0xc019b719c0 pc=0x7ff798dd4672
net/http.(*Transport).dialConn(0xc00062e680, {0x7ff79a85bb30, 0xc0120ec0a0}, {{}, 0x0, {0x7ff79a2f2554, 0x4}, {0xc012fdcc90, 0xe}, 0x0})
net/http/transport.go:1780 +0x7e5 fp=0xc019b71ee0 sp=0xc019b71a28 pc=0x7ff798dd7625
net/http.(*Transport).dialConnFor(0xc00062e680, 0xc011ae7760)
net/http/transport.go:1615 +0xb8 fp=0xc019b71f90 sp=0xc019b71ee0 pc=0x7ff798dd6138
net/http.(*Transport).startDialConnForLocked.func1()
net/http/transport.go:1597 +0x35 fp=0xc019b71fe0 sp=0xc019b71f90 pc=0x7ff798dd5f75
runtime.goexit({})
runtime/asm_amd64.s:1693 +0x1 fp=0xc019b71fe8 sp=0xc019b71fe0 pc=0x7ff798a77be1
created by net/http.(*Transport).startDialConnForLocked in goroutine 5963650
net/http/transport.go:1596 +0x112
goroutine 7322900 gp=0xc019b4ddc0 m=nil [runnable]:
runtime.cgocall(0x7ff798a79400, 0xc019b55b58)
runtime/cgocall.go:167 +0x3e fp=0xc019b7ed70 sp=0xc019b7ed08 pc=0x7ff798a6bf9e
runtime.syscall_syscalln(0x10?, 0x7ff798a0acbb?, {0xc019b7edb8?, 0x0?, 0x0?})
runtime/syscall_windows.go:521 +0x4e fp=0xc019b7ed90 sp=0xc019b7ed70 pc=0x7ff798a5844e
syscall.Syscall(0xc0158e6020?, 0xc019b7ee18?, 0x7ff798a6cb05?, 0x1?, 0xc019b7ee40?)
runtime/syscall_windows.go:457 +0x29 fp=0xc019b7ede0 sp=0xc019b7ed90 pc=0x7ff798a735c9
syscall.bind(0x4a6a4, 0x7ff79a14b200?, 0x10)
syscall/zsyscall_windows.go:1344 +0x65 fp=0xc019b7ee28 sp=0xc019b7ede0 pc=0x7ff798ad8105
syscall.Bind(0x4a6a4, {0x7ff79a841e00?, 0xc0158e6020?})
syscall/syscall_windows.go:981 +0x46 fp=0xc019b7ee50 sp=0xc019b7ee28 pc=0x7ff798ad04a6
net.(*netFD).connect(0xc019b51688, {0x7ff79a85bba0, 0xc01612f960}, {0x0, 0x0?}, {0x7ff79a841e00, 0xc0158e6000})
net/fd_windows.go:137 +0x389 fp=0xc019b7ef90 sp=0xc019b7ee50 pc=0x7ff798b708c9
net.(*netFD).dial(0xc019b51688, {0x7ff79a85bba0, 0xc01612f960}, {0x7ff79a8637e0?, 0x0?}, {0x7ff79a8637e0, 0xc015714210}, 0x7ff798b7486b?)
net/sock_posix.go:124 +0x3c5 fp=0xc019b7f068 sp=0xc019b7ef90 pc=0x7ff798b89ca5
net.socket({0x7ff79a85bba0, 0xc01612f960}, {0x7ff79a2f19b0, 0x3}, 0x2, 0x1, 0x0?, 0x0, {0x7ff79a8637e0, 0x0}, ...)
net/sock_posix.go:70 +0x2af fp=0xc019b7f110 sp=0xc019b7f068 pc=0x7ff798b897ef
net.internetSocket({0x7ff79a85bba0, 0xc01612f960}, {0x7ff79a2f19b0, 0x3}, {0x7ff79a8637e0, 0x0}, {0x7ff79a8637e0?, 0xc015714210?}, 0x1, 0x0, ...)
net/ipsock_posix.go:167 +0x1e5 fp=0xc019b7f198 sp=0xc019b7f110 pc=0x7ff798b7a545
net.(*sysDialer).doDialTCPProto(0xc0162bed80, {0x7ff79a85bba0, 0xc01612f960}, 0x0, 0xc015714210, 0x0)
net/tcpsock_posix.go:85 +0xe7 fp=0xc019b7f248 sp=0xc019b7f198 pc=0x7ff798b8d487
net.(*sysDialer).doDialTCP(...)
net/tcpsock_posix.go:75
net.(*sysDialer).dialTCP(0x7ff798a6beb9?, {0x7ff79a85bba0?, 0xc01612f960?}, 0x7ff799f49320?, 0xc019b7f320?)
net/tcpsock_posix.go:71 +0x69 fp=0xc019b7f288 sp=0xc019b7f248 pc=0x7ff798b8d329
net.(*sysDialer).dialSingle(0xc0162bed80, {0x7ff79a85bba0, 0xc01612f960}, {0x7ff79a850350, 0xc015714210})
net/dial.go:721 +0x3ce fp=0xc019b7f358 sp=0xc019b7f288 pc=0x7ff798b66a2e
net.(*sysDialer).dialSerial(0xc0162bed80, {0x7ff79a85bba0, 0xc01612f960}, {0xc011c37af0?, 0x1, 0x7ff798b79bbe?})
net/dial.go:686 +0x248 fp=0xc019b7f460 sp=0xc019b7f358 pc=0x7ff798b66208
net.(*sysDialer).dialParallel(0x0?, {0x7ff79a85bba0?, 0xc01612f960?}, {0xc011c37af0?, 0x7ff79a04c440?, 0x7ff79a2f2564?}, {0x0?, 0x7ff79a2f19b0?, 0x7ff79a325363?})
net/dial.go:587 +0x30a fp=0xc019b7f678 sp=0xc019b7f460 pc=0x7ff798b658ea
net.(*Dialer).DialContext(0xc000642630, {0x7ff79a85bb30, 0xc012867ef0}, {0x7ff79a2f19b0, 0x3}, {0xc0137a33b0, 0xe})
net/dial.go:578 +0x6a9 fp=0xc019b7f7e8 sp=0xc019b7f678 pc=0x7ff798b651c9
github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy.(*HTTPTransport).NewTransport.func2({0x7ff79a85bb30, 0xc012867ef0}, {0x7ff79a2f19b0, 0x3}, {0xc0137a33b0, 0xe})
github.com/caddyserver/caddy/[email protected]/modules/caddyhttp/reverseproxy/httptransport.go:284 +0x1cf fp=0xc019b7f9c0 sp=0xc019b7f7e8 pc=0x7ff799ca050f
net/http.(*Transport).dial(0x0?, {0x7ff79a85bb30?, 0xc012867ef0?}, {0x7ff79a2f19b0?, 0x7ff79a8500d0?}, {0xc0137a33b0?, 0x7ff79a841720?})
net/http/transport.go:1278 +0xd2 fp=0xc019b7fa28 sp=0xc019b7f9c0 pc=0x7ff798dd4672
net/http.(*Transport).dialConn(0xc00062e680, {0x7ff79a85bb30, 0xc012867ef0}, {{}, 0x0, {0x7ff79a2f2554, 0x4}, {0xc0137a33b0, 0xe}, 0x0})
net/http/transport.go:1780 +0x7e5 fp=0xc019b7fee0 sp=0xc019b7fa28 pc=0x7ff798dd7625
net/http.(*Transport).dialConnFor(0xc00062e680, 0xc01563f550)
net/http/transport.go:1615 +0xb8 fp=0xc019b7ff90 sp=0xc019b7fee0 pc=0x7ff798dd6138
net/http.(*Transport).startDialConnForLocked.func1()
net/http/transport.go:1597 +0x35 fp=0xc019b7ffe0 sp=0xc019b7ff90 pc=0x7ff798dd5f75
runtime.goexit({})
runtime/asm_amd64.s:1693 +0x1 fp=0xc019b7ffe8 sp=0xc019b7ffe0 pc=0x7ff798a77be1
created by net/http.(*Transport).startDialConnForLocked in goroutine 5963593
net/http/transport.go:1596 +0x112
2d. Workaround(s)
Using Nginx
2e. Relevant links
Download link for the stress testing tool https://wwxn.lanzn.com/ijCzU367lr7c
3. Tutorial (minimal steps to reproduce the bug)
package main
import (
"fmt"
"net/http"
)
func handler(w http.ResponseWriter, r *http.Request) {
// 从 URL 查询参数中获取 name 的值
name := r.URL.Query().Get("name")
if name == "" {
name = "world" // 默认值
}
fmt.Fprintf(w, "hello %s", name)
}
// main registers handler at the root path on the default mux and serves
// plain HTTP on port 8080, printing any startup/serve error to stdout.
func main() {
	http.HandleFunc("/", handler)
	fmt.Println("Server is running at http://127.0.0.1:8080/")
	// ListenAndServe blocks until the server fails; report the reason.
	err := http.ListenAndServe(":8080", nil)
	if err != nil {
		fmt.Println("Error starting server:", err)
	}
}
Thanks, but those are still not the full logs. It is part of the stack trace but I can't know what or where the error is without all the logs. Including all previous lines.
There are quite a lot of logs, all of which are the same type as previously sent. I recorded the entire process, but the file is too large—over 200 MB after compression—so it can't be uploaded. If you use the stress testing tool and reverse proxy configuration I provided, the issue can typically be reproduced within just one minute of testing.
The problem is you started copying right in the middle of the single message which means it's completely useless. We need to see the full start of the error message, and ideally some lines of logs before that.
You can use grep or other tools -- even a text editor if you have enough RAM -- to isolate the relevant logs.
It doesn't show what went wrong at all, it automatically displays all this useless information, and there's way too much of it—it just won't stop. Even pressing CTRL+C doesn't work.
The output is too large; what was initially displayed can no longer be seen. It's probably the same issue. Just compile the Go code I provided, set up a reverse proxy forwarding port 80 to 8080, and use the stress testing tool I provided with 100 concurrent connections and keep-alive enabled. Run the test for one minute to reproduce the issue.
It's not useless information, it's a stack trace. We need the start of it. If we don't have that then we have nothing. You can easily pipe Caddy's logs to a file, then open that file and copy the start of where the error started.
The information about the starting position has been overwritten and is no longer visible, because the stack trace output is too extensive and non-stop, and the terminal cannot display that much information.
Like I said, pipe it to a file.
.\caddy.exe : {"level":"info","ts":1758162674.0990374,"msg":"maxprocs: Leaving GOMAXPROCS=12: CPU quota undefined"}
所在位置 行:1 字符: 1
+ .\caddy.exe run > caddy.log 2>&1
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ CategoryInfo : NotSpecified: ({"level":"info"...ota undefined"}:String) [], RemoteException
+ FullyQualifiedErrorId : NativeCommandError
{"level":"info","ts":1758162674.0990374,"msg":"GOMEMLIMIT is updated","package":"github.com/KimMachineGun/automemlimit/
memlimit","GOMEMLIMIT":15329636352,"previous":9223372036854775807}
{"level":"info","ts":1758162674.0990374,"msg":"using adjacent Caddyfile"}
{"level":"info","ts":1758162674.1000834,"msg":"adapted config to JSON","adapter":"caddyfile"}
{"level":"warn","ts":1758162674.1000834,"msg":"Caddyfile input is not formatted; run 'caddy fmt --overwrite' to fix inc
onsistencies","adapter":"caddyfile","file":"Caddyfile","line":2}
{"level":"info","ts":1758162674.1006556,"msg":"redirected default logger","from":"stderr","to":"C:\\Users\\afeng\\Deskt
op\\caddy\\access.log"}
{"level":"info","ts":1758162674.103221,"msg":"serving initial configuration"}
runtime: program exceeds 10000-thread limit
fatal error: thread exhaustion
runtime stack:
runtime.throw({0x7ff777762009?, 0xc00009cb10?})
runtime/panic.go:1094 +0x4d fp=0x1b05ff648 sp=0x1b05ff618 pc=0x7ff775ebf36d
runtime.checkmcount()
runtime/proc.go:962 +0x8e fp=0x1b05ff670 sp=0x1b05ff648 pc=0x7ff775e8be8e
runtime.mReserveID()
runtime/proc.go:978 +0x2f fp=0x1b05ff698 sp=0x1b05ff670 pc=0x7ff775e8becf
runtime.startm(0xc000075208?, 0x0, 0x0)
runtime/proc.go:3079 +0x112 fp=0x1b05ff6e8 sp=0x1b05ff698 pc=0x7ff775e8fb92
runtime.handoffp(0x7ffcffffffff?)
runtime/proc.go:3128 +0x345 fp=0x1b05ff710 sp=0x1b05ff6e8 pc=0x7ff775e900a5
runtime.retake(0x22d552519f04c)
runtime/proc.go:6441 +0x255 fp=0x1b05ff770 sp=0x1b05ff710 pc=0x7ff775e98355
runtime.sysmon()
runtime/proc.go:6345 +0x3e8 fp=0x1b05ff808 sp=0x1b05ff770 pc=0x7ff775e97f08
runtime.mstart1()
runtime/proc.go:1928 +0x9d fp=0x1b05ff830 sp=0x1b05ff808 pc=0x7ff775e8e21d
runtime.mstart0()
runtime/proc.go:1881 +0x6a fp=0x1b05ff858 sp=0x1b05ff830 pc=0x7ff775e8e16a
runtime.mstart()
runtime/asm_amd64.s:395 +0x5 fp=0x1b05ff860 sp=0x1b05ff858 pc=0x7ff775ec5b05
goroutine 1 gp=0xc0000021c0 m=nil [select (no cases)]:
runtime.gopark(0xc0007879b8?, 0x7ff775f5d78c?, 0x3a?, 0x2a?, 0x0?)
runtime/proc.go:460 +0xce fp=0xc000b3f998 sp=0xc000b3f978 pc=0x7ff775ebf48e
runtime.block()
runtime/select.go:104 +0x26 fp=0xc000b3f9c8 sp=0xc000b3f998 pc=0x7ff775e9d106
github.com/caddyserver/caddy/v2/cmd.cmdRun({0x0?})
github.com/caddyserver/caddy/[email protected]/cmd/commandfuncs.go:292 +0xc72 fp=0xc000b3fc08 sp=0xc000b3f9c8 pc=0x7ff7766498
b2
github.com/caddyserver/caddy/v2/cmd.init.1.func2.WrapCommandFuncForCobra.1(0xc000724608, {0x7ff7777424fc?, 0x4?, 0x7ff7
777424d4?})
github.com/caddyserver/caddy/[email protected]/cmd/cobra.go:141 +0x2f fp=0xc000b3fc48 sp=0xc000b3fc08 pc=0x7ff776656f6f
github.com/spf13/cobra.(*Command).execute(0xc000724608, {0x7ff778c81940, 0x0, 0x0})
github.com/spf13/[email protected]/command.go:1015 +0xb02 fp=0xc000b3fdf0 sp=0xc000b3fc48 pc=0x7ff776067b42
github.com/spf13/cobra.(*Command).ExecuteC(0xc000724008)
github.com/spf13/[email protected]/command.go:1148 +0x465 fp=0xc000b3fee0 sp=0xc000b3fdf0 pc=0x7ff776068485
github.com/spf13/cobra.(*Command).Execute(...)
github.com/spf13/[email protected]/command.go:1071
github.com/caddyserver/caddy/v2/cmd.Main()
github.com/caddyserver/caddy/[email protected]/cmd/main.go:72 +0x65 fp=0xc000b3ff40 sp=0xc000b3fee0 pc=0x7ff77664e0a5
main.main()
caddy/main.go:11 +0xf fp=0xc000b3ff50 sp=0xc000b3ff40 pc=0x7ff77720938f
runtime.main()
runtime/proc.go:285 +0x27d fp=0xc000b3ffe0 sp=0xc000b3ff50 pc=0x7ff775e8acdd
runtime.goexit({})
runtime/asm_amd64.s:1693 +0x1 fp=0xc000b3ffe8 sp=0xc000b3ffe0 pc=0x7ff775ec7be1
goroutine 2 gp=0xc0000028c0 m=nil [force gc (idle)]:
runtime.gopark(0x0?, 0x0?, 0x0?, 0x0?, 0x0?)
runtime/proc.go:460 +0xce fp=0xc000093fa8 sp=0xc000093f88 pc=0x7ff775ebf48e
runtime.goparkunlock(...)
runtime/proc.go:466
runtime.forcegchelper()
runtime/proc.go:373 +0xb8 fp=0xc000093fe0 sp=0xc000093fa8 pc=0x7ff775e8aff8
runtime.goexit({})
runtime/asm_amd64.s:1693 +0x1 fp=0xc000093fe8 sp=0xc000093fe0 pc=0x7ff775ec7be1
created by runtime.init.7 in goroutine 1
runtime/proc.go:361 +0x1a
goroutine 3 gp=0xc000002c40 m=nil [GC sweep wait]:
runtime.gopark(0x1?, 0x0?, 0x0?, 0x0?, 0x0?)
runtime/proc.go:460 +0xce fp=0xc000095f80 sp=0xc000095f60 pc=0x7ff775ebf48e
runtime.goparkunlock(...)
runtime/proc.go:466
runtime.bgsweep(0xc0000a2000)
runtime/mgcsweep.go:323 +0xdf fp=0xc000095fc8 sp=0xc000095f80 pc=0x7ff775e7053f
runtime.gcenable.gowrap1()
runtime/mgc.go:212 +0x25 fp=0xc000095fe0 sp=0xc000095fc8 pc=0x7ff775e643c5
runtime.goexit({})
runtime/asm_amd64.s:1693 +0x1 fp=0xc000095fe8 sp=0xc000095fe0 pc=0x7ff775ec7be1
created by runtime.gcenable in goroutine 1
runtime/mgc.go:212 +0x66
goroutine 4 gp=0xc000002e00 m=nil [GC scavenge wait]:
runtime.gopark(0xb69248?, 0x4ea191?, 0x0?, 0x0?, 0x0?)
runtime/proc.go:460 +0xce fp=0xc0000a9f78 sp=0xc0000a9f58 pc=0x7ff775ebf48e
runtime.goparkunlock(...)
runtime/proc.go:466
runtime.(*scavengerState).park(0x7ff778c36ee0)
runtime/mgcscavenge.go:425 +0x49 fp=0xc0000a9fa8 sp=0xc0000a9f78 pc=0x7ff775e6dfa9
runtime.bgscavenge(0xc0000a2000)
runtime/mgcscavenge.go:658 +0x59 fp=0xc0000a9fc8 sp=0xc0000a9fa8 pc=0x7ff775e6e559
runtime.gcenable.gowrap2()
runtime/mgc.go:213 +0x25 fp=0xc0000a9fe0 sp=0xc0000a9fc8 pc=0x7ff775e64365
runtime.goexit({})
runtime/asm_amd64.s:1693 +0x1 fp=0xc0000a9fe8 sp=0xc0000a9fe0 pc=0x7ff775ec7be1
created by runtime.gcenable in goroutine 1
runtime/mgc.go:213 +0xa5
goroutine 5 gp=0xc000003180 m=nil [GOMAXPROCS updater (idle)]:
runtime.gopark(0x0?, 0x0?, 0x0?, 0x0?, 0x0?)
runtime/proc.go:460 +0xce fp=0xc0000abf88 sp=0xc0000abf68 pc=0x7ff775ebf48e
runtime.goparkunlock(...)
runtime/proc.go:466
runtime.updateMaxProcsGoroutine()
runtime/proc.go:6706 +0xe7 fp=0xc0000abfe0 sp=0xc0000abf88 pc=0x7ff775e99007
runtime.goexit({})
runtime/asm_amd64.s:1693 +0x1 fp=0xc0000abfe8 sp=0xc0000abfe0 pc=0x7ff775ec7be1
created by runtime.defaultGOMAXPROCSUpdateEnable in goroutine 1
runtime/proc.go:6694 +0x37
goroutine 6 gp=0xc000003500 m=nil [finalizer wait]:
runtime.gopark(0x7ff775e99ffa?, 0x7ff775e6d3dc?, 0x20?, 0x40?, 0x490013?)
runtime/proc.go:460 +0xce fp=0xc000097e20 sp=0xc000097e00 pc=0x7ff775ebf48e
runtime.runFinalizers()
runtime/mfinal.go:210 +0x107 fp=0xc000097fe0 sp=0xc000097e20 pc=0x7ff775e63307
runtime.goexit({})
runtime/asm_amd64.s:1693 +0x1 fp=0xc000097fe8 sp=0xc000097fe0 pc=0x7ff775ec7be1
created by runtime.createfing in goroutine 1
runtime/mfinal.go:172 +0x3d
goroutine 18 gp=0xc000106380 m=nil [GC worker (idle)]:
runtime.gopark(0x22d54dfe48e4c?, 0x1?, 0x0?, 0x0?, 0x0?)
runtime/proc.go:460 +0xce fp=0xc0000a5f38 sp=0xc0000a5f18 pc=0x7ff775ebf48e
runtime.gcBgMarkWorker(0xc0002181c0)
runtime/mgc.go:1463 +0xeb fp=0xc0000a5fc8 sp=0xc0000a5f38 pc=0x7ff775e66b4b
runtime.gcBgMarkStartWorkers.gowrap1()
runtime/mgc.go:1373 +0x25 fp=0xc0000a5fe0 sp=0xc0000a5fc8 pc=0x7ff775e66a25
runtime.goexit({})
runtime/asm_amd64.s:1693 +0x1 fp=0xc0000a5fe8 sp=0xc0000a5fe0 pc=0x7ff775ec7be1
created by runtime.gcBgMarkStartWorkers in goroutine 1
runtime/mgc.go:1373 +0x105
goroutine 34 gp=0xc0004861c0 m=nil [GC worker (idle)]:
runtime.gopark(0x22d54dfdb0160?, 0x1?, 0x0?, 0x0?, 0x0?)
runtime/proc.go:460 +0xce fp=0xc000491f38 sp=0xc000491f18 pc=0x7ff775ebf48e
runtime.gcBgMarkWorker(0xc0002181c0)
runtime/mgc.go:1463 +0xeb fp=0xc000491fc8 sp=0xc000491f38 pc=0x7ff775e66b4b
runtime.gcBgMarkStartWorkers.gowrap1()
runtime/mgc.go:1373 +0x25 fp=0xc000491fe0 sp=0xc000491fc8 pc=0x7ff775e66a25
runtime.goexit({})
runtime/asm_amd64.s:1693 +0x1 fp=0xc000491fe8 sp=0xc000491fe0 pc=0x7ff775ec7be1
created by runtime.gcBgMarkStartWorkers in goroutine 1
runtime/mgc.go:1373 +0x105
goroutine 35 gp=0xc000486380 m=nil [GC worker (idle)]:
runtime.gopark(0x22d54dfe48e4c?, 0x3?, 0x64?, 0xb7?, 0x0?)
runtime/proc.go:460 +0xce fp=0xc000493f38 sp=0xc000493f18 pc=0x7ff775ebf48e
runtime.gcBgMarkWorker(0xc0002181c0)
runtime/mgc.go:1463 +0xeb fp=0xc000493fc8 sp=0xc000493f38 pc=0x7ff775e66b4b
runtime.gcBgMarkStartWorkers.gowrap1()
runtime/mgc.go:1373 +0x25 fp=0xc000493fe0 sp=0xc000493fc8 pc=0x7ff775e66a25
runtime.goexit({})
runtime/asm_amd64.s:1693 +0x1 fp=0xc000493fe8 sp=0xc000493fe0 pc=0x7ff775ec7be1
created by runtime.gcBgMarkStartWorkers in goroutine 1
runtime/mgc.go:1373 +0x105
goroutine 36 gp=0xc000486540 m=nil [GC worker (idle)]:
runtime.gopark(0x22d5331ba5154?, 0x1?, 0xc?, 0xbd?, 0x0?)
runtime/proc.go:460 +0xce fp=0xc00048df38 sp=0xc00048df18 pc=0x7ff775ebf48e
runtime.gcBgMarkWorker(0xc0002181c0)
runtime/mgc.go:1463 +0xeb fp=0xc00048dfc8 sp=0xc00048df38 pc=0x7ff775e66b4b
runtime.gcBgMarkStartWorkers.gowrap1()
runtime/mgc.go:1373 +0x25 fp=0xc00048dfe0 sp=0xc00048dfc8 pc=0x7ff775e66a25
runtime.goexit({})
runtime/asm_amd64.s:1693 +0x1 fp=0xc00048dfe8 sp=0xc00048dfe0 pc=0x7ff775ec7be1
created by runtime.gcBgMarkStartWorkers in goroutine 1
runtime/mgc.go:1373 +0x105
goroutine 37 gp=0xc000486700 m=nil [GC worker (idle)]:
runtime.gopark(0x22d5331ba5154?, 0x1?, 0x98?, 0xf3?, 0x0?)
runtime/proc.go:460 +0xce fp=0xc00048ff38 sp=0xc00048ff18 pc=0x7ff775ebf48e
runtime.gcBgMarkWorker(0xc0002181c0)
runtime/mgc.go:1463 +0xeb fp=0xc00048ffc8 sp=0xc00048ff38 pc=0x7ff775e66b4b
runtime.gcBgMarkStartWorkers.gowrap1()
runtime/mgc.go:1373 +0x25 fp=0xc00048ffe0 sp=0xc00048ffc8 pc=0x7ff775e66a25
runtime.goexit({})
runtime/asm_amd64.s:1693 +0x1 fp=0xc00048ffe8 sp=0xc00048ffe0 pc=0x7ff775ec7be1
created by runtime.gcBgMarkStartWorkers in goroutine 1
runtime/mgc.go:1373 +0x105
goroutine 38 gp=0xc0004868c0 m=nil [GC worker (idle)]:
runtime.gopark(0x22d54dfe48e4c?, 0x1?, 0x0?, 0x0?, 0x0?)
runtime/proc.go:460 +0xce fp=0xc00049bf38 sp=0xc00049bf18 pc=0x7ff775ebf48e
runtime.gcBgMarkWorker(0xc0002181c0)
runtime/mgc.go:1463 +0xeb fp=0xc00049bfc8 sp=0xc00049bf38 pc=0x7ff775e66b4b
runtime.gcBgMarkStartWorkers.gowrap1()
runtime/mgc.go:1373 +0x25 fp=0xc00049bfe0 sp=0xc00049bfc8 pc=0x7ff775e66a25
runtime.goexit({})
runtime/asm_amd64.s:1693 +0x1 fp=0xc00049bfe8 sp=0xc00049bfe0 pc=0x7ff775ec7be1
created by runtime.gcBgMarkStartWorkers in goroutine 1
runtime/mgc.go:1373 +0x105
goroutine 7 gp=0xc0000036c0 m=nil [GC worker (idle)]:
runtime.gopark(0x22d54dfdb0160?, 0x1?, 0x0?, 0x0?, 0x0?)
runtime/proc.go:460 +0xce fp=0xc000497f38 sp=0xc000497f18 pc=0x7ff775ebf48e
runtime.gcBgMarkWorker(0xc0002181c0)
runtime/mgc.go:1463 +0xeb fp=0xc000497fc8 sp=0xc000497f38 pc=0x7ff775e66b4b
runtime.gcBgMarkStartWorkers.gowrap1()
runtime/mgc.go:1373 +0x25 fp=0xc000497fe0 sp=0xc000497fc8 pc=0x7ff775e66a25
runtime.goexit({})
runtime/asm_amd64.s:1693 +0x1 fp=0xc000497fe8 sp=0xc000497fe0 pc=0x7ff775ec7be1
created by runtime.gcBgMarkStartWorkers in goroutine 1
runtime/mgc.go:1373 +0x105
goroutine 19 gp=0xc000106540 m=nil [GC worker (idle)]:
runtime.gopark(0x7ff778c84020?, 0x1?, 0x88?, 0xd3?, 0x0?)
runtime/proc.go:460 +0xce fp=0xc0000a7f38 sp=0xc0000a7f18 pc=0x7ff775ebf48e
runtime.gcBgMarkWorker(0xc0002181c0)
runtime/mgc.go:1463 +0xeb fp=0xc0000a7fc8 sp=0xc0000a7f38 pc=0x7ff775e66b4b
runtime.gcBgMarkStartWorkers.gowrap1()
runtime/mgc.go:1373 +0x25 fp=0xc0000a7fe0 sp=0xc0000a7fc8 pc=0x7ff775e66a25
runtime.goexit({})
runtime/asm_amd64.s:1693 +0x1 fp=0xc0000a7fe8 sp=0xc0000a7fe0 pc=0x7ff775ec7be1
created by runtime.gcBgMarkStartWorkers in goroutine 1
runtime/mgc.go:1373 +0x105
goroutine 39 gp=0xc000486a80 m=nil [GC worker (idle)]:
runtime.gopark(0x22d5331ba5154?, 0x3?, 0x64?, 0xb7?, 0x0?)
runtime/proc.go:460 +0xce fp=0xc00049df38 sp=0xc00049df18 pc=0x7ff775ebf48e
runtime.gcBgMarkWorker(0xc0002181c0)
runtime/mgc.go:1463 +0xeb fp=0xc00049dfc8 sp=0xc00049df38 pc=0x7ff775e66b4b
runtime.gcBgMarkStartWorkers.gowrap1()
runtime/mgc.go:1373 +0x25 fp=0xc00049dfe0 sp=0xc00049dfc8 pc=0x7ff775e66a25
runtime.goexit({})
runtime/asm_amd64.s:1693 +0x1 fp=0xc00049dfe8 sp=0xc00049dfe0 pc=0x7ff775ec7be1
created by runtime.gcBgMarkStartWorkers in goroutine 1
runtime/mgc.go:1373 +0x105
goroutine 8 gp=0xc000003880 m=nil [GC worker (idle)]:
runtime.gopark(0x22d54dfdb0160?, 0x1?, 0x4?, 0xd2?, 0x0?)
runtime/proc.go:460 +0xce fp=0xc000499f38 sp=0xc000499f18 pc=0x7ff775ebf48e
runtime.gcBgMarkWorker(0xc0002181c0)
runtime/mgc.go:1463 +0xeb fp=0xc000499fc8 sp=0xc000499f38 pc=0x7ff775e66b4b
runtime.gcBgMarkStartWorkers.gowrap1()
runtime/mgc.go:1373 +0x25 fp=0xc000499fe0 sp=0xc000499fc8 pc=0x7ff775e66a25
runtime.goexit({})
runtime/asm_amd64.s:1693 +0x1 fp=0xc000499fe8 sp=0xc000499fe0 pc=0x7ff775ec7be1
created by runtime.gcBgMarkStartWorkers in goroutine 1
runtime/mgc.go:1373 +0x105
goroutine 20 gp=0xc000106700 m=nil [GC worker (idle)]:
runtime.gopark(0x22d54dfe48e4c?, 0x3?, 0x64?, 0xb7?, 0x0?)
runtime/proc.go:460 +0xce fp=0xc00044df38 sp=0xc00044df18 pc=0x7ff775ebf48e
runtime.gcBgMarkWorker(0xc0002181c0)
runtime/mgc.go:1463 +0xeb fp=0xc00044dfc8 sp=0xc00044df38 pc=0x7ff775e66b4b
runtime.gcBgMarkStartWorkers.gowrap1()
runtime/mgc.go:1373 +0x25 fp=0xc00044dfe0 sp=0xc00044dfc8 pc=0x7ff775e66a25
runtime.goexit({})
runtime/asm_amd64.s:1693 +0x1 fp=0xc00044dfe8 sp=0xc00044dfe0 pc=0x7ff775ec7be1
created by runtime.gcBgMarkStartWorkers in goroutine 1
runtime/mgc.go:1373 +0x105
goroutine 40 gp=0xc000486c40 m=nil [GC worker (idle)]:
runtime.gopark(0x22d5331ba5154?, 0x3?, 0x64?, 0xb7?, 0x0?)
runtime/proc.go:460 +0xce fp=0xc000449f38 sp=0xc000449f18 pc=0x7ff775ebf48e
runtime.gcBgMarkWorker(0xc0002181c0)
runtime/mgc.go:1463 +0xeb fp=0xc000449fc8 sp=0xc000449f38 pc=0x7ff775e66b4b
runtime.gcBgMarkStartWorkers.gowrap1()
runtime/mgc.go:1373 +0x25 fp=0xc000449fe0 sp=0xc000449fc8 pc=0x7ff775e66a25
runtime.goexit({})
runtime/asm_amd64.s:1693 +0x1 fp=0xc000449fe8 sp=0xc000449fe0 pc=0x7ff775ec7be1
created by runtime.gcBgMarkStartWorkers in goroutine 1
runtime/mgc.go:1373 +0x105
goroutine 9 gp=0xc000487500 m=nil [cleanup wait]:
runtime.gopark(0x0?, 0x101000000000000?, 0x9e?, 0x1?, 0x2?)
runtime/proc.go:460 +0xce fp=0xc00044bf68 sp=0xc00044bf48 pc=0x7ff775ebf48e
runtime.goparkunlock(...)
runtime/proc.go:466
runtime.(*cleanupQueue).dequeue(0x7ff778c373e0)
runtime/mcleanup.go:439 +0xc5 fp=0xc00044bfa0 sp=0xc00044bf68 pc=0x7ff775e601c5
runtime.runCleanups()
runtime/mcleanup.go:635 +0x45 fp=0xc00044bfe0 sp=0xc00044bfa0 pc=0x7ff775e60885
runtime.goexit({})
runtime/asm_amd64.s:1693 +0x1 fp=0xc00044bfe8 sp=0xc00044bfe0 pc=0x7ff775ec7be1
created by runtime.(*cleanupQueue).createGs in goroutine 1
runtime/mcleanup.go:589 +0xa5
goroutine 1401798 gp=0xc000106a80 m=556 mp=0xc000b9c008 [syscall]:
runtime.cgocall(0x7ff775ec9400, 0xc000b9c358)
runtime/cgocall.go:167 +0x3e fp=0xc001c32f10 sp=0xc001c32ea8 pc=0x7ff775ebbf9e
runtime.syscall_syscalln(0x7ff775fc1080?, 0xc00293b210?, {0xc001c32f58?, 0x0?, 0x0?})
runtime/syscall_windows.go:521 +0x4e fp=0xc001c32f30 sp=0xc001c32f10 pc=0x7ff775ea844e
syscall.Syscall(0xc001c33058?, 0x1925ae31430?, 0xc001c32fb0?, 0x7ff775ebe4c5?, 0x7ff778c827f0?)
runtime/syscall_windows.go:457 +0x29 fp=0xc001c32f80 sp=0xc001c32f30 pc=0x7ff775ec35c9
syscall.Closesocket(0x338c)
syscall/zsyscall_windows.go:1352 +0x4f fp=0xc001c32fc0 sp=0xc001c32f80 pc=0x7ff775f281ef
internal/poll.(*FD).destroy(0xc000cf4288)
internal/poll/fd_windows.go:461 +0xa2 fp=0xc001c32fe8 sp=0xc001c32fc0 pc=0x7ff775f52702
internal/poll.(*FD).decref(0x1925ae31400?)
internal/poll/fd_mutex.go:213 +0x53 fp=0xc001c33008 sp=0xc001c32fe8 pc=0x7ff775f504f3
internal/poll.(*FD).Close(0xc000cf4288)
internal/poll/fd_windows.go:482 +0x65 fp=0xc001c33038 sp=0xc001c33008 pc=0x7ff775f527e5
net.(*netFD).Close(0xc000cf4288)
net/fd_posix.go:50 +0x32 fp=0xc001c33068 sp=0xc001c33038 pc=0x7ff775fbf0d2
net.socket({0x7ff777cabba0, 0xc001cc8a80}, {0x7ff7777419b0, 0x3}, 0x2, 0x1, 0x7ff777a41188?, 0x0, {0x7ff777cb37e0, 0x0}, ...)
net/sock_posix.go:71 +0x2c8 fp=0xc001c33110 sp=0xc001c33068 pc=0x7ff775fd9808
net.internetSocket({0x7ff777cabba0, 0xc001cc8a80}, {0x7ff7777419b0, 0x3}, {0x7ff777cb37e0, 0x0}, {0x7ff777cb37e0?, 0xc001f1e3f0?}, 0x1, 0x0, ...)
net/ipsock_posix.go:167 +0x1e5 fp=0xc001c33198 sp=0xc001c33110 pc=0x7ff775fca545
net.(*sysDialer).doDialTCPProto(0xc002edf5c0, {0x7ff777cabba0, 0xc001cc8a80}, 0x0, 0xc001f1e3f0, 0x0)
net/tcpsock_posix.go:85 +0xe7 fp=0xc001c33248 sp=0xc001c33198 pc=0x7ff775fdd487
net.(*sysDialer).doDialTCP(...)
net/tcpsock_posix.go:75
net.(*sysDialer).dialTCP(0x7ff775ebbeb9?, {0x7ff777cabba0?, 0xc001cc8a80?}, 0x7ff777399320?, 0xc001c33320?)
net/tcpsock_posix.go:71 +0x69 fp=0xc001c33288 sp=0xc001c33248 pc=0x7ff775fdd329
net.(*sysDialer).dialSingle(0xc002edf5c0, {0x7ff777cabba0, 0xc001cc8a80}, {0x7ff777ca0350, 0xc001f1e3f0})
net/dial.go:721 +0x3ce fp=0xc001c33358 sp=0xc001c33288 pc=0x7ff775fb6a2e
net.(*sysDialer).dialSerial(0xc002edf5c0, {0x7ff777cabba0, 0xc001cc8a80}, {0xc00293b200?, 0x1, 0x7ff775fc9bbe?})
net/dial.go:686 +0x248 fp=0xc001c33460 sp=0xc001c33358 pc=0x7ff775fb6208
net.(*sysDialer).dialParallel(0x0?, {0x7ff777cabba0?, 0xc001cc8a80?}, {0xc00293b200?, 0x7ff77749c440?, 0x7ff777742564?}, {0x0?, 0x7ff7777419b0?, 0x7ff777775363?})
net/dial.go:587 +0x30a fp=0xc001c33678 sp=0xc001c33460 pc=0x7ff775fb58ea
net.(*Dialer).DialContext(0xc0007314d0, {0x7ff777cabb30, 0xc002dd2780}, {0x7ff7777419b0, 0x3}, {0xc0035c9cf0, 0xe})
net/dial.go:578 +0x6a9 fp=0xc001c337e8 sp=0xc001c33678 pc=0x7ff775fb51c9
github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy.(*HTTPTransport).NewTransport.func2({0x7ff777cabb30, 0xc002dd2780}, {0x7ff7777419b0, 0x3}, {0xc0035c9cf0, 0xe})
github.com/caddyserver/caddy/[email protected]/modules/caddyhttp/reverseproxy/httptransport.go:284 +0x1cf fp=0xc001c339c0 sp=0xc001c337e8 pc=0x7ff7770f050f
net/http.(*Transport).dial(0x7ff775ebfa19?, {0x7ff777cabb30?, 0xc002dd2780?}, {0x7ff7777419b0?, 0x0?}, {0xc0035c9cf0?, 0xc000075208?})
net/http/transport.go:1278 +0xd2 fp=0xc001c33a28 sp=0xc001c339c0 pc=0x7ff776224672
net/http.(*Transport).dialConn(0xc00041cea0, {0x7ff777cabb30, 0xc002dd2780}, {{}, 0x0, {0x7ff777742554, 0x4}, {0xc0035c9cf0, 0xe}, 0x0})
net/http/transport.go:1780 +0x7e5 fp=0xc001c33ee0 sp=0xc001c33a28 pc=0x7ff776227625
net/http.(*Transport).dialConnFor(0xc00041cea0, 0xc00319adc0)
net/http/transport.go:1615 +0xb8 fp=0xc001c33f90 sp=0xc001c33ee0 pc=0x7ff776226138
net/http.(*Transport).startDialConnForLocked.func1()
net/http/transport.go:1597 +0x35 fp=0xc001c33fe0 sp=0xc001c33f90 pc=0x7ff776225f75
runtime.goexit({})
runtime/asm_amd64.s:1693 +0x1 fp=0xc001c33fe8 sp=0xc001c33fe0 pc=0x7ff775ec7be1
created by net/http.(*Transport).startDialConnForLocked in goroutine 172
net/http/transport.go:1596 +0x112
goroutine 41 gp=0xc000106c40 m=nil [chan receive]:
runtime.gopark(0x7ff7765a9af8?, 0x7ff7774cd240?, 0x1?, 0x20?, 0xc0004a9f20?)
runtime/proc.go:460 +0xce fp=0xc0004a9e90 sp=0xc0004a9e70 pc=0x7ff775ebf48e
runtime.chanrecv(0xc0000e0230, 0x0, 0x1)
runtime/chan.go:667 +0x445 fp=0xc0004a9f08 sp=0xc0004a9e90 pc=0x7ff775e52fe5
runtime.chanrecv1(0xc0000e0230?, 0xc0004a9fb0?)
runtime/chan.go:509 +0x12 fp=0xc0004a9f30 sp=0xc0004a9f08 pc=0x7ff775e52b72
github.com/caddyserver/caddy/v2.trapSignalsCrossPlatform.func1()
github.com/caddyserver/caddy/[email protected]/sigtrap.go:43 +0xde fp=0xc0004a9fe0 sp=0xc0004a9f30 pc=0x7ff7765ceb9e
runtime.goexit({})
runtime/asm_amd64.s:1693 +0x1 fp=0xc0004a9fe8 sp=0xc0004a9fe0 pc=0x7ff775ec7be1
created by github.com/caddyserver/caddy/v2.trapSignalsCrossPlatform in goroutine 1
github.com/caddyserver/caddy/[email protected]/sigtrap.go:38 +0x1a
goroutine 22 gp=0xc000506380 m=5 mp=0xc000100008 [syscall]:
runtime.notetsleepg(0x7ff778c83120, 0xffffffffffffffff)
runtime/lock_sema.go:175 +0x31 fp=0xc0004a5fa0 sp=0xc0004a5f68 pc=0x7ff775e58d51
os/signal.signal_recv()
runtime/sigqueue.go:152 +0x29 fp=0xc0004a5fc0 sp=0xc0004a5fa0 pc=0x7ff775ec0f89
os/signal.loop()
os/signal/signal_unix.go:23 +0x13 fp=0xc0004a5fe0 sp=0xc0004a5fc0 pc=0x7ff7765aa2f3
runtime.goexit({})
runtime/asm_amd64.s:1693 +0x1 fp=0xc0004a5fe8 sp=0xc0004a5fe0 pc=0x7ff775ec7be1
created by os/signal.Notify.func1.1 in goroutine 41
os/signal/signal.go:152 +0x1f
goroutine 49 gp=0xc000106e00 m=nil [chan receive]:
runtime.gopark(0xc00044ff40?, 0x3?, 0x3?, 0x0?, 0x0?)
runtime/proc.go:460 +0xce fp=0xc00044ff08 sp=0xc00044fee8 pc=0x7ff775ebf48e
runtime.chanrecv(0xc00010ab00, 0xc00044ffbf, 0x1)
runtime/chan.go:667 +0x445 fp=0xc00044ff80 sp=0xc00044ff08 pc=0x7ff775e52fe5
runtime.chanrecv2(0xc0005d1860?, 0x7ff775e93592?)
runtime/chan.go:514 +0x12 fp=0xc00044ffa8 sp=0xc00044ff80 pc=0x7ff775e52b92
gopkg.in/natefinch/lumberjack%2ev2.(*Logger).millRun(...)
gopkg.in/natefinch/[email protected]/lumberjack.go:379
gopkg.in/natefinch/lumberjack%2ev2.(*Logger).mill.func1.gowrap1()
gopkg.in/natefinch/[email protected]/lumberjack.go:390 +0x49 fp=0xc00044ffe0 sp=0xc00044ffa8 pc=0x7ff7771d8b29
runtime.goexit({})
runtime/asm_amd64.s:1693 +0x1 fp=0xc00044ffe8 sp=0xc00044ffe0 pc=0x7ff775ec7be1
created by gopkg.in/natefinch/lumberjack%2ev2.(*Logger).mill.func1 in goroutine 1
gopkg.in/natefinch/[email protected]/lumberjack.go:390 +0x8c
I only have 100 concurrent connections, why is Caddy exceeding the 10,000-thread limit?
Try deleting all this stuff:
header_up Host {host}
header_up X-Real-IP {remote_host}
transport http {
dial_timeout 5s
response_header_timeout 60s
read_buffer 128k
write_buffer 128k
}
# WebSocket support
flush_interval -1
i.e. just do reverse_proxy 127.0.0.1:8080 with no extra options. Does it behave the same?
The error is the same as before
Looking into it. Seems to be an edge case on Windows specifically. (I don't have Windows, but @WeidiDeng offered to test.)
Caddy's default reverse proxy configuration isn't suited for this type of benchmark. There are several ways to fix this:
- Increase keepalive_idle_conns_per_host. The default value of 32 is too small for heavy traffic.
- Limit the number of connections to the backend. By default there is no limit.
- Disable keepalive, so connections are established as needed and closed when done. By default keepalive is enabled.
When reverse proxy sends a request, it performs the following steps to get a connection:
- Get a cached connection from the connection pool, or create a new goroutine to dial the connection.
- If no connection is available in the first step, wait for an idle connection or the dial to be successful.
If an idle connection is retrieved first, the newly created connection will be put into the pool, or closed if the idle-connection limit is reached.
When the reverse proxy finishes writing the response to the client, it performs some cleanup before the connection is put back into the pool. By that time the client has already received the response and is ready to send its next request.
In this benchmark, new connections are being created and closed very quickly because of this delay in the reverse proxy's cleanup. Connection creation, reading, and closing all require syscalls. However, on Windows, syscalls of this nature are exposed only through the C API, so Go goes through runtime.cgocall even when cgo is disabled — and every such call pins an OS thread and is slower as well. This causes many threads to be created, and eventually the thread limit is reached.
The server example for testing is also written in Go. However, when using a testing tool for stress testing directly, no issues occur, and the performance is even better—it can handle 100,000 requests per second. After using a reverse proxy, the performance drops to 33,000 requests per second, and a "10,000-thread limit" error is reported. So it seems to be a thread-related issue, possibly due to the lack of thread restrictions.
Okay, very interesting. Thanks for the discussion here and on Slack too.
@afengsoft Does doing Weidi's 3 suggestions fix it for you?
I'm actually curious if the first 1 or 2 are sufficient. If so, we can probably do that in the code pretty easily (especially the first one). The third would not be my favorite to make default.
Sorry, I haven't tested it yet because I'm not sure about the exact configuration.
@afengsoft What questions do you have? Maybe @WeidiDeng and I can help clear it up.
@mholt @WeidiDeng Can you provide a specific configuration?
Something like:
reverse_proxy 127.0.0.1:8080 {
header_up Host {host}
header_up X-Real-IP {remote_host}
transport http {
dial_timeout 5s
response_header_timeout 60s
read_buffer 128k
write_buffer 128k
keepalive_idle_conns_per_host 128 # <-- new
max_conns_per_host 32 # <-- new
keepalive off # <-- new
}
flush_interval -1
}
A large number of errors occurred in about a minute, and the performance is even worse than the previous configuration.
2025/10/29 00:58:35.923 ERROR http.log.error dial tcp 127.0.0.1:8080: connectex: Only one usage of each socket address (protocol/network address/port) is normally permitted. {"request": {"remote_ip": "127.0.0.1", "remote_port": "30598", "client_ip": "127.0.0.1", "proto": "HTTP/1.1", "method": "GET", "host": "127.0.0.1", "uri": "/", "headers": {}}, "duration": 0.3241717, "status": 502, "err_id": "v4dgxdu32", "err_trace": "reverseproxy.statusError (reverseproxy.go:1390)"} 2025/10/29 00:58:35.923 ERROR http.log.error dial tcp 127.0.0.1:8080: connectex: Only one usage of each socket address (protocol/network address/port) is normally permitted. {"request": {"remote_ip": "127.0.0.1", "remote_port": "30535", "client_ip": "127.0.0.1", "proto": "HTTP/1.1", "method": "GET", "host": "127.0.0.1", "uri": "/", "headers": {}}, "duration": 0.3337308, "status": 502, "err_id": "mru65uktj", "err_trace": "reverseproxy.statusError (reverseproxy.go:1390)"} 2025/10/29 00:58:35.923 ERROR http.log.error dial tcp 127.0.0.1:8080: connectex: Only one usage of each socket address (protocol/network address/port) is normally permitted. {"request": {"remote_ip": "127.0.0.1", "remote_port": "30483", "client_ip": "127.0.0.1", "proto": "HTTP/1.1", "method": "GET", "host": "127.0.0.1", "uri": "/", "headers": {}}, "duration": 0.3288088, "status": 502, "err_id": "333hri71p", "err_trace": "reverseproxy.statusError (reverseproxy.go:1390)"} 2025/10/29 00:58:35.923 ERROR http.log.error dial tcp 127.0.0.1:8080: connectex: Only one usage of each socket address (protocol/network address/port) is normally permitted. 
{"request": {"remote_ip": "127.0.0.1", "remote_port": "30607", "client_ip": "127.0.0.1", "proto": "HTTP/1.1", "method": "GET", "host": "127.0.0.1", "uri": "/", "headers": {}}, "duration": 0.3205443, "status": 502, "err_id": "b78zeaze3", "err_trace": "reverseproxy.statusError (reverseproxy.go:1390)"} 2025/10/29 00:58:35.941 ERROR http.log.error dial tcp 127.0.0.1:8080: connectex: Only one usage of each socket address (protocol/network address/port) is normally permitted. {"request": {"remote_ip": "127.0.0.1", "remote_port": "30529", "client_ip": "127.0.0.1", "proto": "HTTP/1.1", "method": "GET", "host": "127.0.0.1", "uri": "/", "headers": {}}, "duration": 0.3226776, "status": 502, "err_id": "zwr6pguyn", "err_trace": "reverseproxy.statusError (reverseproxy.go:1390)"}
@afengsoft You only need to pick one of the new options:
Like this
reverse_proxy 127.0.0.1:8080 {
header_up Host {host}
header_up X-Real-IP {remote_host}
transport http {
dial_timeout 5s
response_header_timeout 60s
read_buffer 128k
write_buffer 128k
keepalive_idle_conns_per_host 128 # <-- new
}
flush_interval -1
}
These options are incompatible with each other, and combining them is expected to lead to lower performance. Did the new config lead to a panic?
@afengsoft您只需选择以下新选项之一:
像这样
reverse_proxy 127.0.0.1:8080 { header_up Host {host} header_up X-Real-IP {remote_host} transport http { dial_timeout 5s response_header_timeout 60s read_buffer 128k write_buffer 128k keepalive_idle_conns_per_host 128 # <-- new } flush_interval -1 }这些选项不兼容,预计会导致性能下降。新配置是否导致系统崩溃?
It will crash, and the performance will be halved.
Just to clarify, it crashes and has half the performance with only ONE of the new options?
(You might also consider starting with a blank slate, and adding just one line of config at a time, to see how each one affects performance, rather than 5-7 config lines at a time!)