aiohttp
aiohttp copied to clipboard
aiohttp post memory leak?
Describe the bug
Hi friends, I have been testing a case for 2 weeks, and the Python server may have a memory leak. Using the "mprof" tool, I can see the memory always increases near "async with session.post". I don't know how to analyse the cause of the problem; any advice will be appreciated!
the RES memory data by every minute is here
23208
23324
23424
23448
23496
23524
23552
23572
23620
23644
23676
23692
23692
23724
23764
23788
23804
23816
23836
23860
23872
23892
23904
23928
23940
23960
23980
23992
24004
24028
24048
24060
24080
24092
24112
24124
24144
24164
24176
24196
24208
24232
24244
24264
24284
24296
24316
24328
24348
24360
24384
24404
24416
24436
24448
24468
24480
24500
24512
24532
24544
24568
24580
24600
24612
24636
24656
To Reproduce
client code request.py
import aiohttp
import asyncio
import json
import logging
import time
from logging.handlers import RotatingFileHandler
# --- Logging setup: rotate app.log at 500 MiB, keeping 3 backups ---
handler = RotatingFileHandler(
    filename='app.log',
    mode='a',
    maxBytes=500 * 1024 * 1024,
    backupCount=3,
    encoding='utf-8'
)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger = logging.getLogger('my_logger')
# FIX: the original called setLevel(logging.INFO) twice in a row; once is enough.
logger.setLevel(logging.INFO)
logger.addHandler(handler)
async def post_task(session, url, headers=None, json_param=None):
    """POST *json_param* (JSON-encoded) to *url* through *session*.

    Returns a (body, timestamp, status_code) triple:
    (body_bytes, end_time, 0) on HTTP 200, or (None, time, -1) on a
    non-200 status or any exception.
    """
    try:
        logger.info(f"Request start")
        # NOTE: data=json.dumps(...) is kept (instead of json=...) because the
        # callers already pass an explicit Content-Type header; aiohttp rejects
        # a duplicate content type when json= is used together with that header.
        async with session.post(url, headers=headers, data=json.dumps(json_param)) as resp:
            if resp.status != 200:
                logger.error(f"Request failed with status {resp.status}")
                # FIX: removed the redundant resp.release() — the `async with`
                # block releases the connection automatically on exit.
                return None, time.time(), -1
            reply = await resp.read()
            end_t = time.time()
            return reply, end_t, 0
    except Exception as e:
        logger.error(f"post_task failed: {e}")
        return None, time.time(), -1
async def request():
    """Fire two POSTs concurrently through one shared session and log the outcomes."""
    conn = aiohttp.TCPConnector(limit=10)
    client_timeout = aiohttp.ClientTimeout(total=15)
    async with aiohttp.ClientSession(connector=conn, timeout=client_timeout) as session:
        json_headers = {"Content-Type": "application/json"}
        payload = {"key": "value"}
        endpoints = (
            "http://xx/test_memory/v1/text",
            "http://xx/test_memory/v1/subjectRecognition",
        )
        pending = [
            post_task(session, url, headers=json_headers, json_param=payload)
            for url in endpoints
        ]
        outcomes = await asyncio.gather(*pending, return_exceptions=True)
        for outcome in outcomes:
            if isinstance(outcome, Exception):
                logger.error(f"Task failed with exception: {outcome}")
            else:
                logger.info(f"Task succeeded: {outcome}")
async def main():
    # `test` is never modified, so this loop runs forever — intentional for a
    # long-running memory test; the process is stopped externally (kill).
    test = 0
    while test <1:
        await request()
        await asyncio.sleep(0.1)
# Entry point: run the client loop until the process is killed.
asyncio.run(main())
then execute "nohup python3 request.py >/dev/null 2>&1 &"
server code fake_server.py
from fastapi import FastAPI, File, UploadFile
from fastapi.middleware.cors import CORSMiddleware
import uvicorn
from pydantic import BaseModel
from fastapi import Request
import logging
import time
from logging.handlers import RotatingFileHandler
# Logging: write to app.log, rotating at 5 MiB, with three backup files kept.
logger = logging.getLogger('my_logger')
logger.setLevel(logging.INFO)
handler = RotatingFileHandler(
    filename='app.log',
    encoding='utf-8',
    mode='a',
    maxBytes=5 * 1024 * 1024,
    backupCount=3,
)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
# FastAPI application exposing the two fake endpoints used by the client.
app = FastAPI(title="Multi-Service API Server")
# Wide-open CORS: acceptable for a local memory-test harness, not production.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_methods=["*"],
    allow_headers=["*"],
)
class EducationRequest(BaseModel):
    # Request schema kept from the real service; currently unused by the fake
    # endpoints below (they read the raw Request object instead).
    data: str
    model_type: str
    version: str
    type: str
    request_id: str
    return_probability: int
@app.post("/test_memory/v1/text")
async def ocr_text(request: Request):
    """Fake OCR endpoint: logs the hit and returns a static JSON payload."""
    #print("request ocr text")
    logger.info(f"request ocr text")
    try:
        # The dict literal below cannot raise, so the except branch is
        # effectively unreachable; kept to mirror the real service's shape.
        return {
            "status": 200,
            "message": "ocr_text",
            "data": "ocr_text",
        }
    except Exception as e:
        return {"status": "error", "message": str(e)}
@app.post("/test_memory/v1/subjectRecognition")
async def subjectRecognition(request: Request):
    """Fake subject-recognition endpoint: logs the hit, returns static JSON."""
    logger.info(f"request subjectRecognition")
    try:
        # The dict literal cannot raise; the except branch is effectively dead.
        return {
            "code": 0,
            "message": "subjectRecognition",
            "data": "subjectRecognition",
        }
    except Exception as e:
        return {"status": "error", "message": str(e)}
# Run with 6 worker processes; the "module:attr" string form is required when
# workers > 1 so each worker process can re-import the application.
if __name__ == "__main__":
    uvicorn.run(app="fake_server:app", host="0.0.0.0", port=8888,workers=6)
then execute "nohup python3 fake_server.py >/dev/null 2>&1 &"
Expected behavior
The memory should remain in a relatively stable state and not increase so noticeably.
Logs/tracebacks
nill
Python Version
Python 3.10.0
aiohttp Version
$ python -m pip show aiohttp
Name: aiohttp
Version: 3.8.1
Summary: Async http client/server framework (asyncio)
Home-page: https://github.com/aio-libs/aiohttp
Author:
Author-email:
License: Apache 2
Location: /usr/local/lib/python3.10/site-packages
Requires: aiosignal, async-timeout, attrs, charset-normalizer, frozenlist, multidict, yarl
Required-by: oppo-dapr-python-sdk
multidict Version
$ python -m pip show multidict
Name: multidict
Version: 6.1.0
Summary: multidict implementation
Home-page: https://github.com/aio-libs/multidict
Author: Andrew Svetlov
Author-email: [email protected]
License: Apache 2
Location: /usr/local/lib/python3.10/site-packages
Requires: typing-extensions
Required-by: aiohttp, sanic, yarl
propcache Version
$ python -m pip show propcache
Name: propcache
Version: 0.2.1
Summary: Accelerated property cache
Home-page: https://github.com/aio-libs/propcache
Author: Andrew Svetlov
Author-email: [email protected]
License: Apache-2.0
Location: /usr/local/lib/python3.10/site-packages
Requires:
Required-by: yarl
yarl Version
$ python -m pip show yarl
Name: yarl
Version: 1.18.3
Summary: Yet another URL library
Home-page: https://github.com/aio-libs/yarl
Author: Andrew Svetlov
Author-email: [email protected]
License: Apache-2.0
Location: /usr/local/lib/python3.10/site-packages
Requires: idna, multidict, propcache
Required-by: aiohttp
OS
linux
Related component
Server
Additional context
No response
Code of Conduct
- [x] I agree to follow the aio-libs Code of Conduct
24768 23208
These numbers suggest that it got garbage collected and is not constantly increasing?
24768 23208
These numbers suggest that it got garbage collected and is not constantly increasing?
Sorry for the misleading data: 24768 was from the previous run, and 23208 was from the latest run. I have updated the data.
To sanity check, can you inspect the cookiejar and see if it's the same report as #11052.
To sanity check, can you inspect the cookiejar and see if it's the same report as #11052.
len(session.cookie_jar._cookies)=0
Even with a very low request frequency, the memory increases slowly, and there are no session.post errors. In order to remove the impact of log writing, I removed all log-writing operations.
23120
23296
23316
23336
import aiohttp
import asyncio
import json
import logging
import time
from pprint import pprint
from logging.handlers import RotatingFileHandler
# Rotating-file logging: app.log, 5 MiB per file, three backups retained.
handler = RotatingFileHandler(
    filename='app.log',
    mode='a',
    encoding='utf-8',
    maxBytes=5 * 1024 * 1024,
    backupCount=3,
)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger = logging.getLogger('my_logger')
logger.setLevel(logging.INFO)
logger.addHandler(handler)
async def cui_post():
    """Issue one POST with a fresh session/connector and discard the reply.

    NOTE: creating a new TCPConnector + ClientSession per call is expensive;
    it is done here deliberately to reproduce the reported memory growth.
    """
    connector = aiohttp.TCPConnector(limit=10)
    timeout = aiohttp.ClientTimeout(total=15)
    async with aiohttp.ClientSession(connector=connector, timeout=timeout) as session:
        cui_url = "http://xx/cui/api/v1/subjectRecognition"
        async with session.post(cui_url, headers={"Content-Type": "application/json"}) as resp:
            if resp.status != 200:
                logger.error(f"Request failed with status {resp.status}")
                # FIX: removed the redundant resp.release() — the `async with`
                # context manager releases the connection on exit.
                return
            # Drain the body so the connection can be reused, even though the
            # reply itself is unused. FIX: removed the unused `end_t` local.
            await resp.read()
            return
async def main():
    # `test` is never incremented, so this loops forever: one request ~every 2 s.
    test = 0
    while test <1:
        results = await asyncio.gather(cui_post())
        #logger.info(f"Task succeeded: {reply}")
        await asyncio.sleep(2)
# Entry point: run until the process is killed.
asyncio.run(main())
memory_profiler shows the memory increase details as:
Line # Mem usage Increment Occurrences Line Contents
=============================================================
64 24.4 MiB 24.4 MiB 1 @profile
65 async def cui_post():
66 24.4 MiB 0.0 MiB 1 connector = aiohttp.TCPConnector(limit=10)
67 24.4 MiB 0.0 MiB 1 timeout = aiohttp.ClientTimeout(total=15)
68 24.5 MiB 0.0 MiB 3 async with aiohttp.ClientSession(connector=connector, timeout=timeout) as session:
69 24.4 MiB 0.0 MiB 1 cui_url="/cui/api/v1/subjectRecognition"
84 24.5 MiB 0.0 MiB 6 async with session.post(cui_url, headers={"Content-Type": "application/json"}) as resp:
85 24.5 MiB 0.0 MiB 1 if resp.status != 200:
86 logger.error(f"Request failed with status {resp.status}")
87 resp.release()
96 24.5 MiB 0.0 MiB 1 reply = await resp.read()
Reproducer code added.
Hi! To help us investigate this issue effectively, we need a minimal reproducer. Let me explain what this means:
A minimal reproducer is a small, self-contained example that:
- Demonstrates only the specific issue - In this case, the memory increase during
async with session.post - Contains no external dependencies - Should not require any third-party libraries beyond aiohttp
- Is complete and runnable - Anyone should be able to copy-paste and run it without:
- Filling in missing URLs
- Adding missing imports
- Creating additional files
- Setting up external services
Here's what makes a good minimal reproducer:
import asyncio
import aiohttp
from aiohttp import web
# Server code
async def handle_post(request):
    """Drain the posted body and reply with a plain 'OK'."""
    _body = await request.read()
    return web.Response(text="OK")
async def start_server():
    """Start a one-route aiohttp test server on localhost:8080; return its runner."""
    app = web.Application()
    app.router.add_post('/', handle_post)
    runner = web.AppRunner(app)
    await runner.setup()
    site = web.TCPSite(runner, 'localhost', 8080)
    await site.start()
    # The runner is returned so the caller can tear the server down.
    return runner
# Client code
async def main():
    """Self-contained demo: server and client run in the same process."""
    runner = await start_server()
    async with aiohttp.ClientSession() as session:
        for i in range(100):
            async with session.post('http://localhost:8080/', data=b'test') as resp:
                await resp.read()
            # Check memory here
            print(f"Request {i} completed")
    await runner.cleanup()
if __name__ == "__main__":
    asyncio.run(main())
This helps us because:
- We can run it immediately without setup
- We can isolate whether the issue is in aiohttp or in interaction with other libraries
- We can quickly test fixes and verify the problem
Could you please provide a similar self-contained example that demonstrates the memory increase you're seeing?
Here is the code written following your suggestion:
import asyncio
import aiohttp
from aiohttp import web
import resource
# Server code
async def handle_post(request):
    """Consume the posted body and answer with a fixed JSON document (200)."""
    data = await request.read()
    return web.json_response({
        "code": 0,
        "message": "subjectRecognition",
        "data": "subjectRecognition",
    }, status=200)
async def start_server():
    """Start the in-process aiohttp test server on 127.0.0.1:8080; return its runner."""
    app = web.Application()
    app.router.add_post('/test_memory/v1/subjectRecognition', handle_post)
    runner = web.AppRunner(app)
    await runner.setup()
    site = web.TCPSite(runner, '127.0.0.1', 8080)
    await site.start()
    return runner
# Client code
async def main():
    """Run the embedded server plus a client loop, printing peak RSS per batch."""
    runner = await start_server()

    async def request_post():
        # One short-lived session per batch of 100 requests.
        async with aiohttp.ClientSession() as session:
            for i in range(100):
                async with session.post('http://127.0.0.1:8080/test_memory/v1/subjectRecognition', data=b'test') as resp:
                    reply = await resp.read()
            # Check memory here
            # NOTE: ru_maxrss is KB on Linux but *bytes* on macOS — the unit in
            # the label assumes Linux.
            mem_kb = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
            print(f"RSS: {mem_kb:.2f} KB")
            return reply

    try:
        while True:
            reply = await request_post()
            await asyncio.sleep(1)
    finally:
        # FIX: the original placed cleanup after `while True`, making it
        # unreachable; a finally block guarantees the server is torn down.
        await runner.cleanup()
if __name__ == "__main__":
    asyncio.run(main())
the RSS data
RSS: 23704.00 KB
RSS: 23704.00 KB
RSS: 23704.00 KB
RSS: 23704.00 KB
RSS: 23704.00 KB
RSS: 23704.00 KB
RSS: 23704.00 KB
RSS: 23704.00 KB
RSS: 23704.00 KB
RSS: 23704.00 KB
RSS: 23968.00 KB
RSS: 23968.00 KB
RSS: 23968.00 KB
RSS: 23968.00 KB
RSS: 23968.00 KB
RSS: 23968.00 KB
RSS: 23968.00 KB
RSS: 23968.00 KB
RSS: 23968.00 KB
RSS: 23968.00 KB
RSS: 23968.00 KB
RSS: 23968.00 KB
RSS: 23968.00 KB
RSS: 23968.00 KB
RSS: 23968.00 KB
RSS: 24224.00 KB
RSS: 24224.00 KB
RSS: 24224.00 KB
RSS: 24224.00 KB
RSS: 24224.00 KB
RSS: 24224.00 KB
RSS: 24224.00 KB
RSS: 24224.00 KB
Looks like it leaks very, very slowly. This one will be very hard to find, and it might be in CPython itself:
python3 issue11043.py
RSS: 45023232.00 KB
RSS: 45547520.00 KB
RSS: 45662208.00 KB
RSS: 45662208.00 KB
RSS: 45678592.00 KB
RSS: 45694976.00 KB
RSS: 45711360.00 KB
RSS: 45711360.00 KB
RSS: 45711360.00 KB
RSS: 45711360.00 KB
RSS: 45711360.00 KB
RSS: 45711360.00 KB
RSS: 45711360.00 KB
RSS: 45711360.00 KB
RSS: 45711360.00 KB
RSS: 45711360.00 KB
RSS: 45711360.00 KB
RSS: 45711360.00 KB
RSS: 45711360.00 KB
RSS: 45711360.00 KB
RSS: 45809664.00 KB
RSS: 45809664.00 KB
RSS: 45842432.00 KB
RSS: 45842432.00 KB
RSS: 45858816.00 KB
RSS: 45858816.00 KB
RSS: 45858816.00 KB
RSS: 45858816.00 KB
RSS: 45858816.00 KB
RSS: 45858816.00 KB
RSS: 45858816.00 KB
RSS: 45858816.00 KB
RSS: 45858816.00 KB
RSS: 45858816.00 KB
RSS: 45858816.00 KB
RSS: 45858816.00 KB
RSS: 45858816.00 KB
RSS: 45858816.00 KB
RSS: 45858816.00 KB
RSS: 45858816.00 KB
RSS: 45858816.00 KB
RSS: 45858816.00 KB
RSS: 45858816.00 KB
RSS: 45858816.00 KB
RSS: 45858816.00 KB
RSS: 45858816.00 KB
RSS: 45858816.00 KB
RSS: 45858816.00 KB
RSS: 45858816.00 KB
RSS: 45858816.00 KB
RSS: 45858816.00 KB
RSS: 45858816.00 KB
RSS: 45858816.00 KB
RSS: 45858816.00 KB
RSS: 45858816.00 KB
RSS: 45858816.00 KB
RSS: 45858816.00 KB
RSS: 45858816.00 KB
RSS: 45858816.00 KB
RSS: 45858816.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45875200.00 KB
RSS: 45907968.00 KB
RSS: 45907968.00 KB
RSS: 45907968.00 KB
RSS: 45907968.00 KB
RSS: 45907968.00 KB
RSS: 45907968.00 KB
RSS: 45907968.00 KB
RSS: 45907968.00 KB
RSS: 45907968.00 KB
RSS: 45907968.00 KB
RSS: 45907968.00 KB
RSS: 45907968.00 KB
RSS: 45907968.00 KB
RSS: 45907968.00 KB
RSS: 45907968.00 KB
RSS: 45924352.00 KB
RSS: 45924352.00 KB
RSS: 45924352.00 KB
RSS: 45924352.00 KB
RSS: 45924352.00 KB
RSS: 45924352.00 KB
RSS: 45924352.00 KB
RSS: 45924352.00 KB
RSS: 45924352.00 KB
RSS: 45924352.00 KB
RSS: 45924352.00 KB
RSS: 45924352.00 KB
RSS: 45924352.00 KB
RSS: 45924352.00 KB
RSS: 45924352.00 KB
RSS: 45924352.00 KB
RSS: 45924352.00 KB
RSS: 45924352.00 KB
RSS: 45924352.00 KB
RSS: 45924352.00 KB
RSS: 45924352.00 KB
RSS: 45924352.00 KB
RSS: 45924352.00 KB
RSS: 45924352.00 KB
RSS: 45924352.00 KB
RSS: 45924352.00 KB
RSS: 45924352.00 KB
RSS: 45924352.00 KB
RSS: 45924352.00 KB
RSS: 45924352.00 KB
RSS: 45924352.00 KB
RSS: 45924352.00 KB
RSS: 45924352.00 KB
RSS: 45924352.00 KB
RSS: 45924352.00 KB
RSS: 45924352.00 KB
RSS: 45924352.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45940736.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45957120.00 KB
RSS: 45973504.00 KB
RSS: 45973504.00 KB
RSS: 45973504.00 KB
RSS: 45973504.00 KB
RSS: 45973504.00 KB
RSS: 45973504.00 KB
RSS: 45973504.00 KB
RSS: 45973504.00 KB
RSS: 45973504.00 KB
RSS: 45973504.00 KB
RSS: 45973504.00 KB
RSS: 45973504.00 KB
RSS: 45973504.00 KB
RSS: 45973504.00 KB
RSS: 45973504.00 KB
RSS: 45973504.00 KB
import asyncio
import aiohttp
from aiohttp import web
import resource
import tracemalloc
import gc
import sys
import time
from datetime import datetime
# Server code
async def handle_post(request):
    """Consume the posted body and return a fixed JSON reply (always 200)."""
    data = await request.read()
    return web.json_response({
        "code": 0,
        "message": "subjectRecognition",
        "data": "subjectRecognition",
    }, status=200)
async def start_server():
    """Start the in-process aiohttp test server on 127.0.0.1:8080; return its runner."""
    app = web.Application()
    app.router.add_post('/test_memory/v1/subjectRecognition', handle_post)
    runner = web.AppRunner(app)
    await runner.setup()
    site = web.TCPSite(runner, '127.0.0.1', 8080)
    await site.start()
    return runner
def get_memory_info():
    """Return peak RSS (in MB) together with garbage-collector statistics.

    ru_maxrss is reported in bytes on macOS but kilobytes on Linux, so the
    raw value is normalised to KB before converting to MB.
    """
    peak = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
    kb = peak / 1024 if sys.platform == 'darwin' else peak
    return {
        'rss_mb': kb / 1024,
        'gc_stats': gc.get_stats(),
        'gc_count': gc.get_count(),
    }
# Client code
async def main():
    """Drive repeated request batches while sampling RSS, GC stats and tracemalloc."""
    # Start tracemalloc
    tracemalloc.start()
    runner = await start_server()
    iteration = 0
    baseline_memory = None   # RSS (MB) measured after the first batch
    memory_samples = []      # one RSS sample per iteration, for trend analysis
    async def request_post():
        # One fresh session per batch of 100 requests; returns the last body.
        async with aiohttp.ClientSession() as session:
            for i in range(100):
                async with session.post('http://127.0.0.1:8080/test_memory/v1/subjectRecognition', data=b'test') as resp:
                    reply = await resp.read()
            return reply
    print(f"Starting memory leak test at {datetime.now()}")
    print("=" * 80)
    try:
        while True:
            iteration += 1
            reply = await request_post()
            # Force garbage collection
            gc.collect()
            # Get memory info
            mem_info = get_memory_info()
            if baseline_memory is None:
                baseline_memory = mem_info['rss_mb']
            memory_increase = mem_info['rss_mb'] - baseline_memory
            memory_samples.append(mem_info['rss_mb'])
            # Print stats every 10 iterations
            if iteration % 10 == 0:
                print(f"\nIteration {iteration}:")
                print(f" RSS: {mem_info['rss_mb']:.2f} MB (baseline: {baseline_memory:.2f} MB, increase: {memory_increase:.2f} MB)")
                print(f" GC Count: {mem_info['gc_count']}")
                # Get top memory allocations
                snapshot = tracemalloc.take_snapshot()
                top_stats = snapshot.statistics('lineno')
                print("\n Top 5 memory allocations:")
                for stat in top_stats[:5]:
                    print(f" {stat}")
                # Check for memory leak trend: compare the mean of the last 10
                # samples against the mean of the 10 before them.
                if len(memory_samples) >= 20:
                    recent_avg = sum(memory_samples[-10:]) / 10
                    older_avg = sum(memory_samples[-20:-10]) / 10
                    growth_rate = (recent_avg - older_avg) / older_avg * 100
                    print(f"\n Memory growth rate: {growth_rate:.2f}% over last 10 iterations")
            await asyncio.sleep(1)
    except KeyboardInterrupt:
        print("\nStopping test...")
    finally:
        # Tear down the server and stop tracing regardless of how we exit.
        await runner.cleanup()
        tracemalloc.stop()
if __name__ == "__main__":
    asyncio.run(main())
I didn't find any ref leaks either
import asyncio
import aiohttp
from aiohttp import web
import resource
import tracemalloc
import gc
import sys
import weakref
import objgraph
from collections import defaultdict
# Server code
async def handle_post(request):
    """Consume the posted body and return a fixed JSON reply (always 200)."""
    data = await request.read()
    return web.json_response({
        "code": 0,
        "message": "subjectRecognition",
        "data": "subjectRecognition",
    }, status=200)
async def start_server():
    """Start the in-process aiohttp test server on 127.0.0.1:8080; return its runner."""
    app = web.Application()
    app.router.add_post('/test_memory/v1/subjectRecognition', handle_post)
    runner = web.AppRunner(app)
    await runner.setup()
    site = web.TCPSite(runner, '127.0.0.1', 8080)
    await site.start()
    return runner
class ObjectTracker:
    """Track object lifetimes via weak references, grouped by label.

    Used to verify that aiohttp objects (sessions, responses, ...) are
    actually freed after a request cycle: anything still reachable shows
    up in the "alive" counts.
    """

    def __init__(self):
        # label -> list of id()s ever tracked (note: CPython may reuse ids
        # after collection, so these are only informative).
        self.tracked_objects = defaultdict(list)
        # label -> list of weakref.ref to the tracked objects.
        self.weak_refs = defaultdict(list)

    def track(self, obj, label):
        """Track *obj* under *label* with a weak reference.

        Objects that do not support weak references (e.g. plain ints)
        are silently ignored.
        """
        try:
            ref = weakref.ref(obj, lambda r: self.on_delete(label, r))
            self.weak_refs[label].append(ref)
            self.tracked_objects[label].append(id(obj))
        except TypeError:
            # Some objects can't be weakly referenced
            pass

    def on_delete(self, label, ref):
        """Callback fired when a tracked object is finalized (no-op hook)."""
        pass

    def get_alive_count(self):
        """Return {label: number of tracked objects still alive}."""
        result = {}
        for label, refs in self.weak_refs.items():
            alive = sum(1 for ref in refs if ref() is not None)
            result[label] = alive
        return result

    def get_alive_objects(self, label):
        """Return the still-alive objects tracked under *label*.

        Dereference each weak ref exactly once: the original called
        ``ref()`` twice (filter + collect), so an object collected
        between the two calls could put a ``None`` into the result.
        """
        alive = []
        for ref in self.weak_refs.get(label, []):
            obj = ref()
            if obj is not None:
                alive.append(obj)
        return alive
# Module-wide tracker instance shared by the request driver in main().
tracker = ObjectTracker()
async def main():
    """Run 50 batches of 100 POSTs, reporting object/memory stats to hunt leaks."""
    # Enable garbage collection debugging
    gc.set_debug(gc.DEBUG_LEAK)  # keeps uncollectables in gc.garbage for inspection
    # Start tracemalloc
    tracemalloc.start()
    runner = await start_server()
    iteration = 0
    baseline_objects = None
    print("Starting memory leak debug...")
    print("=" * 80)

    async def request_post():
        # Drives the async context manager protocol by hand
        # (__aenter__/__aexit__) so the response and the request context
        # objects themselves can be registered with the tracker.
        session = aiohttp.ClientSession()
        tracker.track(session, 'ClientSession')
        try:
            for i in range(100):
                request_context = session.post('http://127.0.0.1:8080/test_memory/v1/subjectRecognition', data=b'test')
                resp = await request_context.__aenter__()
                tracker.track(resp, 'ClientResponse')
                tracker.track(request_context, 'RequestContext')
                reply = await resp.read()
                await request_context.__aexit__(None, None, None)
            return reply
        finally:
            await session.close()

    try:
        while iteration < 50:  # Limited iterations for debugging
            iteration += 1
            reply = await request_post()
            # Force garbage collection
            gc.collect()
            await asyncio.sleep(0.1)  # Allow event loop to process
            gc.collect()
            # Get object counts
            alive_counts = tracker.get_alive_count()
            if iteration == 1:
                baseline_objects = len(gc.get_objects())
            current_objects = len(gc.get_objects())
            # Print stats every 5 iterations
            if iteration % 5 == 0:
                print(f"\nIteration {iteration}:")
                print(f" Total objects: {current_objects} (baseline: {baseline_objects}, growth: {current_objects - baseline_objects})")
                print(f" Tracked alive objects: {alive_counts}")
                # Check for specific aiohttp objects
                aiohttp_objects = {}
                for obj in gc.get_objects():
                    obj_type = type(obj).__name__
                    if obj_type in ['ClientSession', 'ClientResponse', 'ClientRequest',
                                    'TCPConnector', 'Connection', 'ResponseHandler',
                                    'HttpResponseParser', 'HttpRequestWriter']:
                        aiohttp_objects[obj_type] = aiohttp_objects.get(obj_type, 0) + 1
                if aiohttp_objects:
                    print(f" AioHTTP objects in memory: {aiohttp_objects}")
                # Check garbage collector stats
                print(f" GC stats: {gc.get_count()}")
                # Get uncollectable objects (populated because of DEBUG_LEAK above)
                uncollectable = gc.garbage
                if uncollectable:
                    print(f" Uncollectable objects: {len(uncollectable)}")
                    for obj in uncollectable[:5]:  # Show first 5
                        print(f" - {type(obj)}: {repr(obj)[:100]}")
                # Memory snapshot
                snapshot = tracemalloc.take_snapshot()
                top_stats = snapshot.statistics('lineno')
                print("\n Top 3 memory allocations:")
                for stat in top_stats[:3]:
                    print(f" {stat}")
            # Check for circular references in tracked objects
            if iteration % 10 == 0:
                print("\n Checking for circular references...")
                for label in ['ClientSession', 'ClientResponse']:
                    alive_objs = tracker.get_alive_objects(label)
                    if alive_objs:
                        print(f" {label}: {len(alive_objs)} alive objects")
                        # Try to find referrers
                        for obj in alive_objs[:2]:  # Check first 2
                            referrers = gc.get_referrers(obj)
                            print(f" Object {id(obj)} has {len(referrers)} referrers")
            if iteration == 25:
                # Detailed analysis at midpoint
                print("\n" + "="*80)
                print("DETAILED ANALYSIS AT ITERATION 25")
                print("="*80)
                # Generate object growth graph
                objgraph.show_growth(limit=10)
                # Find most common types
                objgraph.show_most_common_types(limit=10)
    except KeyboardInterrupt:
        print("\nStopping test...")
    finally:
        await runner.cleanup()
        tracemalloc.stop()
        # Final garbage collection
        gc.collect()
        print("\nFinal analysis:")
        print(f" Tracked alive objects: {tracker.get_alive_count()}")
if __name__ == "__main__":
    # Script entry point: run the leak-debug driver on a fresh event loop.
    asyncio.run(main())
I wonder if this is a case of https://github.com/python/cpython/pull/133617
import asyncio
import aiohttp
from aiohttp import web
import gc
import weakref
import psutil
import os
# Track objects with weak references.
# One list per object kind; counting the refs whose target is still alive
# shows what survives after the GC sweeps between test patterns.
response_refs = []
session_refs = []
connector_refs = []
def track_object(obj, refs_list, name):
    """Append a weak reference to *obj* onto *refs_list*.

    The weakref callback prints when the object is finalized, so objects
    that never print stand out as potential leaks.  Objects that do not
    support weak references are silently skipped.
    """
    def on_delete(ref):
        print(f"{name} deleted: {ref}")

    try:
        ref = weakref.ref(obj, on_delete)
    except TypeError:
        # Only weakref creation can legitimately fail here (e.g. for ints).
        # The original bare ``except:`` would also have hidden real bugs.
        return
    refs_list.append(ref)
# Server
async def handle_post(request):
    """Drain the request body and answer 200 with a fixed status document."""
    await request.read()  # content unused; read only to drain the stream
    return web.json_response({"status": "ok"}, status=200)
async def start_server():
    """Start the local test server on 127.0.0.1:8080; caller cleans up the runner."""
    application = web.Application()
    application.router.add_post('/test', handle_post)
    app_runner = web.AppRunner(application)
    await app_runner.setup()
    tcp_site = web.TCPSite(app_runner, '127.0.0.1', 8080)
    await tcp_site.start()
    return app_runner
def get_memory_mb():
    """Get current process memory in MB"""
    rss_bytes = psutil.Process(os.getpid()).memory_info().rss
    return rss_bytes / 1024 / 1024
async def test_leak_pattern():
    """Test different usage patterns to identify the leak.

    Pattern 1: a fresh session per outer iteration, used as a context manager.
    Pattern 2: the same shape but inside a helper coroutine (the reporter's code).
    Pattern 3: introspect the reference graph of a single response.
    """
    runner = await start_server()
    print("Testing memory leak patterns...")
    print("=" * 60)

    # Pattern 1: Proper context manager usage
    print("\nPattern 1: Proper context manager usage")
    start_mem = get_memory_mb()
    for i in range(5):
        async with aiohttp.ClientSession() as session:
            track_object(session, session_refs, f"Session-P1-{i}")
            if hasattr(session, '_connector'):
                track_object(session._connector, connector_refs, f"Connector-P1-{i}")
            for j in range(100):
                async with session.post('http://127.0.0.1:8080/test', data=b'test') as resp:
                    track_object(resp, response_refs, f"Response-P1-{i}-{j}")
                    data = await resp.read()
        # Double collect with a sleep between: lets the loop run close
        # callbacks before the second sweep.
        gc.collect()
        await asyncio.sleep(0.1)
        gc.collect()
        print(f" Iteration {i+1}: Memory: {get_memory_mb():.2f} MB (delta: {get_memory_mb() - start_mem:.2f} MB)")
        print(f" Alive - Sessions: {sum(1 for r in session_refs if r() is not None)}, "
              f"Responses: {sum(1 for r in response_refs if r() is not None)}, "
              f"Connectors: {sum(1 for r in connector_refs if r() is not None)}")

    # Clear refs for next pattern
    response_refs.clear()
    session_refs.clear()
    connector_refs.clear()

    # Pattern 2: Session reuse (like in the original issue)
    print("\n\nPattern 2: Session reuse in function scope")
    start_mem = get_memory_mb()

    async def make_requests(iteration):
        async with aiohttp.ClientSession() as session:
            track_object(session, session_refs, f"Session-P2-{iteration}")
            if hasattr(session, '_connector'):
                track_object(session._connector, connector_refs, f"Connector-P2-{iteration}")
            for i in range(100):
                async with session.post('http://127.0.0.1:8080/test', data=b'test') as resp:
                    track_object(resp, response_refs, f"Response-P2-{iteration}-{i}")
                    reply = await resp.read()
            return reply

    for i in range(5):
        await make_requests(i)
        gc.collect()
        await asyncio.sleep(0.1)
        gc.collect()
        print(f" Iteration {i+1}: Memory: {get_memory_mb():.2f} MB (delta: {get_memory_mb() - start_mem:.2f} MB)")
        print(f" Alive - Sessions: {sum(1 for r in session_refs if r() is not None)}, "
              f"Responses: {sum(1 for r in response_refs if r() is not None)}, "
              f"Connectors: {sum(1 for r in connector_refs if r() is not None)}")

    # Pattern 3: Check specific object relationships
    print("\n\nPattern 3: Checking object relationships")
    # Create a single session and response to examine
    async with aiohttp.ClientSession() as session:
        async with session.post('http://127.0.0.1:8080/test', data=b'test') as resp:
            data = await resp.read()
            # Check references (private attrs -- may change between aiohttp versions)
            print(f" Response has session ref: {hasattr(resp, '_session') and resp._session is not None}")
            print(f" Response has connection ref: {hasattr(resp, '_connection') and resp._connection is not None}")
            print(f" Response has writer ref: {hasattr(resp, '_ClientResponse__writer') and resp._ClientResponse__writer is not None}")
            if hasattr(resp, '_connection') and resp._connection:
                conn = resp._connection
                print(f" Connection has connector ref: {hasattr(conn, '_connector') and conn._connector is not None}")
                print(f" Connection has protocol ref: {hasattr(conn, '_protocol') and conn._protocol is not None}")

    await runner.cleanup()
    # Final cleanup and analysis
    gc.collect()
    await asyncio.sleep(0.5)
    gc.collect()
    print("\n\nFinal Analysis:")
    print(f" Remaining alive - Sessions: {sum(1 for r in session_refs if r() is not None)}, "
          f"Responses: {sum(1 for r in response_refs if r() is not None)}, "
          f"Connectors: {sum(1 for r in connector_refs if r() is not None)}")
if __name__ == "__main__":
    # Entry point: run the pattern comparison on a fresh event loop.
    asyncio.run(test_leak_pattern())
No leaks in the connection pooling... I'm thinking it's in a dependency or CPython.
Looks like we have a circular reference: ClientResponse holds a reference to ClientSession via _session, and this reference is not always cleared.
But it does seem to eventually get GC'ed
slow leak detector
#!/usr/bin/env python3
"""Test for very slow memory leaks by monitoring allocation patterns"""
import asyncio
import aiohttp
from aiohttp import web
import gc
import tracemalloc
import time
import sys
from collections import defaultdict
# Global stats: traceback-string -> {'count', 'size', 'samples'}.
# Filled by analyze_allocations(), consumed by find_consistent_leaks().
allocation_stats = defaultdict(lambda: {'count': 0, 'size': 0, 'samples': []})
async def handle_post(request):
    """Echo endpoint used by the slow-leak probe; body content is ignored."""
    await request.read()  # drain the body before answering
    payload = {
        "code": 0,
        "message": "subjectRecognition",
        "data": "subjectRecognition",
    }
    return web.json_response(payload, status=200)
async def start_server():
    """Bring up the echo server on 127.0.0.1:8080 and return its AppRunner."""
    app = web.Application()
    app.router.add_post('/test_memory/v1/subjectRecognition', handle_post)
    runner = web.AppRunner(app)
    await runner.setup()
    await web.TCPSite(runner, '127.0.0.1', 8080).start()
    return runner
def analyze_allocations(snapshot1, snapshot2, iteration):
    """Record growing allocation sites between two tracemalloc snapshots.

    Every traceback whose object count grew is folded into the
    module-level ``allocation_stats`` so repeated growth across many
    iterations can later be detected by ``find_consistent_leaks``.
    """
    for stat in snapshot2.compare_to(snapshot1, 'traceback'):
        if stat.count_diff <= 0:
            continue  # only allocations that grew are interesting
        entry = allocation_stats[str(stat.traceback)]
        entry['count'] += stat.count_diff
        entry['size'] += stat.size_diff
        entry['samples'].append({
            'iteration': iteration,
            'count_diff': stat.count_diff,
            'size_diff': stat.size_diff,
            'traceback': stat.traceback,
        })
def find_consistent_leaks():
    """Return allocation sites that grew in >=80% of samples, sorted by growth rate.

    Reads the module-level ``allocation_stats`` accumulated by
    ``analyze_allocations``.
    """
    leaks = []
    for key, stats in allocation_stats.items():
        samples = stats['samples']
        if len(samples) < 10:  # Need enough samples
            continue
        # Consistently growing: at least 80% of samples show positive growth.
        growing = sum(1 for s in samples if s['size_diff'] > 0)
        if growing / len(samples) < 0.8:
            continue
        # Average growth per iteration over the sampled span.
        span = samples[-1]['iteration'] - samples[0]['iteration']
        if span <= 0:
            continue
        rate = stats['size'] / span
        if rate <= 0:  # only keep sites with net positive growth
            continue
        leaks.append({
            'key': key,
            'total_size': stats['size'],
            'total_count': stats['count'],
            'growth_rate': rate,
            'samples': len(samples),
            'traceback': samples[0]['traceback'],
        })
    return sorted(leaks, key=lambda x: x['growth_rate'], reverse=True)
async def main():
    """Run 200 batches of 100 POSTs, sampling tracemalloc to surface slow leaks."""
    # Start with detailed tracemalloc
    tracemalloc.start(10)  # keep 10 frames per traceback for attribution
    runner = await start_server()
    print("Detecting slow memory leaks...")
    print("=" * 80)
    print(f"Python: {sys.version}")
    print(f"aiohttp: {aiohttp.__version__}")
    print("This test will run for several minutes to detect slow leaks")
    print("=" * 80)

    # Original pattern from issue
    async def request_post():
        async with aiohttp.ClientSession() as session:
            for i in range(100):
                async with session.post('http://127.0.0.1:8080/test_memory/v1/subjectRecognition', data=b'test') as resp:
                    reply = await resp.read()
            return reply

    # Warm up (excludes one-time allocations from the baseline snapshot)
    print("\nWarming up...")
    for _ in range(10):
        await request_post()
        await asyncio.sleep(1)
    gc.collect()
    initial_snapshot = tracemalloc.take_snapshot()
    print("\nRunning leak detection...")
    start_time = time.time()
    # Run for a longer period to detect slow leaks
    for iteration in range(200):  # 200 iterations
        await request_post()
        await asyncio.sleep(1)
        if iteration % 10 == 0 and iteration > 0:
            gc.collect()
            current_snapshot = tracemalloc.take_snapshot()
            # Analyze allocations against the post-warmup baseline
            analyze_allocations(initial_snapshot, current_snapshot, iteration)
            # Report progress
            elapsed = time.time() - start_time
            print(f"\rIteration {iteration}/200 ({elapsed:.1f}s elapsed)...", end='', flush=True)

    print("\n\nAnalyzing results...")
    # Find consistent leaks
    leaks = find_consistent_leaks()
    if leaks:
        print(f"\nFound {len(leaks)} potential memory leaks:")
        print("=" * 80)
        for i, leak in enumerate(leaks[:10], 1):  # Top 10 leaks
            print(f"\nLeak #{i}:")
            print(f" Growth rate: {leak['growth_rate']:.1f} bytes/iteration")
            print(f" Total leaked: {leak['total_size']} bytes ({leak['total_count']} objects)")
            print(f" Samples: {leak['samples']}")
            print(" Traceback:")
            for frame in leak['traceback'][:5]:  # First 5 frames
                print(f" {frame}")
            # Estimate time to leak 1MB at this rate
            if leak['growth_rate'] > 0:
                iterations_per_mb = 1024 * 1024 / leak['growth_rate']
                time_per_mb = iterations_per_mb / 60  # minutes
                print(f" Time to leak 1MB: {time_per_mb:.1f} minutes")
    else:
        print("\nNo consistent memory leaks detected")

    # Final memory snapshot comparison
    print("\n" + "=" * 80)
    print("Overall memory growth:")
    final_snapshot = tracemalloc.take_snapshot()
    top_stats = final_snapshot.compare_to(initial_snapshot, 'lineno')
    total_growth = sum(stat.size_diff for stat in top_stats if stat.size_diff > 0)
    print(f" Total memory growth: {total_growth / 1024:.1f} KB")
    print(f" Growth per iteration: {total_growth / 200:.1f} bytes")
    print("\nTop 10 growing allocations by line:")
    for stat in sorted(top_stats, key=lambda x: x.size_diff, reverse=True)[:10]:
        if stat.size_diff > 0:
            print(f" {stat}")
    await runner.cleanup()
    tracemalloc.stop()
if __name__ == "__main__":
    # Script entry point: run the slow-leak detector on a fresh event loop.
    asyncio.run(main())
maybe a leak in asyncio?
% python3 test_slow_memory_pattern.py
Detecting slow memory leaks...
================================================================================
Python: 3.13.3 (main, Apr 8 2025, 13:54:08) [Clang 16.0.0 (clang-1600.0.26.6)]
aiohttp: 3.12.5.dev0
This test will run for several minutes to detect slow leaks
================================================================================
Warming up...
Running leak detection...
Iteration 190/200 (219.0s elapsed)...
Analyzing results...
Found 4 potential memory leaks:
================================================================================
Leak #1:
Growth rate: 4836.4 bytes/iteration
Total leaked: 870550 bytes (9595 objects)
Samples: 316
Traceback:
/Users/bdraco/aiohttp/test_slow_memory_pattern.py:171
/opt/homebrew/Cellar/[email protected]/3.13.3/Frameworks/Python.framework/Versions/3.13/lib/python3.13/asyncio/runners.py:195
/opt/homebrew/Cellar/[email protected]/3.13.3/Frameworks/Python.framework/Versions/3.13/lib/python3.13/asyncio/runners.py:118
/opt/homebrew/Cellar/[email protected]/3.13.3/Frameworks/Python.framework/Versions/3.13/lib/python3.13/asyncio/base_events.py:706
/opt/homebrew/Cellar/[email protected]/3.13.3/Frameworks/Python.framework/Versions/3.13/lib/python3.13/asyncio/base_events.py:677
Time to leak 1MB: 3.6 minutes
Leak #2:
Growth rate: 1433.6 bytes/iteration
Total leaked: 243714 bytes (5330 objects)
Samples: 83
Traceback:
/opt/homebrew/Cellar/[email protected]/3.13.3/Frameworks/Python.framework/Versions/3.13/lib/python3.13/asyncio/runners.py:195
/opt/homebrew/Cellar/[email protected]/3.13.3/Frameworks/Python.framework/Versions/3.13/lib/python3.13/asyncio/runners.py:118
/opt/homebrew/Cellar/[email protected]/3.13.3/Frameworks/Python.framework/Versions/3.13/lib/python3.13/asyncio/base_events.py:706
/opt/homebrew/Cellar/[email protected]/3.13.3/Frameworks/Python.framework/Versions/3.13/lib/python3.13/asyncio/base_events.py:677
/opt/homebrew/Cellar/[email protected]/3.13.3/Frameworks/Python.framework/Versions/3.13/lib/python3.13/asyncio/base_events.py:2034
Time to leak 1MB: 12.2 minutes
Leak #3:
Growth rate: 184.4 bytes/iteration
Total leaked: 31344 bytes (285 objects)
Samples: 18
Traceback:
/opt/homebrew/Cellar/[email protected]/3.13.3/Frameworks/Python.framework/Versions/3.13/lib/python3.13/asyncio/runners.py:118
/opt/homebrew/Cellar/[email protected]/3.13.3/Frameworks/Python.framework/Versions/3.13/lib/python3.13/asyncio/base_events.py:706
/opt/homebrew/Cellar/[email protected]/3.13.3/Frameworks/Python.framework/Versions/3.13/lib/python3.13/asyncio/base_events.py:677
/opt/homebrew/Cellar/[email protected]/3.13.3/Frameworks/Python.framework/Versions/3.13/lib/python3.13/asyncio/base_events.py:2034
/opt/homebrew/Cellar/[email protected]/3.13.3/Frameworks/Python.framework/Versions/3.13/lib/python3.13/asyncio/events.py:89
Time to leak 1MB: 94.8 minutes
Leak #4:
Growth rate: 31.2 bytes/iteration
Total leaked: 5624 bytes (38 objects)
Samples: 38
Traceback:
/opt/homebrew/Cellar/[email protected]/3.13.3/Frameworks/Python.framework/Versions/3.13/lib/python3.13/asyncio/base_events.py:706
/opt/homebrew/Cellar/[email protected]/3.13.3/Frameworks/Python.framework/Versions/3.13/lib/python3.13/asyncio/base_events.py:677
/opt/homebrew/Cellar/[email protected]/3.13.3/Frameworks/Python.framework/Versions/3.13/lib/python3.13/asyncio/base_events.py:2034
/opt/homebrew/Cellar/[email protected]/3.13.3/Frameworks/Python.framework/Versions/3.13/lib/python3.13/asyncio/events.py:89
/Users/bdraco/aiohttp/test_slow_memory_pattern.py:111
Time to leak 1MB: 559.3 minutes
================================================================================
Overall memory growth:
Total memory growth: 170.2 KB
Growth per iteration: 871.6 bytes
Top 10 growing allocations by line:
/Users/bdraco/aiohttp/test_slow_memory_pattern.py:42: size=87.2 KiB (+87.2 KiB), count=931 (+931), average=96 B
/opt/homebrew/Cellar/[email protected]/3.13.3/Frameworks/Python.framework/Versions/3.13/lib/python3.13/tracemalloc.py:498: size=21.7 KiB (+21.7 KiB), count=463 (+463), average=48 B
/opt/homebrew/Cellar/[email protected]/3.13.3/Frameworks/Python.framework/Versions/3.13/lib/python3.13/tracemalloc.py:193: size=15.8 KiB (+15.8 KiB), count=137 (+137), average=118 B
/opt/homebrew/Cellar/[email protected]/3.13.3/Frameworks/Python.framework/Versions/3.13/lib/python3.13/tracemalloc.py:115: size=11.7 KiB (+11.7 KiB), count=150 (+150), average=80 B
/opt/homebrew/Cellar/[email protected]/3.13.3/Frameworks/Python.framework/Versions/3.13/lib/python3.13/asyncio/events.py:38: size=6592 B (+6528 B), count=103 (+102), average=64 B
/Users/bdraco/aiohttp/aiohttp/payload.py:361: size=5224 B (+5224 B), count=44 (+44), average=119 B
/opt/homebrew/Cellar/[email protected]/3.13.3/Frameworks/Python.framework/Versions/3.13/lib/python3.13/asyncio/events.py:89: size=5601 B (+4603 B), count=113 (+97), average=50 B
/Users/bdraco/aiohttp/aiohttp/payload.py:123: size=4440 B (+4320 B), count=37 (+36), average=120 B
/opt/homebrew/Cellar/[email protected]/3.13.3/Frameworks/Python.framework/Versions/3.13/lib/python3.13/tracemalloc.py:502: size=3904 B (+3904 B), count=122 (+122), average=32 B
/Users/bdraco/aiohttp/test_slow_memory_pattern.py:13: size=1200 B (+1200 B), count=15 (+15), average=80 B
AFAICT there is no leak, and it's a case of needing to malloc_trim https://github.com/aio-libs/aiohttp/issues/4618#issuecomment-2391898609
We definitely aren't leaking any python objects. If there is a leak, its not in aiohttp. It has to be in a dep.
#!/usr/bin/env python3
"""Ultra-detailed test for micro memory leaks"""
import asyncio
import aiohttp
from aiohttp import web
import gc
import sys
import tracemalloc
import weakref
from datetime import datetime
import psutil
import os
# Configuration
ITERATIONS_PER_BATCH = 100  # requests issued per batch
BATCHES = 100  # Total requests = 10,000
REPORT_EVERY = 10  # Report every 10 batches
# Track specific objects.
# WeakSets: entries vanish automatically once the objects are collected,
# so len() of each set counts what is still alive.
tracked_sessions = weakref.WeakSet()
tracked_responses = weakref.WeakSet()
tracked_requests = weakref.WeakSet()
# Monkey patch to track objects.
# Keep the originals so main() can restore them on exit.
original_session_init = aiohttp.ClientSession.__init__
original_response_init = aiohttp.ClientResponse.__init__
original_request_init = aiohttp.ClientRequest.__init__
def track_session_init(self, *args, **kwargs):
    """Replacement ClientSession.__init__ that also registers the new session."""
    ret = original_session_init(self, *args, **kwargs)
    tracked_sessions.add(self)
    return ret
def track_response_init(self, *args, **kwargs):
    """Replacement ClientResponse.__init__ that also registers the new response."""
    ret = original_response_init(self, *args, **kwargs)
    tracked_responses.add(self)
    return ret
def track_request_init(self, *args, **kwargs):
    """Replacement ClientRequest.__init__ that also registers the new request."""
    ret = original_request_init(self, *args, **kwargs)
    tracked_requests.add(self)
    return ret
# Apply patches: from here on, every new session/response/request
# created anywhere in the process registers itself in the WeakSets above.
aiohttp.ClientSession.__init__ = track_session_init
aiohttp.ClientResponse.__init__ = track_response_init
aiohttp.ClientRequest.__init__ = track_request_init
def get_detailed_memory():
    """Snapshot RSS/VMS plus counts of tracked aiohttp objects and GC objects."""
    mem = psutil.Process(os.getpid()).memory_info()
    stats = {
        'rss_mb': mem.rss / 1024 / 1024,
        'vms_mb': mem.vms / 1024 / 1024,
    }
    # Live counts straight from the WeakSets populated by the monkey patches.
    stats['sessions'] = len(tracked_sessions)
    stats['responses'] = len(tracked_responses)
    stats['requests'] = len(tracked_requests)
    stats['gc_objects'] = len(gc.get_objects())
    return stats
async def handle_post(request):
    """Test endpoint: reads (and discards) the body, returns a fixed JSON document."""
    await request.read()
    return web.json_response(
        {
            "code": 0,
            "message": "subjectRecognition",
            "data": "subjectRecognition",
        },
        status=200,
    )
async def main():
    """Run BATCHES x ITERATIONS_PER_BATCH requests with per-batch memory reports."""
    # Enable tracemalloc
    tracemalloc.start(10)
    # Start server (inline here instead of a start_server() helper; port 8082)
    app = web.Application()
    app.router.add_post('/test_memory/v1/subjectRecognition', handle_post)
    runner = web.AppRunner(app)
    await runner.setup()
    site = web.TCPSite(runner, '127.0.0.1', 8082)
    await site.start()
    print("Ultra-detailed memory leak detection")
    print("=" * 80)
    print(f"Started: {datetime.now()}")
    print(f"Python: {sys.version}")
    print(f"aiohttp: {aiohttp.__version__}")
    print(f"Test: {BATCHES} batches × {ITERATIONS_PER_BATCH} requests = {BATCHES * ITERATIONS_PER_BATCH} total")
    print("=" * 80)

    # Original pattern from issue
    async def request_post():
        async with aiohttp.ClientSession() as session:
            for i in range(ITERATIONS_PER_BATCH):
                async with session.post('http://127.0.0.1:8082/test_memory/v1/subjectRecognition', data=b'test') as resp:
                    reply = await resp.read()
            return reply

    # Warm up
    print("\nWarming up...")
    for _ in range(5):
        await request_post()
        await asyncio.sleep(1)

    # Force cleanup and take baseline (repeated collects to drain all GC generations)
    gc.collect()
    gc.collect()
    gc.collect()
    baseline_memory = get_detailed_memory()
    baseline_snapshot = tracemalloc.take_snapshot()
    print(f"\nBaseline:")
    print(f" RSS: {baseline_memory['rss_mb']:.2f} MB")
    print(f" GC objects: {baseline_memory['gc_objects']}")

    # Data collection
    memory_samples = []
    print("\nRunning test...")
    for batch in range(BATCHES):
        await request_post()
        await asyncio.sleep(1)
        # Periodic reporting
        if (batch + 1) % REPORT_EVERY == 0:
            # Force GC
            gc.collect()
            gc.collect()
            # Get current stats
            current_memory = get_detailed_memory()
            memory_samples.append(current_memory)
            # Calculate deltas
            rss_delta = current_memory['rss_mb'] - baseline_memory['rss_mb']
            gc_delta = current_memory['gc_objects'] - baseline_memory['gc_objects']
            print(f"\nBatch {batch + 1}/{BATCHES}:")
            print(f" RSS: {current_memory['rss_mb']:.2f} MB (Δ {rss_delta:+.2f} MB)")
            print(f" GC objects: {current_memory['gc_objects']} (Δ {gc_delta:+d})")
            print(f" Tracked - Sessions: {current_memory['sessions']}, "
                  f"Responses: {current_memory['responses']}, "
                  f"Requests: {current_memory['requests']}")
            # Calculate leak rate
            if len(memory_samples) >= 3:
                # Simple linear regression on last 3 samples
                recent = memory_samples[-3:]
                rss_values = [s['rss_mb'] for s in recent]
                if len(set(rss_values)) > 1:  # Only if there's variation
                    # Calculate average growth per batch
                    growth = (rss_values[-1] - rss_values[0]) / (3 * REPORT_EVERY)
                    leak_per_request = growth * 1024 / ITERATIONS_PER_BATCH  # KB per request
                    print(f" Leak rate: {leak_per_request:.3f} KB/request")
                    # Estimate time to leak 1MB
                    if leak_per_request > 0:
                        requests_per_mb = 1024 / leak_per_request
                        print(f" Requests to leak 1MB: {requests_per_mb:.0f}")

    # Final analysis
    print("\n" + "=" * 80)
    print("Final Analysis:")
    gc.collect()
    final_memory = get_detailed_memory()
    final_snapshot = tracemalloc.take_snapshot()
    # Memory growth
    total_rss_growth = final_memory['rss_mb'] - baseline_memory['rss_mb']
    total_requests = BATCHES * ITERATIONS_PER_BATCH
    print(f"\nMemory growth:")
    print(f" Total RSS growth: {total_rss_growth:.2f} MB")
    print(f" Growth per request: {total_rss_growth * 1024 / total_requests:.3f} KB")
    print(f" GC objects growth: {final_memory['gc_objects'] - baseline_memory['gc_objects']}")
    # Tracemalloc analysis
    print("\nTop memory allocations (by total size):")
    top_stats = final_snapshot.compare_to(baseline_snapshot, 'lineno')
    for stat in sorted(top_stats, key=lambda x: x.size_diff, reverse=True)[:15]:
        if stat.size_diff > 1024:  # Only show > 1KB
            print(f" {stat}")
    # Check for leaked tracked objects
    print(f"\nTracked objects still alive:")
    print(f" Sessions: {len(tracked_sessions)}")
    print(f" Responses: {len(tracked_responses)}")
    print(f" Requests: {len(tracked_requests)}")
    # Conclusion
    if total_rss_growth < 1:
        print("\n✅ No significant memory leak detected")
    elif total_rss_growth < 5:
        print(f"\n⚠️ Small memory growth detected: {total_rss_growth:.2f} MB")
    else:
        print(f"\n❌ Significant memory leak: {total_rss_growth:.2f} MB")
    await runner.cleanup()
    tracemalloc.stop()
    # Restore original methods
    aiohttp.ClientSession.__init__ = original_session_init
    aiohttp.ClientResponse.__init__ = original_response_init
    aiohttp.ClientRequest.__init__ = original_request_init
if __name__ == "__main__":
    # Script entry point: run the batch test on a fresh event loop.
    asyncio.run(main())
malloc_trim
You mean malloc_trim will be added to free the memory? Is that a "remedy" solution, meaning the real root cause cannot be figured out for now? Which project can we continue to turn to for help? =_=
I have tried to utilize malloc_trim, but it seems it does not decrease the memory?
import asyncio
import aiohttp
from aiohttp import web
import resource
import ctypes
# Server code
async def handle_post(request):
    """Fixed-response handler for the malloc_trim experiment."""
    await request.read()  # drain the body so keep-alive reuse stays clean
    reply = {
        "code": 0,
        "message": "subjectRecognition",
        "data": "subjectRecognition",
    }
    return web.json_response(reply, status=200)
async def start_server():
    """Launch the local echo server on 127.0.0.1:8080 and hand back the runner."""
    application = web.Application()
    application.router.add_post('/test_memory/v1/subjectRecognition', handle_post)
    app_runner = web.AppRunner(application)
    await app_runner.setup()
    site = web.TCPSite(app_runner, '127.0.0.1', 8080)
    await site.start()
    return app_runner
def trim_memory() -> int:
    """Ask glibc to return free heap pages to the OS; print RSS before/after.

    Returns malloc_trim's result (1 if some memory was released, 0 otherwise);
    the original was annotated ``-> int`` but returned None.

    NOTE(review): the original read ``resource.getrusage(...).ru_maxrss``,
    which is the *peak* RSS and never decreases, so it could not possibly
    show malloc_trim having any effect (matching the "does not decrease
    memory" observation in this thread). Read the *current* RSS from
    /proc/self/statm instead -- Linux-only, like "libc.so.6" itself.
    """
    import os  # local import: this script does not import os at the top

    def current_rss_kb() -> float:
        # /proc/self/statm field 2 is the resident page count.
        with open("/proc/self/statm") as f:
            resident_pages = int(f.read().split()[1])
        return resident_pages * os.sysconf("SC_PAGESIZE") / 1024

    mem_kb = current_rss_kb()
    print(f"before trim RSS: {mem_kb:.2f} KB")
    libc = ctypes.CDLL("libc.so.6")
    result = libc.malloc_trim(0)
    mem_kb = current_rss_kb()
    print(f"after trim RSS: {mem_kb:.2f} KB")
    return result
# Client code
async def main():
    """Fire endless batches of 100 POSTs, calling malloc_trim roughly every 10 batches."""
    runner = await start_server()
    j = 0  # batch counter between trims

    async def request_post():
        async with aiohttp.ClientSession() as session:
            for i in range(100):
                async with session.post('http://127.0.0.1:8080/test_memory/v1/subjectRecognition', data=b'test') as resp:
                    reply = await resp.read()
                    # Check memory here.
                    # NOTE(review): ru_maxrss is the *peak* RSS (KB on Linux,
                    # bytes on macOS) and never goes down -- confirm; a
                    # monotonic series here does not by itself prove a leak.
                    mem_kb = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
                    print(f"RSS: {mem_kb:.2f} KB")
            return reply

    while True:
        if j > 10:
            print(f"start to trim")
            trim_memory()
            j = 0
        j += 1
        reply = await request_post()
        await asyncio.sleep(1)
    await runner.cleanup()  # NOTE(review): unreachable -- the loop above never exits
if __name__ == "__main__":
    # Script entry point: run the trim experiment on a fresh event loop.
    asyncio.run(main())
I wonder if this was related to the multidict memory leak that I have been working on trying to patch.