-
|
I am currently using sea-orm with PostgreSQL. It works fine; however, when I insert a huge entry, memory usage grows sharply and does not come back down. I wrote a small demo to demonstrate. async fn test_process_signal_service() -> anyhow::Result<()> {
let mut opt: ConnectOptions = ConnectOptions::new("postgres://ly:*****@127.0.0.1:5432/postgres");
opt.max_connections(20)
.min_connections(5)
.sqlx_logging(false);
let db = Database::connect(opt).await?;
let content = fs::read_to_string("test.txt").unwrap();
let array : Vec<ProcessData> = serde_json::from_str(&content).unwrap();
let active_model = align_data_segment::ActiveModel {
align_data_id: Set(10),
data: Set(array), // data is a huge json, approximately 10 MB.
start_at: Set(2132133245231),
end_at: Set(53242113321),
sequence_id: Set(0),
..Default::default()
};
println!("the memory before insert: {}",get_memory_usage()); // print 10518KB
active_model.insert(&db).await?;
println!("the memory after insert: {}",get_memory_usage()); // print 121443KB
Ok(())
}
pub fn get_memory_usage() -> i64 {
let pid = process::id();
let process = Process::new(pid as i32).unwrap();
let status = process.stat().unwrap();
status.rss // RSS (Resident Set Size),KB
} Because rows are inserted into this table many times over the lifetime of my project, this leads to increasingly higher memory usage. I don't understand why, but when I use raw SQL with sea-orm, the memory usage returns to normal. async fn test_process_signal_service() -> anyhow::Result<()> {
let mut opt: ConnectOptions = ConnectOptions::new("postgres://ly:*****@127.0.0.1:5432/postgres");
opt.max_connections(20)
.min_connections(5)
.sqlx_logging(false);
let db = Database::connect(opt).await?;
let content = fs::read_to_string("test.txt").unwrap();
let array : Vec<ProcessData> = serde_json::from_str(&content).unwrap();
let sql = r#"
insert into align_data_segment (align_data_id, data, start_at, end_at, sequence_id)
VALUES ($1, $2::jsonb, $3, $4, $5)
"#;
let stmt = Statement::from_sql_and_values(
DbBackend::Postgres,
sql,
vec![
Value::from(10),
Value::from(content), // content is a huge json, approximately 10 MB.
Value::from(124213),
Value::from(2345443),
Value::from(0),
],
);
println!("the memory before insert: {}",get_memory_usage()); // print 10518KB
let _ = db.query_one(stmt).await.unwrap();
println!("the memory after insert: {}",get_memory_usage()); // print 13164KB
Ok(())
}Is this normal ORM behavior, or am I using it incorrectly? |
Beta Was this translation helpful? Give feedback.
Replies: 1 comment 3 replies
-
|
I don't think RSS is a good fine-grained measure for the real memory usage of a single statement. I don't believe there is a memory leak or anything specific to SeaORM. Alternatively, don't even parse the JSON into serde_json::Value — process it as a raw string and bind it directly, as in your second example. |
Beta Was this translation helpful? Give feedback.
We don't have caches. The default allocator does not return memory to the OS immediately. Memory fragmentation can also prevent memory from being reclaimed by the system.
Compared to the raw SQL, we performed more heap allocations.
We did not implement any special Drop. If memory still "leaks" after switching allocators, it may be due to one of our dependencies.