age->page_type. Unused for page tables. * @__page_refcount: Same as page refcount. - * @pt_memcg_data: Memcg data. Tracked for page tables here. + * @pt_memcg: Memcg that this page table belongs to. * * This struct overlays struct page for now. Do not modify without a good * understanding of the issues. @@ -602,7 +602,7 @@ struct ptdesc { unsigned int __page_type; atomic_t __page_refcount; #ifdef CONFIG_MEMCG - unsigned long pt_memcg_data; + struct mem_cgroup *pt_memcg; #endif }; @@ -617,7 +617,7 @@ TABLE_MATCH(rcu_head, pt_rcu_head); TABLE_MATCH(page_type, __page_type); TABLE_MATCH(_refcount, __page_refcount); #ifdef CONFIG_MEMCG -TABLE_MATCH(memcg_data, pt_memcg_data); +TABLE_MATCH(memcg_data, pt_memcg); #endif #undef TABLE_MATCH static_assert(sizeof(struct ptdesc) <= sizeof(struct page)); diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index 3c9c266cf782..0da38ea25c97 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -518,7 +518,8 @@ static inline const char *vm_event_name(enum vm_event_item item) void mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, int val); - +void memcg_stat_mod(struct mem_cgroup *memcg, pg_data_t *pgdat, + enum node_stat_item idx, long val); void lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx, int val); @@ -536,6 +537,12 @@ static inline void mod_lruvec_state(struct lruvec *lruvec, mod_node_page_state(lruvec_pgdat(lruvec), idx, val); } +static inline void memcg_stat_mod(struct mem_cgroup *memcg, pg_data_t *pgdat, + enum node_stat_item idx, long val) +{ + mod_node_page_state(pgdat, idx, val); +} + static inline void lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx, int val) { diff --git a/mm/memcontrol.c b/mm/memcontrol.c index a52da3a5e4fd..8d9e4a42aecf 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -787,24 +787,27 @@ void mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, mod_memcg_lruvec_state(lruvec, idx, val); } +void 
memcg_stat_mod(struct mem_cgroup *memcg, pg_data_t *pgdat, + enum node_stat_item idx, long val) +{ + /* Untracked pages have no memcg, no lruvec. Update only the node */ + if (!memcg) { + mod_node_page_state(pgdat, idx, val); + } else { + struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); + mod_lruvec_state(lruvec, idx, val); + } +} + void lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx, int val) { struct mem_cgroup *memcg; pg_data_t *pgdat = folio_pgdat(folio); - struct lruvec *lruvec; rcu_read_lock(); memcg = folio_memcg(folio); - /* Untracked pages have no memcg, no lruvec. Update only the node */ - if (!memcg) { - rcu_read_unlock(); - mod_node_page_state(pgdat, idx, val); - return; - } - - lruvec = mem_cgroup_lruvec(memcg, pgdat); - mod_lruvec_state(lruvec, idx, val); + memcg_stat_mod(memcg, pgdat, idx, val); rcu_read_unlock(); } EXPORT_SYMBOL(lruvec_stat_mod_folio); @@ -812,24 +815,9 @@ EXPORT_SYMBOL(lruvec_stat_mod_folio); void mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val) { pg_data_t *pgdat = page_pgdat(virt_to_page(p)); - struct mem_cgroup *memcg; - struct lruvec *lruvec; rcu_read_lock(); - memcg = mem_cgroup_from_virt(p); - - /* - * Untracked pages have no memcg, no lruvec. Update only the - * node. If we reparent the slab objects to the root memcg, - * when we free the slab object, we need to update the per-memcg - * vmstats to keep it correct for the root memcg. - */ - if (!memcg) { - mod_node_page_state(pgdat, idx, val); - } else { - lruvec = mem_cgroup_lruvec(memcg, pgdat); - mod_lruvec_state(lruvec, idx, val); - } + memcg_stat_mod(mem_cgroup_from_virt(p), pgdat, idx, val); rcu_read_unlock(); }

[PATCH] Revert "ptdesc: remove references to folios from __pagetable_ctor() and pagetable_dtor()"
Matthew Wilcox
Axel Rasmussen