diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index 6343145..3facbfe 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -13,6 +13,10 @@
 #include <linux/freezer.h>
 #include <linux/syscore_ops.h>
 #include <linux/export.h>
+#include <linux/memcontrol.h>
+#include <linux/mm.h>
+#include <linux/mm_inline.h>
+#include <linux/fs.h>
 
 #include <xen/xen.h>
 #include <xen/xenbus.h>
@@ -20,6 +24,7 @@
 #include <xen/events.h>
 #include <xen/hvc-console.h>
 #include <xen/xen-ops.h>
+#include <xen/interface/pagecache.h>
 
 #include <asm/xen/hypercall.h>
 #include <asm/xen/page.h>
@@ -103,6 +108,139 @@ static int xen_suspend(void *data)
 	return 0;
 }
 
+/*
+ * Walk every page on every buddy free list.  Relies on the caller
+ * declaring 'zone', 'order', 't' and 'curr', and is only safe while
+ * the allocator is quiescent, as late in the suspend path.
+ */
+#define for_each_free_page() \
+	for_each_populated_zone(zone) \
+		for_each_migratetype_order(order, t) \
+			list_for_each(curr, &zone->free_area[order].free_list[t])
+
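+/*
+ * Report the PFN of every page on the buddy free lists to the
+ * hypervisor; those frames hold no data worth preserving.
+ */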
+static void aggregate_free_pages(void)
+{
+	struct zone *zone;
+	struct list_head *curr;
+	unsigned long i, cnt = 0, nr_free = 0;
+	int order, t;
+	pagecache_notify_pages_req_t req;
+
+	for_each_free_page() {
+		struct page *page = list_entry(curr, struct page, lru);
+		unsigned long pfn = page_to_pfn(page);
+
+		/* An order-N buddy covers 2^N contiguous frames. */
+		for (i = 0; i < (1UL << order); i++) {
+			req.pfn[nr_free]   = pfn + i;
+			req.sec[nr_free++] = 1;	/* no backing sector */
+			cnt++;
+			/* Flush each full batch to the hypervisor. */
+			if (nr_free == MAX_PAGES_REQUEST) {
+				req.len = nr_free;
+				HYPERVISOR_hvm_op(HVMOP_pages_notify, &req);
+				nr_free = 0;
+			}
+		}
+	}
+
+	/* Flush the final, partially filled batch. */
+	if (nr_free > 0) {
+		req.len = nr_free;
+		HYPERVISOR_hvm_op(HVMOP_pages_notify, &req);
+	}
+
+	pr_info("xen_pagecache: aggregate_free_pages %lu\n", cnt);
+}
+
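+/*
+ * Visit every memory cgroup, recomputing the per-zone lruvec each
+ * time.  Relies on the caller declaring 'zone', 'memcg', 'lruvec'
+ * and 'count'.
+ */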
+#define generic_for_each_mem_cgroup() \
+	for (memcg = mem_cgroup_iter(NULL, NULL, NULL), count = 0, \
+	     lruvec = mem_cgroup_zone_lruvec(zone, memcg); \
+	     count == 0 || memcg != NULL; \
+	     memcg = mem_cgroup_iter(NULL, memcg, NULL), count++, \
+	     lruvec = (memcg ? mem_cgroup_zone_lruvec(zone, memcg) : NULL))
+
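+/* Walk every page on every LRU list of every cgroup in every zone. */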
+#define for_each_page() \
+	for_each_populated_zone(zone) \
+		for_each_lru(lru) \
+			generic_for_each_mem_cgroup() \
+				list_for_each(curr, &lruvec->lists[lru])
+
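+/*
+ * Report the PFN and backing disk sector of every clean file-backed
+ * page-cache page to the hypervisor ahead of suspend.
+ */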
+static void aggregate_clean_pages(void)
+{
+	enum lru_list lru;
+	struct zone *zone;
+	struct list_head *curr;
+	struct lruvec *lruvec;
+	struct mem_cgroup *memcg;
+	unsigned long nr_clean = 0;
+	unsigned long clean = 0;
+	unsigned long dirty = 0;
+	int count = 0;
+	pagecache_notify_pages_req_t req;
+
+	for_each_page() {
+		struct page *page = list_entry(curr, struct page, lru);
+
+		/* Only file-backed pages have an on-disk copy to fall back on. */
+		if (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE) {
+			struct address_space *mapping = page->mapping;
+			sector_t block;
+
+			if (!(mapping && mapping->a_ops && mapping->a_ops->bmap))
+				continue;
+
+			/*
+			 * Conservative: skip the whole mapping if any
+			 * of its pages is dirty, rather than testing
+			 * the individual page.
+			 */
+			if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
+				/*
+				 * bmap() yields the file block number;
+				 * the multiply by 8 assumes 4 KiB
+				 * blocks and 512-byte sectors.
+				 */
+				block = mapping->a_ops->bmap(mapping, page->index);
+				req.pfn[nr_clean]   = page_to_pfn(page);
+				req.sec[nr_clean++] = block * 8;
+
+				if (nr_clean == MAX_PAGES_REQUEST) {
+					req.len = nr_clean;
+					HYPERVISOR_hvm_op(HVMOP_pages_notify,
+							  &req);
+					nr_clean = 0;
+				}
+				clean++;
+			} else
+				dirty++;
+		}
+	}
+
+	/* Flush the final, partially filled batch. */
+	if (nr_clean > 0) {
+		req.len = nr_clean;
+		HYPERVISOR_hvm_op(HVMOP_pages_notify, &req);
+	}
+
+	pr_info("xen_pagecache: aggregate_clean_pages, dirty:%lu, clean:%lu\n",
+		dirty, clean);
+}
+
 static void do_suspend(void)
 {
 	int err;
@@ -116,6 +254,8 @@ static void do_suspend(void)
 		goto out;
 	}
 
+	/* Record clean page-cache pages before kernel threads freeze. */
+	aggregate_clean_pages();
 	err = freeze_kernel_threads();
 	if (err) {
 		pr_err("%s: freeze kernel threads failed %d\n", __func__, err);
@@ -150,6 +290,8 @@
 		si.post = &xen_post_suspend;
 	}
 
+	/* Report free pages at the last moment before suspending. */
+	aggregate_free_pages();
 	err = stop_machine(xen_suspend, &si, cpumask_of(0));
 
 	/* Resume console as early as possible. */
diff --git a/include/xen/interface/pagecache.h b/include/xen/interface/pagecache.h
new file mode 100644
index 0000000..4da82c9
--- /dev/null
+++ b/include/xen/interface/pagecache.h
@@ -0,0 +1,26 @@
+/******************************************************************************
+ * pagecache.h
+ *
+ * Pagecache interface between guests and hypervisor.
+ *
+ * Copyright (c) 2015, Changyeon Jo.
+ */
+
+#ifndef __XEN_PUBLIC_PAGECACHE_H__
+#define __XEN_PUBLIC_PAGECACHE_H__
+
+/* Maximum number of (pfn, sector) pairs carried per notification. */
+#define MAX_PAGES_REQUEST 48
+
+/* HVMOP number used for the page notification hypercall. */
+#define HVMOP_pages_notify 24
+
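+/* Batch of page notifications passed to HYPERVISOR_hvm_op(). */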
+struct pagecache_notify_pages_req {
+	unsigned long pfn[MAX_PAGES_REQUEST];	/* guest frame numbers */
+	unsigned long sec[MAX_PAGES_REQUEST];	/* backing 512-byte sectors */
+	unsigned long len;			/* number of valid entries */
+};
+typedef struct pagecache_notify_pages_req pagecache_notify_pages_req_t;
+
+#endif /* __XEN_PUBLIC_PAGECACHE_H__ */
