+/* Zero the pages from START to END (inclusive) by unmapping them and
+ * remapping fresh zero-filled pages at the same address, then clear
+ * each page's need_to_zero flag. Does nothing for an empty range
+ * (start > end).
+ */
+void zero_pages_with_mmap(page_index_t start, page_index_t end) {
+    /* page_index_t, not int: an int can truncate large page indices. */
+    page_index_t i;
+    void *addr, *new_addr;
+    size_t length;
+
+    if (start > end)
+        return;
+
+    addr = (void *) page_address(start);
+    length = PAGE_BYTES*(1+end-start);
+
+    /* Throw the old pages away and ask the OS for fresh (zero-filled)
+     * ones at the same address. If the mapping comes back anywhere
+     * else the heap layout is unrecoverable. */
+    os_invalidate(addr, length);
+    new_addr = os_validate(addr, length);
+    if (new_addr == NULL || new_addr != addr) {
+        /* Report the correct function name (was "remap_free_pages")
+         * and format the addresses with pointer-width %p. */
+        lose("zero_pages_with_mmap: page moved, %p ==> %p", addr, new_addr);
+    }
+
+    /* The freshly mapped pages are guaranteed zero already. */
+    for (i = start; i <= end; i++) {
+        page_table[i].need_to_zero = 0;
+    }
+}
+
+/* Fill the pages from START through END (inclusive) with zero bytes.
+ * Typically called right after a fresh region has been allocated.
+ * A descending range (start > end) is treated as empty.
+ */
+static void
+zero_pages(page_index_t start, page_index_t end) {
+    if (end < start)
+        return;
+
+    fast_bzero(page_address(start), PAGE_BYTES*(1+end-start));
+}
+
+/* Zero the pages from START to END (inclusive), unless every page in
+ * the range is known to be zero-filled already. Afterwards, flag all
+ * pages in the range as needing to be zeroed before reuse.
+ */
+static void
+zero_dirty_pages(page_index_t start, page_index_t end) {
+    page_index_t i = start;
+
+    /* A single dirty page forces zeroing of the whole range. */
+    while (i <= end) {
+        if (page_table[i].need_to_zero == 1) {
+            zero_pages(start, end);
+            break;
+        }
+        i++;
+    }
+
+    for (i = start; i <= end; i++)
+        page_table[i].need_to_zero = 1;
+}
+