/external/chromium/base/ |
H A D | sys_info_freebsd.cc |
    14  int pages, page_size;  [local]
    15  size_t size = sizeof(pages);
    16  sysctlbyname("vm.stats.vm.v_page_count", &pages, &size, NULL, 0);
    18  if (pages == -1 || page_size == -1) {
    22  return static_cast<int64>(pages) * page_size;
|
H A D | sys_info_linux.cc |
    13  long pages = sysconf(_SC_PHYS_PAGES);  [local]
    15  if (pages == -1 || page_size == -1) {
    20  return static_cast<int64>(pages) * page_size;
|
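Both SysInfo implementations above report total physical memory as page count times page size; the Linux version reads both values from sysconf(), while the FreeBSD version queries the page count through sysctlbyname("vm.stats.vm.v_page_count"). A minimal standalone sketch of the same calculation for a POSIX system that provides _SC_PHYS_PAGES (Linux/glibc and the modern BSDs); the function name below is only a placeholder, not the exact Chromium entry point:

    #include <unistd.h>
    #include <cstdint>
    #include <cstdio>

    // Total physical memory = number of physical pages * bytes per page.
    // Returns 0 on failure, mirroring the -1 checks in the snippets above.
    static int64_t PhysicalMemoryBytes() {
        long pages = sysconf(_SC_PHYS_PAGES);    // physical page count
        long page_size = sysconf(_SC_PAGESIZE);  // size of one page in bytes
        if (pages == -1 || page_size == -1)
            return 0;
        return static_cast<int64_t>(pages) * page_size;
    }

    int main() {
        std::printf("physical memory: %lld bytes\n",
                    static_cast<long long>(PhysicalMemoryBytes()));
        return 0;
    }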
/external/webkit/WebCore/page/ |
H A D | PageGroupLoadDeferrer.cpp |
    35  const HashSet<Page*>& pages = page->group().pages();  [local]
    37  HashSet<Page*>::const_iterator end = pages.end();
    38  for (HashSet<Page*>::const_iterator it = pages.begin(); it != end; ++it) {
|
H A D | PageGroup.cpp |
    138  HashSet<Page*> pages = it->second->pages();  [local]
    139  HashSet<Page*>::iterator pagesEnd = pages.end();
    141  for (HashSet<Page*>::iterator pit = pages.begin(); pit != pagesEnd; ++pit) {
    145  // to the same value on all private browsing and "normal" pages,
    150  // DOM storage is disabled in private browsing pages, so nothing to do if
    164  // If page is still null at this point, then the only pages that are
    165  // open are private browsing pages. Hence no pages are currently using local
    190  HashSet<Page*> pages …  [local]
    … (more matches not shown)
|
H A D | FrameTree.cpp |
    199  // Search the entire tree of each of the other pages in this namespace.
    201  const HashSet<Page*>& pages = page->group().pages();  [local]
    202  HashSet<Page*>::const_iterator end = pages.end();
    203  for (HashSet<Page*>::const_iterator it = pages.begin(); it != end; ++it) {
|
H A D | PageGroup.h |
    55  const HashSet<Page*>& pages() const { return m_pages; }  [function in class WebCore::PageGroup]
|
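The four WebCore/page entries above share one idiom, which reappears below in the storage event dispatchers and in ScriptController.cpp: take the owning PageGroup's HashSet<Page*> via pages(), cache the end iterator, and visit every Page in the group. A simplified standalone sketch of that traversal, with std::unordered_set standing in for WTF::HashSet and stub Page/PageGroup classes in place of the real WebCore types:

    #include <iostream>
    #include <string>
    #include <unordered_set>

    // Stub stand-ins for WebCore::Page and WebCore::PageGroup.
    struct Page {
        std::string name;
        explicit Page(const std::string& n) : name(n) {}
    };

    class PageGroup {
    public:
        void addPage(Page* page) { m_pages.insert(page); }
        const std::unordered_set<Page*>& pages() const { return m_pages; }
    private:
        std::unordered_set<Page*> m_pages;
    };

    // Mirrors the loop in PageGroupLoadDeferrer/FrameTree: grab the set once,
    // cache end(), then act on each page in the group.
    static void visitGroup(const PageGroup& group) {
        const std::unordered_set<Page*>& pages = group.pages();
        std::unordered_set<Page*>::const_iterator end = pages.end();
        for (std::unordered_set<Page*>::const_iterator it = pages.begin(); it != end; ++it)
            std::cout << "visiting page: " << (*it)->name << '\n';
    }

    int main() {
        Page a("first"), b("second");
        PageGroup group;
        group.addPage(&a);
        group.addPage(&b);
        visitGroup(group);
        return 0;
    }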
/external/webkit/WebKit/chromium/src/ |
H A D | StorageEventDispatcherImpl.cpp |
    65  const HashSet<Page*>& pages = m_pageGroup->pages();  [local]
    66  HashSet<Page*>::const_iterator end = pages.end();
    67  for (HashSet<Page*>::const_iterator it = pages.begin(); it != end; ++it) {
|
H A D | StorageAreaProxy.cpp |
    130  const HashSet<Page*>& pages = page->group().pages();  [local]
    131  HashSet<Page*>::const_iterator end = pages.end();
    132  for (HashSet<Page*>::const_iterator it = pages.begin(); it != end; ++it) {
|
/external/webkit/WebCore/storage/ |
H A D | StorageEventDispatcher.cpp |
    65  const HashSet<Page*>& pages = page->group().pages();  [local]
    66  HashSet<Page*>::const_iterator end = pages.end();
    67  for (HashSet<Page*>::const_iterator it = pages.begin(); it != end; ++it) {
|
/external/kernel-headers/original/linux/ |
H A D | vmalloc.h |
    12  #define VM_MAP      0x00000004  /* vmap()ed pages */
    14  #define VM_VPAGES   0x00000010  /* buffer for pages was vmalloc'ed */
    22  #define IOREMAP_MAX_ORDER  (7 + PAGE_SHIFT)  /* 128 pages */
    29  struct page **pages;  [member of struct vm_struct]
    51  extern void *vmap(struct page **pages, unsigned int count,
    69  struct page ***pages);
|
H A D | pagemap.h |
    52   void release_pages(struct page **pages, int nr, int cold);
    80   unsigned int nr_pages, struct page **pages);
    82   unsigned int nr_pages, struct page **pages);
    84   int tag, unsigned int nr_pages, struct page **pages);
    100  struct list_head *pages, filler_t *filler, void *data);
    175  * This assumes that two userspace pages are always sufficient. That's
|
H A D | nfs_xdr.h |
    245  struct page ** pages;  [member of struct nfs_readargs]
    264  struct page ** pages;  [member of struct nfs_writeargs]
    371  struct page ** pages;  [member of struct nfs_readdirargs]
    377  struct page ** pages;  [member of struct nfs3_getaclargs]
    385  struct page ** pages;  [member of struct nfs3_setaclargs]
    397  struct page ** pages;  [member of struct nfs_readlinkargs]
    474  struct page ** pages;  [member of struct nfs3_readdirargs]
    492  struct page ** pages;  [member of struct nfs3_readlinkargs]
    614  struct page ** pages;  /* zero-copy data */  [member of struct nfs4_readdir_arg]
    628  struct page ** pages;  /* zer…  [member of struct nfs4_readlink]
    729  struct list_head pages;  /* Coalesced read requests */  [member of struct nfs_read_data]
    748  struct list_head pages;  /* Coalesced requests we wish to flush */  [member of struct nfs_write_data]
    … (more matches not shown)
|
H A D | swap.h |
    26   * for the type means that the maximum number of swapcache pages is 27 bits
    93   * A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of
    108  * Max bad pages in the new format..
    143  unsigned int pages;  [member of struct swap_info_struct]
    285  #define free_pages_and_swap_cache(pages, nr) \
    286  release_pages((pages), (nr), 0);
|
/external/kernel-headers/original/asm-generic/ |
H A D | tlb.h |
    44   struct page * pages[FREE_PTE_NR];  [member of struct mmu_gather]
    76   free_pages_and_swap_cache(tlb->pages, tlb->nr);
    108  tlb->pages[tlb->nr++] = page;
    118  * unmapping already-unmapped pages, which happens quite a lot.
|
/external/webkit/JavaScriptCore/jit/ |
H A D | ExecutableAllocatorWin.cpp | 54 VirtualFree(alloc.pages, 0, MEM_RELEASE);
|
H A D | ExecutableAllocator.h |
    86   char* pages;  [member of struct JSC::ExecutablePool::Allocation]
    260  m_freePtr = mem.pages;
    271  if (!result.pages)
    277  m_freePtr = result.pages + n;
    278  m_end = result.pages + allocSize;
    282  return result.pages;
|
H A D | ExecutableAllocatorPosix.cpp | 56 int result = munmap(alloc.pages, alloc.size);
|
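The three JavaScriptCore entries above show the allocator handing out whole pages of executable memory and releasing them with munmap() on POSIX or VirtualFree() on Windows. A rough standalone sketch of the POSIX side, assuming a platform that permits anonymous mmap() with PROT_EXEC; the Allocation struct below is only a simplified analogue of JSC::ExecutablePool::Allocation, not its real definition:

    #include <sys/mman.h>
    #include <unistd.h>
    #include <cstddef>
    #include <cstdio>

    // Simplified analogue of ExecutablePool::Allocation: a base pointer plus a size.
    struct Allocation {
        char* pages;
        size_t size;
    };

    // Reserve 'size' bytes of page-aligned, writable and executable memory.
    static Allocation allocatePages(size_t size) {
        void* mem = mmap(NULL, size, PROT_READ | PROT_WRITE | PROT_EXEC,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        Allocation alloc = { mem == MAP_FAILED ? NULL : static_cast<char*>(mem), size };
        return alloc;
    }

    // Hand the pages back to the system, as ExecutableAllocatorPosix.cpp does.
    static void releasePages(const Allocation& alloc) {
        if (alloc.pages)
            munmap(alloc.pages, alloc.size);
    }

    int main() {
        size_t pageSize = static_cast<size_t>(sysconf(_SC_PAGESIZE));
        Allocation alloc = allocatePages(4 * pageSize);
        if (!alloc.pages) {
            std::perror("mmap");
            return 1;
        }
        std::printf("reserved %zu executable bytes at %p\n",
                    alloc.size, static_cast<void*>(alloc.pages));
        releasePages(alloc);
        return 0;
    }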
/external/bluetooth/glib/tests/ |
H A D | slice-threadinit.c |
    23   #define N_PAGES (101)  /* number of pages to sample */
    34   } pages[N_PAGES] = { { NULL, }, };  [variable of anonymous struct type __anon656]
    63   if (pages[j].page == p)
    81   /* allocate blocks from N_PAGES different pages */
    87   if (pages[j].page == p)
    94   pages[j].page = p;
    95   pages[j].sample = b;
    101  /* ensure that we can allocate from known pages */
    156  /* ensure that we can allocate from known pages */
|
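slice-threadinit.c samples g_slice allocations spread across many different memory pages to exercise the slice allocator around thread initialisation. A loose, simplified sketch of that kind of per-page bookkeeping, assuming GLib development headers are available and using the system page size as a stand-in for the allocator's page granularity (which may differ from what the real test uses):

    #include <glib.h>
    #include <unistd.h>
    #include <cstdio>

    // Round an address down to the start of its memory page, so two
    // allocations can be compared by the page they landed on.
    static gpointer page_of(gpointer mem, gsize page_size) {
        return GSIZE_TO_POINTER(GPOINTER_TO_SIZE(mem) & ~(page_size - 1));
    }

    int main() {
        const gsize block_size = 512;  // arbitrary slice size for this sketch
        const gsize page_size = static_cast<gsize>(sysconf(_SC_PAGESIZE));
        gpointer a = g_slice_alloc(block_size);
        gpointer b = g_slice_alloc(block_size);
        std::printf("block a is on page %p, block b on page %p\n",
                    page_of(a, page_size), page_of(b, page_size));
        g_slice_free1(block_size, a);
        g_slice_free1(block_size, b);
        return 0;
    }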
/external/webkit/WebCore/page/wince/ |
H A D | FrameWince.cpp |
    64   void computePageRectsForFrame(Frame* frame, const IntRect& printRect, float headerHeight, float footerHeight, float userScaleFactor, Vector<IntRect>& pages, int& outPageHeight)  [argument]
    68   pages.clear();
    111  pages.append(IntRect(0, printedPagesHeight, currPageWidth, currPageHeight));
|
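computePageRectsForFrame() slices a frame's printable content into a vector of per-page rectangles, stacking them by a running printed height (lines 68 and 111 above). A simplified standalone sketch of that pagination arithmetic, with a plain rect struct instead of WebCore's IntRect and with headers, footers, and the user scale factor ignored:

    #include <cstdio>
    #include <vector>

    struct IntRect {
        int x, y, width, height;
    };

    // Split a content area of 'contentHeight' units into vertically stacked
    // pages of pageWidth x pageHeight, like the WinCE pagination loop does.
    static void computePageRects(int contentHeight, int pageWidth, int pageHeight,
                                 std::vector<IntRect>& pages) {
        pages.clear();
        int printedPagesHeight = 0;
        while (printedPagesHeight < contentHeight) {
            int currPageHeight = pageHeight;
            // Clamp the last page so it does not extend past the content.
            if (printedPagesHeight + currPageHeight > contentHeight)
                currPageHeight = contentHeight - printedPagesHeight;
            pages.push_back(IntRect{0, printedPagesHeight, pageWidth, currPageHeight});
            printedPagesHeight += pageHeight;
        }
    }

    int main() {
        std::vector<IntRect> pages;
        computePageRects(2500, 800, 1000, pages);
        for (const IntRect& r : pages)
            std::printf("page at y=%d, %dx%d\n", r.y, r.width, r.height);
        return 0;
    }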
/external/kernel-headers/original/linux/mtd/ |
H A D | bbm.h |
    23  * @pages: the page(s) where we find the bbt, used with
    25  * then we store the found bbts pages here.
    46  int pages[NAND_MAX_CHIPS];  [member of struct nand_bbt_descr]
    76  /* Search good / bad pattern through all pages of a block */
|
/external/kernel-headers/original/linux/sunrpc/ |
H A D | xdr.h |
    41   * pages.
    43   * main interest is for appending padding to the pages in order to
    48   * operations and/or has a need for scatter/gather involving pages.
    54   struct page ** pages;  /* Array of contiguous pages */  [member of struct xdr_buf]
    192  extern void xdr_write_pages(struct xdr_stream *xdr, struct page **pages,
|
/external/quake/quake/src/WinQuake/ |
H A D | vid_ext.cpp |
    50   int pages[3];  // either 2 or 3 is valid  [member of struct __anon5652]
    83   int num_pages;  // number of complete frame buffer pages (+29)
    385  vesa_extra[nummodes].pages[0] = 0;
    386  vesa_extra[nummodes].pages[1] = modeinfo.pagesize;
    387  vesa_extra[nummodes].pages[2] = modeinfo.pagesize * 2;
    406  vesa_extra[nummodes].pages[0] = 0;
    413  vesa_extra[nummodes].pages[1] = modeinfo.pagesize;
    414  vesa_extra[nummodes].pages[2] = modeinfo.pagesize * 2;
    503  // force to one page if the adapter reports it doesn't support more pages
    565  printf(" num pages …
    … (more matches not shown)
|
/external/webkit/WebCore/bindings/js/ |
H A D | ScriptController.cpp |
    260  const HashSet<Page*>& pages = page->group().pages();  [local]
    261  HashSet<Page*>::const_iterator end = pages.end();
    262  for (HashSet<Page*>::const_iterator it = pages.begin(); it != end; ++it) {
|
/external/webkit/WebKit/qt/Api/ |
H A D | qwebsettings.h | 118 static void setMaximumPagesInCache(int pages);
|
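QWebSettings::setMaximumPagesInCache() is a static setter that bounds how many visited pages QtWebKit keeps alive in its in-memory page cache for fast back/forward navigation. A minimal usage sketch, assuming a Qt build that ships the QtWebKit module and its QWebSettings header:

    #include <QApplication>
    #include <QWebSettings>

    int main(int argc, char** argv) {
        QApplication app(argc, argv);

        // Keep at most four pages alive in the global in-memory page cache.
        QWebSettings::setMaximumPagesInCache(4);

        // ... create QWebView / QWebPage instances as usual ...
        return 0;
    }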
/external/freetype/ |
H A D | Android.mk | 12 # when loading complex pages in the browser
|