feat: brouillon de gestion de mémoire paginée avec usage de TAILQ de FreeBSD, ne fonctionne pas... à suivre
This commit is contained in:
parent
f04bb66ec8
commit
6acbddab8d
|
@ -4,6 +4,7 @@
|
|||
#include "types.h"
|
||||
|
||||
/******************************************************************************/
|
||||
#define halt() asm("hlt"::)
|
||||
|
||||
#define sti() asm("sti"::)
|
||||
|
||||
|
|
|
@ -2,6 +2,7 @@
|
|||
/* COS2000 - Compatible Operating System - LGPL v3 - Hordé Nicolas */
|
||||
/* */
|
||||
#include "types.h"
|
||||
#include "queue.h"
|
||||
|
||||
#define TOPAGE(addr) (addr) >> 12
|
||||
|
||||
|
@ -12,13 +13,19 @@
|
|||
|
||||
#define IDT_ADDR 0x00000000 /* adresse de la IDT */
|
||||
#define GDT_ADDR 0x00000800 /* adresse de la GDT */
|
||||
#define KERNEL_PGD_ADDR 0x00001000 /* adresse de la page directory */
|
||||
#define KERNEL_PD_ADDR 0x00001000 /* adresse de la page directory */
|
||||
#define KERNEL_STACK_ADDR 0x0009FFFF /* adresse de la pile du kernel */
|
||||
#define KERNEL_CODE_ADDR 0x00100000
|
||||
#define KERNEL_CODE_ADDR 0x00100000 /* adresse du code du noyau */
|
||||
#define KERNEL_PAGES 0x00800000 /* adresse des pages */
|
||||
#define KERNEL_HEAP 0x10000000 /* adresse du heap */
|
||||
#define USER_CODE 0x40000000 /* adresse du code utilisateur */
|
||||
#define USER_STACK 0xE0000000 /* adresse de la pile utilisateur */
|
||||
|
||||
/* limites de la mémoire 32 bits */
|
||||
#define MAXMEMSIZE 0x100000000
|
||||
#define MAXMEMPAGE 1024*1024
|
||||
#define MAXHEAPSIZE USER_CODE-KERNEL_HEAP
|
||||
#define MAXPAGESSIZE KERNEL_HEAP-KERNEL_PAGES
|
||||
|
||||
/* page directory */
|
||||
#define PAGE_PRESENT 0b000000001/* page directory / table */
|
||||
|
@ -34,7 +41,93 @@
|
|||
#define PAGE_DIRTY 0b001000000 /* page écrite */
|
||||
#define PAGE_GLOBAL 0b100000000 /* évite que le TLB mette à jour l'adresse dans le cache si CR4 est remis à zéro (NECESSITE CR4) */
|
||||
|
||||
#define MALLOC_MINIMUM 16
|
||||
|
||||
/* Malloc chunk header, for kernel-heap allocation.
 * Every heap chunk begins with this 4-byte packed header, immediately
 * followed by the payload. */
typedef struct tmalloc {
	u32 size:31;	/* total chunk size in bytes, header included (mallocpage stores the whole allocated range here) */
	u32 used:1;	/* 1 = chunk allocated, 0 = free */
} __attribute__ ((packed)) tmalloc;
|
||||
|
||||
/* Page, for virtual-memory management: associates one virtual page
 * address with the physical page backing it.  Linked into a FreeBSD
 * TAILQ (queue.h). */
typedef struct page {
	u8 *vaddr;	/* virtual address of the page */
	u8 *paddr;	/* physical address backing it */
	TAILQ_ENTRY(page) tailq;	/* linkage in a page_t tail queue */
} __attribute__ ((packed)) page;

/* Head type for a tail queue of struct page. */
typedef TAILQ_HEAD(page_s, page) page_t;
|
||||
|
||||
/* Page directory, for virtual-memory management.  Holds a pointer to
 * the directory's page entries (presumably the first 'page' of the
 * directory — TODO confirm intended ownership) and is linked into a
 * TAILQ of directories. */
typedef struct pd {
	page *addr;	/* page entries of this directory */
	TAILQ_ENTRY(pd) tailq;	/* linkage in a pd_t tail queue */
} __attribute__ ((packed)) pd;

/* Head type for a tail queue of struct pd. */
typedef TAILQ_HEAD(pd_s, pd) pd_t;
|
||||
|
||||
/* vrange (virtual address range), for tracking spans of virtual pages,
 * e.g. the free-page list ('freepages' in memory.c).  The range is
 * delimited by a low and a high virtual address; bounds inclusivity is
 * not established here — confirm against the range users. */
typedef struct vrange {
	u8 *vaddrlow;	/* lowest virtual address of the range */
	u8 *vaddrhigh;	/* highest virtual address of the range */
	TAILQ_ENTRY(vrange) tailq;	/* linkage in a vrange_t tail queue */
} __attribute__ ((packed)) vrange;

/* Head type for a tail queue of struct vrange. */
typedef TAILQ_HEAD(vrange_s, vrange) vrange_t;
|
||||
|
||||
void panic(u8 *string);
|
||||
void memset(void *dst, u8 val, u32 count,u32 size);
|
||||
void memcpy(void *src, void *dst, u32 count, u32 size);
|
||||
u32 memcmp(void *src, void *dst, u32 count, u32 size);
|
||||
u64 getmemoryfree(void);
|
||||
u64 physical_getmemorysize();
|
||||
void physical_page_use(u32 page);
|
||||
void physical_page_free(u32 page);
|
||||
void physical_range_use(u64 addr,u64 len);
|
||||
void physical_range_free(u64 addr,u64 len);
|
||||
u8* physical_page_getfree(void);
|
||||
void physical_init(void);
|
||||
void initpaging(void);
|
||||
void virtual_init(void);
|
||||
|
||||
/*
|
||||
Fonction à ajouter...pour gestion mémoire virtuelle
|
||||
u8* virtual_to_physical(u8 *vaddr)
|
||||
|
||||
pd *virtual_pd_create(void)
|
||||
|
||||
void virtual_pd_destroy(pd *dst)
|
||||
|
||||
void virtual_pd_page_remove(pd *dst, u8* vaddr)
|
||||
|
||||
void virtual_pd_page_add(pd *dst, u8* vaddr, u8 * paddr, u32 flags)
|
||||
|
||||
void virtual_range_use(pd *dst, u8 vaddr, u8 paddr, u8 len)
|
||||
|
||||
void virtual_range_free(pd *dst, u8 vaddr, u8 len)
|
||||
|
||||
void virtual_range_new(pd *dst, u8 vaddr, u8 len)
|
||||
|
||||
page *virtual_page_getfree(void)
|
||||
|
||||
void virtual_page_free(u8* vaddr)
|
||||
|
||||
void virtual_page_use(u8* vaddr)
|
||||
|
||||
void virtual_init(void)
|
||||
|
||||
|
||||
|
||||
|
||||
void virtual_range_use_kernel(u8 vaddr, u8 paddr, u8 len)
|
||||
|
||||
void virtual_range_free_kernel(u8 vaddr, u8 len)
|
||||
|
||||
void virtual_range_new_kernel(u8 vaddr, u8 len)
|
||||
|
||||
void virtual_range_use_current(u8 vaddr, u8 paddr, u8 len)
|
||||
|
||||
void virtual_range_free_current(u8 vaddr, u8 len)
|
||||
|
||||
void virtual_range_new_current(u8 vaddr, u8 len)
|
||||
*/
|
||||
|
|
|
@ -0,0 +1,694 @@
|
|||
/*-
|
||||
* Copyright (c) 1991, 1993
|
||||
* The Regents of the University of California. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
* 4. Neither the name of the University nor the names of its contributors
|
||||
* may be used to endorse or promote products derived from this software
|
||||
* without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* @(#)queue.h 8.5 (Berkeley) 8/20/94
|
||||
* $FreeBSD$
|
||||
*/
|
||||
|
||||
#ifndef _SYS_QUEUE_H_
|
||||
#define _SYS_QUEUE_H_
|
||||
|
||||
//#include <sys/cdefs.h>
|
||||
|
||||
/*
|
||||
* This file defines four types of data structures: singly-linked lists,
|
||||
* singly-linked tail queues, lists and tail queues.
|
||||
*
|
||||
* A singly-linked list is headed by a single forward pointer. The elements
|
||||
* are singly linked for minimum space and pointer manipulation overhead at
|
||||
* the expense of O(n) removal for arbitrary elements. New elements can be
|
||||
* added to the list after an existing element or at the head of the list.
|
||||
* Elements being removed from the head of the list should use the explicit
|
||||
* macro for this purpose for optimum efficiency. A singly-linked list may
|
||||
* only be traversed in the forward direction. Singly-linked lists are ideal
|
||||
* for applications with large datasets and few or no removals or for
|
||||
* implementing a LIFO queue.
|
||||
*
|
||||
* A singly-linked tail queue is headed by a pair of pointers, one to the
|
||||
* head of the list and the other to the tail of the list. The elements are
|
||||
* singly linked for minimum space and pointer manipulation overhead at the
|
||||
* expense of O(n) removal for arbitrary elements. New elements can be added
|
||||
* to the list after an existing element, at the head of the list, or at the
|
||||
* end of the list. Elements being removed from the head of the tail queue
|
||||
* should use the explicit macro for this purpose for optimum efficiency.
|
||||
* A singly-linked tail queue may only be traversed in the forward direction.
|
||||
* Singly-linked tail queues are ideal for applications with large datasets
|
||||
* and few or no removals or for implementing a FIFO queue.
|
||||
*
|
||||
* A list is headed by a single forward pointer (or an array of forward
|
||||
* pointers for a hash table header). The elements are doubly linked
|
||||
* so that an arbitrary element can be removed without a need to
|
||||
* traverse the list. New elements can be added to the list before
|
||||
* or after an existing element or at the head of the list. A list
|
||||
* may be traversed in either direction.
|
||||
*
|
||||
* A tail queue is headed by a pair of pointers, one to the head of the
|
||||
* list and the other to the tail of the list. The elements are doubly
|
||||
* linked so that an arbitrary element can be removed without a need to
|
||||
* traverse the list. New elements can be added to the list before or
|
||||
* after an existing element, at the head of the list, or at the end of
|
||||
* the list. A tail queue may be traversed in either direction.
|
||||
*
|
||||
* For details on the use of these macros, see the queue(3) manual page.
|
||||
*
|
||||
*
|
||||
* SLIST LIST STAILQ TAILQ
|
||||
* _HEAD + + + +
|
||||
* _HEAD_INITIALIZER + + + +
|
||||
* _ENTRY + + + +
|
||||
* _INIT + + + +
|
||||
* _EMPTY + + + +
|
||||
* _FIRST + + + +
|
||||
* _NEXT + + + +
|
||||
* _PREV - + - +
|
||||
* _LAST - - + +
|
||||
* _FOREACH + + + +
|
||||
* _FOREACH_FROM + + + +
|
||||
* _FOREACH_SAFE + + + +
|
||||
* _FOREACH_FROM_SAFE + + + +
|
||||
* _FOREACH_REVERSE - - - +
|
||||
* _FOREACH_REVERSE_FROM - - - +
|
||||
* _FOREACH_REVERSE_SAFE - - - +
|
||||
* _FOREACH_REVERSE_FROM_SAFE - - - +
|
||||
* _INSERT_HEAD + + + +
|
||||
* _INSERT_BEFORE - + - +
|
||||
* _INSERT_AFTER + + + +
|
||||
* _INSERT_TAIL - - + +
|
||||
* _CONCAT - - + +
|
||||
* _REMOVE_AFTER + - + -
|
||||
* _REMOVE_HEAD + - + -
|
||||
* _REMOVE + + + +
|
||||
* _SWAP + + + +
|
||||
*
|
||||
*/
|
||||
#ifdef QUEUE_MACRO_DEBUG
|
||||
/* Store the last 2 places the queue element or head was altered */
|
||||
struct qm_trace {
|
||||
unsigned long lastline;
|
||||
unsigned long prevline;
|
||||
const char *lastfile;
|
||||
const char *prevfile;
|
||||
};
|
||||
|
||||
#define TRACEBUF struct qm_trace trace;
|
||||
#define TRACEBUF_INITIALIZER { __FILE__, __LINE__, NULL, 0 } ,
|
||||
#define TRASHIT(x) do {(x) = (void *)-1;} while (0)
|
||||
#define QMD_SAVELINK(name, link) void **name = (void *)&(link)
|
||||
|
||||
#define QMD_TRACE_HEAD(head) do { \
|
||||
(head)->trace.prevline = (head)->trace.lastline; \
|
||||
(head)->trace.prevfile = (head)->trace.lastfile; \
|
||||
(head)->trace.lastline = __LINE__; \
|
||||
(head)->trace.lastfile = __FILE__; \
|
||||
} while (0)
|
||||
|
||||
#define QMD_TRACE_ELEM(elem) do { \
|
||||
(elem)->trace.prevline = (elem)->trace.lastline; \
|
||||
(elem)->trace.prevfile = (elem)->trace.lastfile; \
|
||||
(elem)->trace.lastline = __LINE__; \
|
||||
(elem)->trace.lastfile = __FILE__; \
|
||||
} while (0)
|
||||
|
||||
#else
|
||||
#define QMD_TRACE_ELEM(elem)
|
||||
#define QMD_TRACE_HEAD(head)
|
||||
#define QMD_SAVELINK(name, link)
|
||||
#define TRACEBUF
|
||||
#define TRACEBUF_INITIALIZER
|
||||
#define TRASHIT(x)
|
||||
#endif /* QUEUE_MACRO_DEBUG */
|
||||
|
||||
/*
|
||||
* Singly-linked List declarations.
|
||||
*/
|
||||
#define SLIST_HEAD(name, type) \
|
||||
struct name { \
|
||||
struct type *slh_first; /* first element */ \
|
||||
}
|
||||
|
||||
#define SLIST_HEAD_INITIALIZER(head) \
|
||||
{ NULL }
|
||||
|
||||
#define SLIST_ENTRY(type) \
|
||||
struct { \
|
||||
struct type *sle_next; /* next element */ \
|
||||
}
|
||||
|
||||
/*
|
||||
* Singly-linked List functions.
|
||||
*/
|
||||
#define SLIST_EMPTY(head) ((head)->slh_first == NULL)
|
||||
|
||||
#define SLIST_FIRST(head) ((head)->slh_first)
|
||||
|
||||
#define SLIST_FOREACH(var, head, field) \
|
||||
for ((var) = SLIST_FIRST((head)); \
|
||||
(var); \
|
||||
(var) = SLIST_NEXT((var), field))
|
||||
|
||||
#define SLIST_FOREACH_FROM(var, head, field) \
|
||||
for ((var) = ((var) ? (var) : SLIST_FIRST((head))); \
|
||||
(var); \
|
||||
(var) = SLIST_NEXT((var), field))
|
||||
|
||||
#define SLIST_FOREACH_SAFE(var, head, field, tvar) \
|
||||
for ((var) = SLIST_FIRST((head)); \
|
||||
(var) && ((tvar) = SLIST_NEXT((var), field), 1); \
|
||||
(var) = (tvar))
|
||||
|
||||
#define SLIST_FOREACH_FROM_SAFE(var, head, field, tvar) \
|
||||
for ((var) = ((var) ? (var) : SLIST_FIRST((head))); \
|
||||
(var) && ((tvar) = SLIST_NEXT((var), field), 1); \
|
||||
(var) = (tvar))
|
||||
|
||||
#define SLIST_FOREACH_PREVPTR(var, varp, head, field) \
|
||||
for ((varp) = &SLIST_FIRST((head)); \
|
||||
((var) = *(varp)) != NULL; \
|
||||
(varp) = &SLIST_NEXT((var), field))
|
||||
|
||||
#define SLIST_INIT(head) do { \
|
||||
SLIST_FIRST((head)) = NULL; \
|
||||
} while (0)
|
||||
|
||||
#define SLIST_INSERT_AFTER(slistelm, elm, field) do { \
|
||||
SLIST_NEXT((elm), field) = SLIST_NEXT((slistelm), field); \
|
||||
SLIST_NEXT((slistelm), field) = (elm); \
|
||||
} while (0)
|
||||
|
||||
#define SLIST_INSERT_HEAD(head, elm, field) do { \
|
||||
SLIST_NEXT((elm), field) = SLIST_FIRST((head)); \
|
||||
SLIST_FIRST((head)) = (elm); \
|
||||
} while (0)
|
||||
|
||||
#define SLIST_NEXT(elm, field) ((elm)->field.sle_next)
|
||||
|
||||
#define SLIST_REMOVE(head, elm, type, field) do { \
|
||||
QMD_SAVELINK(oldnext, (elm)->field.sle_next); \
|
||||
if (SLIST_FIRST((head)) == (elm)) { \
|
||||
SLIST_REMOVE_HEAD((head), field); \
|
||||
} \
|
||||
else { \
|
||||
struct type *curelm = SLIST_FIRST((head)); \
|
||||
while (SLIST_NEXT(curelm, field) != (elm)) \
|
||||
curelm = SLIST_NEXT(curelm, field); \
|
||||
SLIST_REMOVE_AFTER(curelm, field); \
|
||||
} \
|
||||
TRASHIT(*oldnext); \
|
||||
} while (0)
|
||||
|
||||
#define SLIST_REMOVE_AFTER(elm, field) do { \
|
||||
SLIST_NEXT(elm, field) = \
|
||||
SLIST_NEXT(SLIST_NEXT(elm, field), field); \
|
||||
} while (0)
|
||||
|
||||
#define SLIST_REMOVE_HEAD(head, field) do { \
|
||||
SLIST_FIRST((head)) = SLIST_NEXT(SLIST_FIRST((head)), field); \
|
||||
} while (0)
|
||||
|
||||
#define SLIST_SWAP(head1, head2, type) do { \
|
||||
struct type *swap_first = SLIST_FIRST(head1); \
|
||||
SLIST_FIRST(head1) = SLIST_FIRST(head2); \
|
||||
SLIST_FIRST(head2) = swap_first; \
|
||||
} while (0)
|
||||
|
||||
/*
|
||||
* Singly-linked Tail queue declarations.
|
||||
*/
|
||||
#define STAILQ_HEAD(name, type) \
|
||||
struct name { \
|
||||
struct type *stqh_first;/* first element */ \
|
||||
struct type **stqh_last;/* addr of last next element */ \
|
||||
}
|
||||
|
||||
#define STAILQ_HEAD_INITIALIZER(head) \
|
||||
{ NULL, &(head).stqh_first }
|
||||
|
||||
#define STAILQ_ENTRY(type) \
|
||||
struct { \
|
||||
struct type *stqe_next; /* next element */ \
|
||||
}
|
||||
|
||||
/*
|
||||
* Singly-linked Tail queue functions.
|
||||
*/
|
||||
#define STAILQ_CONCAT(head1, head2) do { \
|
||||
if (!STAILQ_EMPTY((head2))) { \
|
||||
*(head1)->stqh_last = (head2)->stqh_first; \
|
||||
(head1)->stqh_last = (head2)->stqh_last; \
|
||||
STAILQ_INIT((head2)); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define STAILQ_EMPTY(head) ((head)->stqh_first == NULL)
|
||||
|
||||
#define STAILQ_FIRST(head) ((head)->stqh_first)
|
||||
|
||||
#define STAILQ_FOREACH(var, head, field) \
|
||||
for((var) = STAILQ_FIRST((head)); \
|
||||
(var); \
|
||||
(var) = STAILQ_NEXT((var), field))
|
||||
|
||||
#define STAILQ_FOREACH_FROM(var, head, field) \
|
||||
for ((var) = ((var) ? (var) : STAILQ_FIRST((head))); \
|
||||
(var); \
|
||||
(var) = STAILQ_NEXT((var), field))
|
||||
|
||||
#define STAILQ_FOREACH_SAFE(var, head, field, tvar) \
|
||||
for ((var) = STAILQ_FIRST((head)); \
|
||||
(var) && ((tvar) = STAILQ_NEXT((var), field), 1); \
|
||||
(var) = (tvar))
|
||||
|
||||
#define STAILQ_FOREACH_FROM_SAFE(var, head, field, tvar) \
|
||||
for ((var) = ((var) ? (var) : STAILQ_FIRST((head))); \
|
||||
(var) && ((tvar) = STAILQ_NEXT((var), field), 1); \
|
||||
(var) = (tvar))
|
||||
|
||||
#define STAILQ_INIT(head) do { \
|
||||
STAILQ_FIRST((head)) = NULL; \
|
||||
(head)->stqh_last = &STAILQ_FIRST((head)); \
|
||||
} while (0)
|
||||
|
||||
#define STAILQ_INSERT_AFTER(head, tqelm, elm, field) do { \
|
||||
if ((STAILQ_NEXT((elm), field) = STAILQ_NEXT((tqelm), field)) == NULL)\
|
||||
(head)->stqh_last = &STAILQ_NEXT((elm), field); \
|
||||
STAILQ_NEXT((tqelm), field) = (elm); \
|
||||
} while (0)
|
||||
|
||||
#define STAILQ_INSERT_HEAD(head, elm, field) do { \
|
||||
if ((STAILQ_NEXT((elm), field) = STAILQ_FIRST((head))) == NULL) \
|
||||
(head)->stqh_last = &STAILQ_NEXT((elm), field); \
|
||||
STAILQ_FIRST((head)) = (elm); \
|
||||
} while (0)
|
||||
|
||||
#define STAILQ_INSERT_TAIL(head, elm, field) do { \
|
||||
STAILQ_NEXT((elm), field) = NULL; \
|
||||
*(head)->stqh_last = (elm); \
|
||||
(head)->stqh_last = &STAILQ_NEXT((elm), field); \
|
||||
} while (0)
|
||||
|
||||
#define STAILQ_LAST(head, type, field) \
|
||||
(STAILQ_EMPTY((head)) ? NULL : \
|
||||
__containerof((head)->stqh_last, struct type, field.stqe_next))
|
||||
|
||||
#define STAILQ_NEXT(elm, field) ((elm)->field.stqe_next)
|
||||
|
||||
#define STAILQ_REMOVE(head, elm, type, field) do { \
|
||||
QMD_SAVELINK(oldnext, (elm)->field.stqe_next); \
|
||||
if (STAILQ_FIRST((head)) == (elm)) { \
|
||||
STAILQ_REMOVE_HEAD((head), field); \
|
||||
} \
|
||||
else { \
|
||||
struct type *curelm = STAILQ_FIRST((head)); \
|
||||
while (STAILQ_NEXT(curelm, field) != (elm)) \
|
||||
curelm = STAILQ_NEXT(curelm, field); \
|
||||
STAILQ_REMOVE_AFTER(head, curelm, field); \
|
||||
} \
|
||||
TRASHIT(*oldnext); \
|
||||
} while (0)
|
||||
|
||||
#define STAILQ_REMOVE_AFTER(head, elm, field) do { \
|
||||
if ((STAILQ_NEXT(elm, field) = \
|
||||
STAILQ_NEXT(STAILQ_NEXT(elm, field), field)) == NULL) \
|
||||
(head)->stqh_last = &STAILQ_NEXT((elm), field); \
|
||||
} while (0)
|
||||
|
||||
#define STAILQ_REMOVE_HEAD(head, field) do { \
|
||||
if ((STAILQ_FIRST((head)) = \
|
||||
STAILQ_NEXT(STAILQ_FIRST((head)), field)) == NULL) \
|
||||
(head)->stqh_last = &STAILQ_FIRST((head)); \
|
||||
} while (0)
|
||||
|
||||
#define STAILQ_SWAP(head1, head2, type) do { \
|
||||
struct type *swap_first = STAILQ_FIRST(head1); \
|
||||
struct type **swap_last = (head1)->stqh_last; \
|
||||
STAILQ_FIRST(head1) = STAILQ_FIRST(head2); \
|
||||
(head1)->stqh_last = (head2)->stqh_last; \
|
||||
STAILQ_FIRST(head2) = swap_first; \
|
||||
(head2)->stqh_last = swap_last; \
|
||||
if (STAILQ_EMPTY(head1)) \
|
||||
(head1)->stqh_last = &STAILQ_FIRST(head1); \
|
||||
if (STAILQ_EMPTY(head2)) \
|
||||
(head2)->stqh_last = &STAILQ_FIRST(head2); \
|
||||
} while (0)
|
||||
|
||||
|
||||
/*
|
||||
* List declarations.
|
||||
*/
|
||||
#define LIST_HEAD(name, type) \
|
||||
struct name { \
|
||||
struct type *lh_first; /* first element */ \
|
||||
}
|
||||
|
||||
#define LIST_HEAD_INITIALIZER(head) \
|
||||
{ NULL }
|
||||
|
||||
#define LIST_ENTRY(type) \
|
||||
struct { \
|
||||
struct type *le_next; /* next element */ \
|
||||
struct type **le_prev; /* address of previous next element */ \
|
||||
}
|
||||
|
||||
/*
|
||||
* List functions.
|
||||
*/
|
||||
|
||||
#if (defined(_KERNEL) && defined(INVARIANTS))
|
||||
#define QMD_LIST_CHECK_HEAD(head, field) do { \
|
||||
if (LIST_FIRST((head)) != NULL && \
|
||||
LIST_FIRST((head))->field.le_prev != \
|
||||
&LIST_FIRST((head))) \
|
||||
panic("Bad list head %p first->prev != head", (head)); \
|
||||
} while (0)
|
||||
|
||||
#define QMD_LIST_CHECK_NEXT(elm, field) do { \
|
||||
if (LIST_NEXT((elm), field) != NULL && \
|
||||
LIST_NEXT((elm), field)->field.le_prev != \
|
||||
&((elm)->field.le_next)) \
|
||||
panic("Bad link elm %p next->prev != elm", (elm)); \
|
||||
} while (0)
|
||||
|
||||
#define QMD_LIST_CHECK_PREV(elm, field) do { \
|
||||
if (*(elm)->field.le_prev != (elm)) \
|
||||
panic("Bad link elm %p prev->next != elm", (elm)); \
|
||||
} while (0)
|
||||
#else
|
||||
#define QMD_LIST_CHECK_HEAD(head, field)
|
||||
#define QMD_LIST_CHECK_NEXT(elm, field)
|
||||
#define QMD_LIST_CHECK_PREV(elm, field)
|
||||
#endif /* (_KERNEL && INVARIANTS) */
|
||||
|
||||
#define LIST_EMPTY(head) ((head)->lh_first == NULL)
|
||||
|
||||
#define LIST_FIRST(head) ((head)->lh_first)
|
||||
|
||||
#define LIST_FOREACH(var, head, field) \
|
||||
for ((var) = LIST_FIRST((head)); \
|
||||
(var); \
|
||||
(var) = LIST_NEXT((var), field))
|
||||
|
||||
#define LIST_FOREACH_FROM(var, head, field) \
|
||||
for ((var) = ((var) ? (var) : LIST_FIRST((head))); \
|
||||
(var); \
|
||||
(var) = LIST_NEXT((var), field))
|
||||
|
||||
#define LIST_FOREACH_SAFE(var, head, field, tvar) \
|
||||
for ((var) = LIST_FIRST((head)); \
|
||||
(var) && ((tvar) = LIST_NEXT((var), field), 1); \
|
||||
(var) = (tvar))
|
||||
|
||||
#define LIST_FOREACH_FROM_SAFE(var, head, field, tvar) \
|
||||
for ((var) = ((var) ? (var) : LIST_FIRST((head))); \
|
||||
(var) && ((tvar) = LIST_NEXT((var), field), 1); \
|
||||
(var) = (tvar))
|
||||
|
||||
#define LIST_INIT(head) do { \
|
||||
LIST_FIRST((head)) = NULL; \
|
||||
} while (0)
|
||||
|
||||
#define LIST_INSERT_AFTER(listelm, elm, field) do { \
|
||||
QMD_LIST_CHECK_NEXT(listelm, field); \
|
||||
if ((LIST_NEXT((elm), field) = LIST_NEXT((listelm), field)) != NULL)\
|
||||
LIST_NEXT((listelm), field)->field.le_prev = \
|
||||
&LIST_NEXT((elm), field); \
|
||||
LIST_NEXT((listelm), field) = (elm); \
|
||||
(elm)->field.le_prev = &LIST_NEXT((listelm), field); \
|
||||
} while (0)
|
||||
|
||||
#define LIST_INSERT_BEFORE(listelm, elm, field) do { \
|
||||
QMD_LIST_CHECK_PREV(listelm, field); \
|
||||
(elm)->field.le_prev = (listelm)->field.le_prev; \
|
||||
LIST_NEXT((elm), field) = (listelm); \
|
||||
*(listelm)->field.le_prev = (elm); \
|
||||
(listelm)->field.le_prev = &LIST_NEXT((elm), field); \
|
||||
} while (0)
|
||||
|
||||
#define LIST_INSERT_HEAD(head, elm, field) do { \
|
||||
QMD_LIST_CHECK_HEAD((head), field); \
|
||||
if ((LIST_NEXT((elm), field) = LIST_FIRST((head))) != NULL) \
|
||||
LIST_FIRST((head))->field.le_prev = &LIST_NEXT((elm), field);\
|
||||
LIST_FIRST((head)) = (elm); \
|
||||
(elm)->field.le_prev = &LIST_FIRST((head)); \
|
||||
} while (0)
|
||||
|
||||
#define LIST_NEXT(elm, field) ((elm)->field.le_next)
|
||||
|
||||
#define LIST_PREV(elm, head, type, field) \
|
||||
((elm)->field.le_prev == &LIST_FIRST((head)) ? NULL : \
|
||||
__containerof((elm)->field.le_prev, struct type, field.le_next))
|
||||
|
||||
#define LIST_REMOVE(elm, field) do { \
|
||||
QMD_SAVELINK(oldnext, (elm)->field.le_next); \
|
||||
QMD_SAVELINK(oldprev, (elm)->field.le_prev); \
|
||||
QMD_LIST_CHECK_NEXT(elm, field); \
|
||||
QMD_LIST_CHECK_PREV(elm, field); \
|
||||
if (LIST_NEXT((elm), field) != NULL) \
|
||||
LIST_NEXT((elm), field)->field.le_prev = \
|
||||
(elm)->field.le_prev; \
|
||||
*(elm)->field.le_prev = LIST_NEXT((elm), field); \
|
||||
TRASHIT(*oldnext); \
|
||||
TRASHIT(*oldprev); \
|
||||
} while (0)
|
||||
|
||||
#define LIST_SWAP(head1, head2, type, field) do { \
|
||||
struct type *swap_tmp = LIST_FIRST((head1)); \
|
||||
LIST_FIRST((head1)) = LIST_FIRST((head2)); \
|
||||
LIST_FIRST((head2)) = swap_tmp; \
|
||||
if ((swap_tmp = LIST_FIRST((head1))) != NULL) \
|
||||
swap_tmp->field.le_prev = &LIST_FIRST((head1)); \
|
||||
if ((swap_tmp = LIST_FIRST((head2))) != NULL) \
|
||||
swap_tmp->field.le_prev = &LIST_FIRST((head2)); \
|
||||
} while (0)
|
||||
|
||||
/*
|
||||
* Tail queue declarations.
|
||||
*/
|
||||
#define TAILQ_HEAD(name, type) \
|
||||
struct name { \
|
||||
struct type *tqh_first; /* first element */ \
|
||||
struct type **tqh_last; /* addr of last next element */ \
|
||||
TRACEBUF \
|
||||
}
|
||||
|
||||
#define TAILQ_HEAD_INITIALIZER(head) \
|
||||
{ NULL, &(head).tqh_first, TRACEBUF_INITIALIZER }
|
||||
|
||||
#define TAILQ_ENTRY(type) \
|
||||
struct { \
|
||||
struct type *tqe_next; /* next element */ \
|
||||
struct type **tqe_prev; /* address of previous next element */ \
|
||||
TRACEBUF \
|
||||
}
|
||||
|
||||
/*
|
||||
* Tail queue functions.
|
||||
*/
|
||||
#if (defined(_KERNEL) && defined(INVARIANTS))
|
||||
#define QMD_TAILQ_CHECK_HEAD(head, field) do { \
|
||||
if (!TAILQ_EMPTY(head) && \
|
||||
TAILQ_FIRST((head))->field.tqe_prev != \
|
||||
&TAILQ_FIRST((head))) \
|
||||
panic("Bad tailq head %p first->prev != head", (head)); \
|
||||
} while (0)
|
||||
|
||||
#define QMD_TAILQ_CHECK_TAIL(head, field) do { \
|
||||
if (*(head)->tqh_last != NULL) \
|
||||
panic("Bad tailq NEXT(%p->tqh_last) != NULL", (head)); \
|
||||
} while (0)
|
||||
|
||||
#define QMD_TAILQ_CHECK_NEXT(elm, field) do { \
|
||||
if (TAILQ_NEXT((elm), field) != NULL && \
|
||||
TAILQ_NEXT((elm), field)->field.tqe_prev != \
|
||||
&((elm)->field.tqe_next)) \
|
||||
panic("Bad link elm %p next->prev != elm", (elm)); \
|
||||
} while (0)
|
||||
|
||||
#define QMD_TAILQ_CHECK_PREV(elm, field) do { \
|
||||
if (*(elm)->field.tqe_prev != (elm)) \
|
||||
panic("Bad link elm %p prev->next != elm", (elm)); \
|
||||
} while (0)
|
||||
#else
|
||||
#define QMD_TAILQ_CHECK_HEAD(head, field)
|
||||
#define QMD_TAILQ_CHECK_TAIL(head, headname)
|
||||
#define QMD_TAILQ_CHECK_NEXT(elm, field)
|
||||
#define QMD_TAILQ_CHECK_PREV(elm, field)
|
||||
#endif /* (_KERNEL && INVARIANTS) */
|
||||
|
||||
#define TAILQ_CONCAT(head1, head2, field) do { \
|
||||
if (!TAILQ_EMPTY(head2)) { \
|
||||
*(head1)->tqh_last = (head2)->tqh_first; \
|
||||
(head2)->tqh_first->field.tqe_prev = (head1)->tqh_last; \
|
||||
(head1)->tqh_last = (head2)->tqh_last; \
|
||||
TAILQ_INIT((head2)); \
|
||||
QMD_TRACE_HEAD(head1); \
|
||||
QMD_TRACE_HEAD(head2); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define TAILQ_EMPTY(head) ((head)->tqh_first == NULL)
|
||||
|
||||
#define TAILQ_FIRST(head) ((head)->tqh_first)
|
||||
|
||||
#define TAILQ_FOREACH(var, head, field) \
|
||||
for ((var) = TAILQ_FIRST((head)); \
|
||||
(var); \
|
||||
(var) = TAILQ_NEXT((var), field))
|
||||
|
||||
#define TAILQ_FOREACH_FROM(var, head, field) \
|
||||
for ((var) = ((var) ? (var) : TAILQ_FIRST((head))); \
|
||||
(var); \
|
||||
(var) = TAILQ_NEXT((var), field))
|
||||
|
||||
#define TAILQ_FOREACH_SAFE(var, head, field, tvar) \
|
||||
for ((var) = TAILQ_FIRST((head)); \
|
||||
(var) && ((tvar) = TAILQ_NEXT((var), field), 1); \
|
||||
(var) = (tvar))
|
||||
|
||||
#define TAILQ_FOREACH_FROM_SAFE(var, head, field, tvar) \
|
||||
for ((var) = ((var) ? (var) : TAILQ_FIRST((head))); \
|
||||
(var) && ((tvar) = TAILQ_NEXT((var), field), 1); \
|
||||
(var) = (tvar))
|
||||
|
||||
#define TAILQ_FOREACH_REVERSE(var, head, headname, field) \
|
||||
for ((var) = TAILQ_LAST((head), headname); \
|
||||
(var); \
|
||||
(var) = TAILQ_PREV((var), headname, field))
|
||||
|
||||
#define TAILQ_FOREACH_REVERSE_FROM(var, head, headname, field) \
|
||||
for ((var) = ((var) ? (var) : TAILQ_LAST((head), headname)); \
|
||||
(var); \
|
||||
(var) = TAILQ_PREV((var), headname, field))
|
||||
|
||||
#define TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, tvar) \
|
||||
for ((var) = TAILQ_LAST((head), headname); \
|
||||
(var) && ((tvar) = TAILQ_PREV((var), headname, field), 1); \
|
||||
(var) = (tvar))
|
||||
|
||||
#define TAILQ_FOREACH_REVERSE_FROM_SAFE(var, head, headname, field, tvar) \
|
||||
for ((var) = ((var) ? (var) : TAILQ_LAST((head), headname)); \
|
||||
(var) && ((tvar) = TAILQ_PREV((var), headname, field), 1); \
|
||||
(var) = (tvar))
|
||||
|
||||
#define TAILQ_INIT(head) do { \
|
||||
TAILQ_FIRST((head)) = NULL; \
|
||||
(head)->tqh_last = &TAILQ_FIRST((head)); \
|
||||
QMD_TRACE_HEAD(head); \
|
||||
} while (0)
|
||||
|
||||
#define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
|
||||
QMD_TAILQ_CHECK_NEXT(listelm, field); \
|
||||
if ((TAILQ_NEXT((elm), field) = TAILQ_NEXT((listelm), field)) != NULL)\
|
||||
TAILQ_NEXT((elm), field)->field.tqe_prev = \
|
||||
&TAILQ_NEXT((elm), field); \
|
||||
else { \
|
||||
(head)->tqh_last = &TAILQ_NEXT((elm), field); \
|
||||
QMD_TRACE_HEAD(head); \
|
||||
} \
|
||||
TAILQ_NEXT((listelm), field) = (elm); \
|
||||
(elm)->field.tqe_prev = &TAILQ_NEXT((listelm), field); \
|
||||
QMD_TRACE_ELEM(&(elm)->field); \
|
||||
QMD_TRACE_ELEM(&listelm->field); \
|
||||
} while (0)
|
||||
|
||||
#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \
|
||||
QMD_TAILQ_CHECK_PREV(listelm, field); \
|
||||
(elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
|
||||
TAILQ_NEXT((elm), field) = (listelm); \
|
||||
*(listelm)->field.tqe_prev = (elm); \
|
||||
(listelm)->field.tqe_prev = &TAILQ_NEXT((elm), field); \
|
||||
QMD_TRACE_ELEM(&(elm)->field); \
|
||||
QMD_TRACE_ELEM(&listelm->field); \
|
||||
} while (0)
|
||||
|
||||
#define TAILQ_INSERT_HEAD(head, elm, field) do { \
|
||||
QMD_TAILQ_CHECK_HEAD(head, field); \
|
||||
if ((TAILQ_NEXT((elm), field) = TAILQ_FIRST((head))) != NULL) \
|
||||
TAILQ_FIRST((head))->field.tqe_prev = \
|
||||
&TAILQ_NEXT((elm), field); \
|
||||
else \
|
||||
(head)->tqh_last = &TAILQ_NEXT((elm), field); \
|
||||
TAILQ_FIRST((head)) = (elm); \
|
||||
(elm)->field.tqe_prev = &TAILQ_FIRST((head)); \
|
||||
QMD_TRACE_HEAD(head); \
|
||||
QMD_TRACE_ELEM(&(elm)->field); \
|
||||
} while (0)
|
||||
|
||||
#define TAILQ_INSERT_TAIL(head, elm, field) do { \
|
||||
QMD_TAILQ_CHECK_TAIL(head, field); \
|
||||
TAILQ_NEXT((elm), field) = NULL; \
|
||||
(elm)->field.tqe_prev = (head)->tqh_last; \
|
||||
*(head)->tqh_last = (elm); \
|
||||
(head)->tqh_last = &TAILQ_NEXT((elm), field); \
|
||||
QMD_TRACE_HEAD(head); \
|
||||
QMD_TRACE_ELEM(&(elm)->field); \
|
||||
} while (0)
|
||||
|
||||
#define TAILQ_LAST(head, headname) \
|
||||
(*(((struct headname *)((head)->tqh_last))->tqh_last))
|
||||
|
||||
#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
|
||||
|
||||
#define TAILQ_PREV(elm, headname, field) \
|
||||
(*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
|
||||
|
||||
#define TAILQ_REMOVE(head, elm, field) do { \
|
||||
QMD_SAVELINK(oldnext, (elm)->field.tqe_next); \
|
||||
QMD_SAVELINK(oldprev, (elm)->field.tqe_prev); \
|
||||
QMD_TAILQ_CHECK_NEXT(elm, field); \
|
||||
QMD_TAILQ_CHECK_PREV(elm, field); \
|
||||
if ((TAILQ_NEXT((elm), field)) != NULL) \
|
||||
TAILQ_NEXT((elm), field)->field.tqe_prev = \
|
||||
(elm)->field.tqe_prev; \
|
||||
else { \
|
||||
(head)->tqh_last = (elm)->field.tqe_prev; \
|
||||
QMD_TRACE_HEAD(head); \
|
||||
} \
|
||||
*(elm)->field.tqe_prev = TAILQ_NEXT((elm), field); \
|
||||
TRASHIT(*oldnext); \
|
||||
TRASHIT(*oldprev); \
|
||||
QMD_TRACE_ELEM(&(elm)->field); \
|
||||
} while (0)
|
||||
|
||||
#define TAILQ_SWAP(head1, head2, type, field) do { \
|
||||
struct type *swap_first = (head1)->tqh_first; \
|
||||
struct type **swap_last = (head1)->tqh_last; \
|
||||
(head1)->tqh_first = (head2)->tqh_first; \
|
||||
(head1)->tqh_last = (head2)->tqh_last; \
|
||||
(head2)->tqh_first = swap_first; \
|
||||
(head2)->tqh_last = swap_last; \
|
||||
if ((swap_first = (head1)->tqh_first) != NULL) \
|
||||
swap_first->field.tqe_prev = &(head1)->tqh_first; \
|
||||
else \
|
||||
(head1)->tqh_last = &(head1)->tqh_first; \
|
||||
if ((swap_first = (head2)->tqh_first) != NULL) \
|
||||
swap_first->field.tqe_prev = &(head2)->tqh_first; \
|
||||
else \
|
||||
(head2)->tqh_last = &(head2)->tqh_first; \
|
||||
} while (0)
|
||||
|
||||
#endif /* !_SYS_QUEUE_H_ */
|
215
lib/memory.c
215
lib/memory.c
|
@ -4,16 +4,87 @@
|
|||
#include "types.h"
|
||||
#include "memory.h"
|
||||
#include "multiboot2.h"
|
||||
#include "queue.h"
|
||||
#include "asm.h"
|
||||
|
||||
static u32 *pd0 = (u32 *) KERNEL_PGD_ADDR; /* page directory */
|
||||
static u8 *pg0 = (u8 *) 0; /* page 0 */
|
||||
static u8 *pg1 = (u8 *) (PAGESIZE*PAGENUMBER); /* page 1 */
|
||||
static u8 bitmap[MAXMEMPAGE / 8];
|
||||
static pd *kerneldirectory=NULL; /* pointeur vers le page directory noyau */
|
||||
static u8 *kernelheap=NULL; /* pointeur vers le heap noyau */
|
||||
static u8 bitmap[MAXMEMPAGE / 8]; /* bitmap */
|
||||
static vrange_t freepages;
|
||||
|
||||
/*******************************************************************************/
|
||||
/* Retourne la taille de la mémoire (selon grub) */
|
||||
/* Erreur fatale */
|
||||
void panic(u8 *string)
|
||||
{
|
||||
printf("KERNEL PANIC: %s\r\nSysteme arrete...\n");
|
||||
halt();
|
||||
}
|
||||
|
||||
u64 getmemorysize()
|
||||
/*******************************************************************************/
|
||||
/* Alloue plusieurs pages virtuelles (size) pour le heap du noyau */
|
||||
|
||||
tmalloc *mallocpage(u8 size)
|
||||
{
|
||||
tmalloc *chunk;
|
||||
u8 *paddr;
|
||||
u32 realsize=size * PAGESIZE;
|
||||
if ((kernelheap - KERNEL_HEAP + realsize) > MAXHEAPSIZE)
|
||||
panic("Plus de memoire noyau heap disponible a allouer !\n");
|
||||
chunk = (tmalloc *) kernelheap;
|
||||
virtual_range_new_kernel(kernelheap, realsize);
|
||||
chunk->size = realsize;
|
||||
chunk->used = 0;
|
||||
return chunk;
|
||||
}
|
||||
|
||||
/*******************************************************************************/
|
||||
/* Alloue de la mémoire virtuelle au noyau de façon dynamique (heap) */
|
||||
|
||||
void *vmalloc(u32 size)
|
||||
{
|
||||
u32 realsize;
|
||||
tmalloc *chunk, *new;
|
||||
realsize = sizeof(tmalloc) + size;
|
||||
if (realsize < MALLOC_MINIMUM)
|
||||
realsize = MALLOC_MINIMUM;
|
||||
chunk = KERNEL_HEAP;
|
||||
while (chunk->used || chunk->size < realsize) {
|
||||
if (chunk->size == 0)
|
||||
panic(sprintf("Element du heap %x defectueux avec une taille nulle (heap %x) !",chunk, kernelheap));
|
||||
chunk = chunk + chunk->size;
|
||||
if (chunk == (tmalloc *) kernelheap)
|
||||
mallocpage((realsize / PAGESIZE) + 1);
|
||||
else if (chunk > (tmalloc *) kernelheap)
|
||||
panic (sprintf("Element du heap %x depassant la limite %x !",chunk, kernelheap));
|
||||
}
|
||||
if (chunk->size - realsize < MALLOC_MINIMUM)
|
||||
chunk->used = 1;
|
||||
else {
|
||||
new = chunk + realsize;
|
||||
new->size = chunk->size - realsize;
|
||||
new->used = 0;
|
||||
chunk->size = realsize;
|
||||
chunk->used = 1;
|
||||
}
|
||||
return (u8 *) chunk + sizeof(tmalloc);
|
||||
}
|
||||
|
||||
/*******************************************************************************/
|
||||
/* Libère de la mémoire virtuelle depuis le heap noyau */
|
||||
|
||||
void vmfree(void *vaddr)
|
||||
{
|
||||
tmalloc *chunk, *new;
|
||||
chunk = (tmalloc *) (vaddr - sizeof(tmalloc));
|
||||
chunk->used = 0;
|
||||
while ((new = (tmalloc *) chunk + chunk->size) && new < (tmalloc *) kernelheap && new->used == 0)
|
||||
chunk->size += new->size;
|
||||
}
|
||||
|
||||
/*******************************************************************************/
|
||||
/* Retourne la taille de la mémoire physique (selon grub) */
|
||||
|
||||
u64 physical_getmemorysize()
|
||||
{
|
||||
u64 maxaddr=0;
|
||||
struct multiboot_tag_mmap *tag=getgrubinfo_mem();
|
||||
|
@ -26,59 +97,59 @@ u64 getmemorysize()
|
|||
}
|
||||
|
||||
/*******************************************************************************/
|
||||
/* Retourne que la page actuelle est occupée */
|
||||
/* Retourne que la page physique actuelle est occupée */
|
||||
|
||||
void bitmap_page_use(page)
|
||||
void physical_page_use(u32 page)
|
||||
{
|
||||
bitmap[((u32) page)/8] |= (1 << (((u32) page)%8));
|
||||
bitmap[(page/8)] |= (1 << (page%8));
|
||||
}
|
||||
|
||||
/*******************************************************************************/
|
||||
/* Retourne que la page actuelle est libre */
|
||||
/* Retourne que la page physique actuelle est libre */
|
||||
|
||||
void bitmap_page_free(page)
|
||||
void physical_page_free(u32 page)
|
||||
{
|
||||
bitmap[((u32) page)/8] &= ~(1 << (((u32) page)%8));
|
||||
bitmap[(page/8)] &= ~(1 << (page%8));
|
||||
}
|
||||
|
||||
/*******************************************************************************/
|
||||
/* Reserve un espace mémoire dans le bitmap */
|
||||
/* Reserve un espace mémoire physique dans le bitmap */
|
||||
|
||||
void bitmap_page_setused(u64 addr,u64 len)
|
||||
void physical_range_use(u64 addr,u64 len)
|
||||
{
|
||||
u32 nbpage=TOPAGE(len);
|
||||
u32 pagesrc=TOPAGE(addr);
|
||||
if (len & 0b1111111111 > 0)
|
||||
nbpage++;
|
||||
if (addr>0xFFFFFFFF)
|
||||
if (addr>=MAXMEMSIZE)
|
||||
return;
|
||||
if (len>0xFFFFFFFF)
|
||||
len=0xFFFFFFFF;
|
||||
if (addr+len>=MAXMEMSIZE)
|
||||
len=MAXMEMSIZE-addr-1;
|
||||
for(u32 page=pagesrc;page<pagesrc+nbpage;page++)
|
||||
bitmap_page_use(page);
|
||||
physical_page_use(page);
|
||||
}
|
||||
|
||||
/*******************************************************************************/
|
||||
/* Indique un espace mémoire libre dans le bitmap */
|
||||
/* Libère un espace mémoire physique dans le bitmap */
|
||||
|
||||
void bitmap_page_setfree(u64 addr,u64 len)
|
||||
void physical_range_free(u64 addr,u64 len)
|
||||
{
|
||||
u32 nbpage=TOPAGE(len);
|
||||
u32 pagesrc=TOPAGE(addr);
|
||||
if (len & 0b1111111111 > 0)
|
||||
nbpage++;
|
||||
if (addr>0xFFFFFFFF)
|
||||
if (addr>=MAXMEMSIZE)
|
||||
return;
|
||||
if (len>0xFFFFFFFF)
|
||||
len=0xFFFFFFFF;
|
||||
if (addr+len>=MAXMEMSIZE)
|
||||
len=MAXMEMSIZE-addr-1;
|
||||
for(u32 page=pagesrc;page<pagesrc+nbpage;page++)
|
||||
bitmap_page_free(page);
|
||||
physical_page_free(page);
|
||||
}
|
||||
|
||||
/*******************************************************************************/
|
||||
/* Retourne une page libre */
|
||||
/* Retourne une page physique libre */
|
||||
|
||||
u8* bitmap_page_getonefree(void)
|
||||
u8* physical_page_getfree(void)
|
||||
{
|
||||
u8 byte, bit;
|
||||
u32 page = 0;
|
||||
|
@ -87,7 +158,7 @@ u8* bitmap_page_getonefree(void)
|
|||
for (bit = 0; bit < 8; bit++)
|
||||
if (!(bitmap[byte] & (1 << bit))) {
|
||||
page = 8 * byte + bit;
|
||||
bitmap_page_use(page);
|
||||
physical_page_use(page);
|
||||
return (u8 *) (page * PAGESIZE);
|
||||
}
|
||||
return NULL;
|
||||
|
@ -109,9 +180,9 @@ u64 getmemoryfree(void)
|
|||
}
|
||||
|
||||
/*******************************************************************************/
|
||||
/* Initialisation du bitmap */
|
||||
/* Initialisation du bitmap pour la gestion physique de la mémoire */
|
||||
|
||||
void bitmap_init()
|
||||
void physical_init(void)
|
||||
{
|
||||
u64 page;
|
||||
for (page=0; page < sizeof(bitmap); page++)
|
||||
|
@ -121,10 +192,76 @@ void bitmap_init()
|
|||
for (mmap = ((struct multiboot_tag_mmap *) tag)->entries;(u8 *) mmap < (u8 *) tag + tag->size; mmap = (multiboot_memory_map_t *)
|
||||
((unsigned long) mmap + ((struct multiboot_tag_mmap *) tag)->entry_size))
|
||||
if (mmap->type==1)
|
||||
bitmap_page_setfree(mmap->addr,mmap->len);
|
||||
physical_range_free(mmap->addr,mmap->len);
|
||||
else
|
||||
bitmap_page_setused(mmap->addr,mmap->len);
|
||||
bitmap_page_setused(0x0,KERNELSIZE);
|
||||
physical_range_use(mmap->addr,mmap->len);
|
||||
//physical_range_use(0x0,KERNELSIZE);
|
||||
}
|
||||
/*******************************************************************************/
|
||||
/* Allocation de page virtuelle de mémoire */
|
||||
page *virtual_page_getfree(void)
|
||||
{
|
||||
page *pg;
|
||||
vrange *vpages;
|
||||
u8 *vaddr, *paddr;
|
||||
paddr = physical_page_getfree();
|
||||
if (paddr == NULL)
|
||||
panic ("Plus de memoire physique disponible !\n");
|
||||
if (TAILQ_EMPTY(&freepages)
|
||||
panic ("Plus de place disponible dans la reserve de page !\n");
|
||||
vpages = TAILQ_FIRST(&freepages);
|
||||
vaddr = vpages->vaddrlow;
|
||||
vpages->vaddrlow += PAGESIZE;
|
||||
if (pages->vaddrlow == pages->vaddrhigh) {
|
||||
TAILQ_REMOVE(&freepages, vpages, tailq);
|
||||
vfree(vpages);
|
||||
}
|
||||
pd0_add_page(v_addr, p_addr, 0);*/
|
||||
virtual_pd_page_add(pd,vaddr,paddr, 0)
|
||||
pg = (page*) vmalloc(sizeof(page));
|
||||
pg->vaddr = vaddr;
|
||||
pg->paddr = paddr;
|
||||
return pg;
|
||||
}
|
||||
|
||||
/*******************************************************************************/
|
||||
/* Création d'un directory pour la gestion virtuelle de la mémoire */
|
||||
|
||||
pd *virtual_pd_create()
|
||||
{
|
||||
pd *new;
|
||||
u32 *pdir,pd0;
|
||||
u32 i;
|
||||
pd = (pd *) vmalloc(sizeof(pd));
|
||||
pd->addr = virtual_page_getfree();
|
||||
if (kerneldirectory!=NULL)
|
||||
{
|
||||
pdir = (u32 *) pd->base->vaddr;
|
||||
pd0 = (u32 *) kerneldirectory->base->vaddr;
|
||||
for (i = 0; i < 256; i++)
|
||||
pdir[i] = pd0[i];
|
||||
for (i = 256; i < 1023; i++)
|
||||
pdir[i] = 0;
|
||||
pdir[1023] = ((u32) pd->base->p_addr | (PG_PRESENT | PG_WRITE));
|
||||
}
|
||||
TAILQ_INIT(&pd->addr);
|
||||
return pd;
|
||||
}
|
||||
}
|
||||
|
||||
/*******************************************************************************/
|
||||
/* Initialisation d'une STAILQ pour la gestion virtuelle de la mémoire */
|
||||
|
||||
void virtual_init(void)
|
||||
{
|
||||
kernelheap = (u8 *) KERNEL_HEAP;
|
||||
vrange *vpages = (vrange*) vmalloc(sizeof(vrange));
|
||||
vpages->vaddrlow = (u8 *) KERNEL_HEAP;
|
||||
vpages->vaddrhigh = (u8 *) KERNEL_HEAP+MAXHEAPSIZE;
|
||||
TAILQ_INIT(&freepages);
|
||||
TAILQ_INSERT_TAIL(&freepages, vpages, tailq);
|
||||
kerneldirectory=virtual_pd_create();
|
||||
virtual_range_use_kernel(0x00000000, 0x00000000, KERNELSIZE);
|
||||
}
|
||||
|
||||
/*******************************************************************************/
|
||||
|
@ -132,22 +269,16 @@ void bitmap_init()
|
|||
|
||||
void initpaging(void)
|
||||
{
|
||||
u16 i;
|
||||
pd0[0] = ((u32) pg0 | (PAGE_PRESENT | PAGE_WRITE | PAGE_4MB));
|
||||
pd0[1] = ((u32) pg1 | (PAGE_PRESENT | PAGE_WRITE | PAGE_4MB));
|
||||
for (i = 2; i < 1023; i++)
|
||||
pd0[i] = ((u32) pg1 + PAGESIZE * i) | (PAGE_PRESENT | PAGE_WRITE);
|
||||
|
||||
pd0[1023] = ((u32) pd0 | (PAGE_PRESENT | PAGE_WRITE));
|
||||
|
||||
asm("mov %[pd0_addr], %%eax \n \
|
||||
physical_init();
|
||||
virtual_init();
|
||||
asm("mov %[directory_addr], %%eax \n \
|
||||
mov %%eax, %%cr3 \n \
|
||||
mov %%cr4, %%eax \n \
|
||||
or $0x00000010, %%eax \n \
|
||||
mov %%eax, %%cr4 \n \
|
||||
mov %%cr0, %%eax \n \
|
||||
or $0x80000001, %%eax \n \
|
||||
mov %%eax, %%cr0"::[pd0_addr]"m"(pd0));
|
||||
mov %%eax, %%cr0"::[directory_addr]"m"(kerneldirectory->addr));
|
||||
}
|
||||
|
||||
/*******************************************************************************/
|
||||
|
|
|
@ -69,8 +69,7 @@ int main(u32 magic, u32 addr)
|
|||
ok();
|
||||
|
||||
print("\033[37m\033[0m -Initilisation de la pagination (PAGING)");
|
||||
bitmap_init();
|
||||
//initpaging();
|
||||
initpaging();
|
||||
ok();
|
||||
|
||||
print("\033[37m\033[0m -Initilisation des interruptions (IDT/PIC)");
|
||||
|
|
Loading…
Reference in New Issue