
Commit 309d7b1

Fix: Process & Kernel PDs going out of sync

1 parent 2c35bdd · commit 309d7b1

9 files changed: +103 −29 lines changed


docs/notes/TODO.md

Lines changed: 2 additions & 2 deletions
@@ -156,13 +156,13 @@
 | ::: unittests how are we going to mock, so for unittests something else | | |
 | ::: need to be done. | | |
 |-------------------------------------------------------------------------|-----------|------------|
-| [ ] Process management | 11 Apr 24 | |
+| [X] Process management | 11 Apr 24 | |
 | ::: [X] Process Killing: Need a way to pass exit code to parent process | 21 Apr 24 | 11 Nov 24 |
 | Possible solution. | | |
 | When a process ending it would add SIGCHILD signal for its parent | 21 Apr 24 | |
 | and the scheduler will make sure that the parent gets the message. | | |
 | Need a way to avoid creation of ZOMBIE processes. | | |
-| ::: [ ] Process Exit: Copy the Kernel PDEs from PD of the process to | 27 Apr 24 | |
+| ::: [X] Process Exit: Copy the Kernel PDEs from PD of the process to | 27 Apr 24 | 26 Dec 24 |
 | ::: PD of the Kernel. At the start we copy the Kernel PDEs but are not | | |
 | ::: copying it back. The Kernel memory mapping might have changed by | | |
 | ::: now, so this copying is needed. | | |
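Background for the TODO entry above: on 32-bit x86 without PAE a Page Directory has 1024 entries and each PDE maps 4 MiB of virtual address space, so the kernel half of every process PD is a fixed range of indices that must mirror the kernel PD. The sketch below shows that arithmetic; the 0xC0000000 higher-half base is an assumption for illustration, the real KERNEL_PDE_INDEX constant comes from the kernel's own headers and may differ.

    #include <stdint.h>

    /* Assumed higher-half split; illustration only. The kernel's actual base and
     * KERNEL_PDE_INDEX are defined in its own headers. */
    #define ASSUMED_KERNEL_VA_BASE 0xC0000000u

    /* PDE index of a 32-bit virtual address: the top 10 bits (each PDE covers 4 MiB). */
    static inline uint32_t pde_index (uint32_t va)
    {
        return va >> 22;
    }

    /* With the assumed base, pde_index(ASSUMED_KERNEL_VA_BASE) == 768, so kernel
     * PDEs occupy indices 768..1023. When the kernel later adds a page table (a
     * new PDE appears in the kernel PD), each process PD still holds the old
     * entries; re-copying indices KERNEL_PDE_INDEX..1023 into them is the
     * "copying it back" that the TODO item describes. */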

include/paging.h

Lines changed: 5 additions & 3 deletions
@@ -42,7 +42,8 @@ typedef enum PagingMapFlags {
 typedef enum PagingOperationFlags {
     PG_NEWPD_FLAG_COPY_KERNEL_PAGES = (1 << 0),
     PG_NEWPD_FLAG_RECURSIVE_MAP = (1 << 1),
-    PG_DELPD_FLAG_KEEP_KERNEL_PAGES = (1 << 2)
+    PG_DELPD_FLAG_KEEP_KERNEL_PAGES = (1 << 2),
+    PG_NEWPD_FLAG_CREATE_NEW = (1 << 3),
 } PagingOperationFlags;

 // Physical start of the page frame 'pf'.
@@ -60,7 +61,8 @@ bool kpg_unmap (PageDirectory pd, PTR va);
 void* kpg_temporaryMap (Physical pa);
 void kpg_temporaryUnmap();
 bool kpg_doesMappingExists (PageDirectory pd, PTR va, Physical* pa);
-bool kpg_createNewPageDirectory (Physical* newPD, PagingOperationFlags flags);
-bool kpg_deletePageDirectory(Physical pd, PagingOperationFlags flags);
+bool kpg_setupPageDirectory (Physical* const pd, PagingOperationFlags flags,
+                             Physical const* const kernelPD);
+bool kpg_deletePageDirectory (Physical pd, PagingOperationFlags flags);

 #endif // PAGING_H
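The new PG_NEWPD_FLAG_CREATE_NEW flag gives kpg_setupPageDirectory two intended uses, both visible at the call sites changed later in this commit. A sketch of the two call patterns follows; error handling is elided and someProcess is a hypothetical variable standing in for a KProcessInfo pointer.

    /* Both calls take the kernel PD so its kernel PDEs can be copied into 'pd'. */
    Physical kernelPD = kvmm_getPageDirectory (g_kstate.context);

    /* 1. Allocate and initialise a brand-new PD for a new process. */
    Physical newPD;
    if (!kpg_setupPageDirectory (&newPD,
                                 PG_NEWPD_FLAG_CREATE_NEW | PG_NEWPD_FLAG_COPY_KERNEL_PAGES |
                                     PG_NEWPD_FLAG_RECURSIVE_MAP,
                                 &kernelPD)) {
        /* handle failure */
    }

    /* 2. Re-run the setup on an existing PD (no PG_NEWPD_FLAG_CREATE_NEW) to bring
     *    its kernel half back in sync with the kernel PD. */
    Physical existingPD = kvmm_getPageDirectory (someProcess->context); /* hypothetical */
    if (!kpg_setupPageDirectory (&existingPD,
                                 PG_NEWPD_FLAG_COPY_KERNEL_PAGES | PG_NEWPD_FLAG_RECURSIVE_MAP,
                                 &kernelPD)) {
        /* handle failure */
    }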

include/process.h

Lines changed: 1 addition & 0 deletions
@@ -76,3 +76,4 @@ UINT kprocess_getCurrentPID();
 bool kprocess_popEvent (UINT pid, KProcessEvent* ev);
 bool kprocess_pushEvent (UINT pid, UINT eventID, UINT eventData);
 KProcessSections* kprocess_getCurrentProcessDataSection();
+void kprocess_syncPD();

include/vmm.h

Lines changed: 1 addition & 0 deletions
@@ -32,6 +32,7 @@ PTR kvmm_findFree (VMemoryManager* vmm, SIZE szPages);
 PTR kvmm_memmap (VMemoryManager* vmm, PTR va, Physical const* const pa, SIZE szPages,
                  VMemoryMemMapFlags flags, Physical* const outPA);
 bool kvmm_checkbounds (VMemoryManager* vmm, PTR addr);
+bool kvmm_isPageDirectoryDirty (VMemoryManager* const vmm);

 #if defined(DEBUG) && defined(PORT_E9_ENABLED)
 void kvmm_printVASList (VMemoryManager* vmm);

include/vmm_struct.h

Lines changed: 1 addition & 0 deletions
@@ -22,6 +22,7 @@ struct VMemoryManager {
     PTR end;
     bool isStaticAllocated;
     Physical parentProcessPD;
+    bool isPageDirectoryDirty; // Process's PageDirectory has changed. Will be reset once read.
     KernelPhysicalMemoryRegions physicalRegion;
     ListNode head;
 };

src/kernel/vmm.c

Lines changed: 13 additions & 2 deletions
@@ -188,7 +188,7 @@ static VMemoryAddressSpace* find_vas (VMemoryManager const* const vmm, PTR start
     return NULL;
 }

-static bool commitVirtualPages (VMemoryManager const* const vmm, PTR vaStart,
+static bool commitVirtualPages (VMemoryManager* const vmm, PTR vaStart,
                                 Physical const* const paStart, SIZE numPages,
                                 const VMemoryAddressSpace* const vas, Physical* const outPA)
 {
@@ -223,6 +223,7 @@ static bool commitVirtualPages (VMemoryManager const* const vmm, PTR vaStart,
     }

     kpg_temporaryUnmap();
+    vmm->isPageDirectoryDirty = true; // PD has changed.
     return true;
 }

@@ -503,11 +504,21 @@ bool kvmm_commitPage (VMemoryManager* vmm, PTR va)
     return true;
 }

-Physical kvmm_getPageDirectory (const VMemoryManager* vmm)
+Physical kvmm_getPageDirectory (const VMemoryManager* const vmm)
 {
     FUNC_ENTRY ("vmm: %x", vmm);

     k_assert (vmm != NULL, "VMM not provided");

     return vmm->parentProcessPD;
 }
+
+bool kvmm_isPageDirectoryDirty (VMemoryManager* const vmm)
+{
+    FUNC_ENTRY ("vmm: %x", vmm);
+    k_assert (vmm != NULL, "VMM not provided");
+
+    bool isDirty = vmm->isPageDirectoryDirty;
+    vmm->isPageDirectoryDirty = false;
+    return isDirty;
+}
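Note that kvmm_isPageDirectoryDirty has read-and-reset semantics: the flag set by commitVirtualPages is cleared on the first query, so a pending sync is reported exactly once per change. A minimal caller sketch under that reading, mirroring how the page fault handler below consumes it:

    /* The first call after a kernel-side commit returns true and clears the flag;
     * subsequent calls return false until the kernel PD changes again. */
    if (kvmm_isPageDirectoryDirty (g_kstate.context)) {
        kprocess_syncPD(); /* propagate the kernel PD change to every process PD */
    }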

src/kernel/x86/interrupts.c

Lines changed: 17 additions & 3 deletions
@@ -139,11 +139,25 @@ void page_fault_handler (InterruptFrame *frame, UINT errorcode)

     // Page faults can occur for various reasons, it could be permissions or if page frame is not
     // present. Commit a physical page only it there is not one already allocated.
-    VMemoryManager* context = kvmm_checkbounds (g_kstate.context, fault_addr)
-                                  ? g_kstate.context
-                                  : kprocess_getCurrentContext();
+    VMemoryManager* context = kprocess_getCurrentContext();
+    if (kvmm_checkbounds (g_kstate.context, fault_addr)) {
+        context = g_kstate.context;
+
+        // Attempt 1: Check if Kernel PD has changed and a sync is pending.
+        if (kvmm_isPageDirectoryDirty (context)) {
+            INFO ("Kernel and Process PD out of sync. Syncing..");
+            kprocess_syncPD();
+            return; // then retry.
+        }
+    }

+    // Attempt 2 : Process PD is sync with the Kernel PD. Actually commit the faulting virtual
+    // address.
     if (err->Present == false && kvmm_commitPage (context, fault_addr)) {
+        // Kernel PD has changed. Sync all Page Directories to it.
+        if (context == g_kstate.context) {
+            kprocess_syncPD();
+        }
         return; // then retry
     }

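The err->Present test above relies on bit 0 of the x86 page-fault error code: clear when the fault happened because the page was not present, set when it was a protection violation on a present page. The project's actual error-code type is not shown in this diff; the bitfield below is only an assumed illustration of the layout the handler depends on.

    #include <stdint.h>

    /* Assumed decoding of the low bits of the x86 page-fault error code; only
     * 'Present' is consulted by the handler above. */
    typedef struct PageFaultErrorSketch {
        uint32_t Present  : 1; /* 0 = page not present, 1 = protection violation */
        uint32_t Write    : 1; /* fault caused by a write access */
        uint32_t UserMode : 1; /* fault occurred while in user mode */
        uint32_t unused   : 29;
    } PageFaultErrorSketch;

    /* The handler commits a page only when Present == false, i.e. a missing
     * mapping, and never for permission faults on already-present pages. */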
src/kernel/x86/paging.c

Lines changed: 34 additions & 17 deletions
@@ -425,35 +425,52 @@ bool kpg_doesMappingExists (PageDirectory pd, PTR va, Physical* pa)
 * @Input flags Flags that determine the setup of the Page directory.
 * @return True if success, false otherwise.
 **************************************************************************************************/
-bool kpg_createNewPageDirectory (Physical* newPD, PagingOperationFlags flags)
+bool kpg_setupPageDirectory (Physical* const pd, PagingOperationFlags flags,
+                             Physical const* const kernelPD)
 {
-    FUNC_ENTRY ("newPD return addr: %px, Flags: %x", newPD, flags);
+    FUNC_ENTRY ("PD addr: %px, Flags: %x, kernel PD: %px", pd, flags, kernelPD);

-    if (kpmm_alloc (newPD, 1, PMM_REGION_ANY) == false) {
-        RETURN_ERROR (ERROR_PASSTHROUGH, false); // PMM alloc failure
-    }
+    k_assert (pd != NULL, "Invalid address.");

-    INFO ("New PD physical location: %px", newPD->val);
+    PageDirectory l_pd = NULL;
+    // ---------------------------------------------------------------
+    // Create a new, empty Page Directory.
+    // ---------------------------------------------------------------
+    if (BIT_ISSET (flags, PG_NEWPD_FLAG_CREATE_NEW)) {
+        if (kpmm_alloc (pd, 1, PMM_REGION_ANY) == false) {
+            RETURN_ERROR (ERROR_PASSTHROUGH, false); // PMM alloc failure
+        }
+        INFO ("New PD physical location: %px", pd->val);

-    PageDirectory pd = s_internal_temporaryMap (*newPD);
-    k_memset (pd, 0, CONFIG_PAGE_FRAME_SIZE_BYTES);
+        l_pd = s_internal_temporaryMap (*pd);
+        k_memset (l_pd, 0, CONFIG_PAGE_FRAME_SIZE_BYTES);
+    } else {
+        l_pd = s_internal_temporaryMap (*pd);
+    }

-    // Temporary map this PD and copy kernel page table entries
+    // ---------------------------------------------------------------
+    // Copy kernel pages tables to the provided PD.
+    // ---------------------------------------------------------------
     if (BIT_ISSET (flags, PG_NEWPD_FLAG_COPY_KERNEL_PAGES)) {
-        PageDirectory currentPD = kpg_getcurrentpd();
+        k_assert (kernelPD != NULL, "Invalid address.");
+
+        PageDirectory l_kernelPD = kpg_temporaryMap (*kernelPD);
         for (UINT pdi = KERNEL_PDE_INDEX; pdi < 1024; pdi++) {
-            pd[pdi] = currentPD[pdi];
+            l_pd[pdi] = l_kernelPD[pdi];
         }
+        kpg_temporaryUnmap();
+    }

-    if (BIT_ISSET (flags, PG_NEWPD_FLAG_RECURSIVE_MAP)) {
-        pd[RECURSIVE_PDE_INDEX].pageTableFrame = PHYSICAL_TO_PAGEFRAME (newPD->val);
-    }
+    // ---------------------------------------------------------------
+    // Recursive map
+    // ---------------------------------------------------------------
+    if (BIT_ISSET (flags, PG_NEWPD_FLAG_RECURSIVE_MAP)) {
+        l_pd[RECURSIVE_PDE_INDEX].pageTableFrame = PHYSICAL_TO_PAGEFRAME (pd->val);
     }
-    s_internal_temporaryUnmap();

-    return true; // Success
+    s_internal_temporaryUnmap();
+    return true;
 }
-
 /***************************************************************************************************
 * Deletes a Page Directory and its Page tables.
 *

src/kernel/x86/process.c

Lines changed: 29 additions & 2 deletions
@@ -208,8 +208,11 @@ static bool s_createProcessPageDirectory (KProcessInfo* pinfo)
     if (BIT_ISUNSET (pinfo->flags, PROCESS_FLAGS_THREAD)) {
         // Create physical memory for Page directory of the new non-thread process
         Physical newPD;
-        if (!kpg_createNewPageDirectory (&newPD, PG_NEWPD_FLAG_COPY_KERNEL_PAGES |
-                                                 PG_NEWPD_FLAG_RECURSIVE_MAP)) {
+        Physical kernelPD = kvmm_getPageDirectory (g_kstate.context);
+        if (!kpg_setupPageDirectory (&newPD,
+                                     PG_NEWPD_FLAG_CREATE_NEW | PG_NEWPD_FLAG_COPY_KERNEL_PAGES |
+                                         PG_NEWPD_FLAG_RECURSIVE_MAP,
+                                     &kernelPD)) {
             RETURN_ERROR (ERROR_PASSTHROUGH, false); // Cannot create new process.
         }

@@ -829,3 +832,27 @@ bool kprocess_pushEvent (UINT pid, UINT eventID, UINT eventData)
     enqueue (&pinfo->eventsQueueHead, &e->eventQueueNode);
     return true;
 }
+
+void kprocess_syncPD()
+{
+    FUNC_ENTRY();
+
+    if (!KERNEL_PHASE_CHECK (KERNEL_PHASE_STATE_KERNEL_READY)) {
+        return;
+    }
+
+    Physical kernelPD = kvmm_getPageDirectory (g_kstate.context);
+
+    ListNode* node = NULL;
+    KProcessInfo* p = NULL;
+    list_for_each (&schedulerQueueHead, node)
+    {
+        p = LIST_ITEM (node, KProcessInfo, schedulerQueueNode);
+        Physical pd = kvmm_getPageDirectory (p->context);
+        if (!kpg_setupPageDirectory (&pd,
+                                     PG_NEWPD_FLAG_COPY_KERNEL_PAGES | PG_NEWPD_FLAG_RECURSIVE_MAP,
+                                     &kernelPD)) {
+            FATAL_BUG(); // No reason it should fail.
+        }
+    }
+}
