
kernel mode : init

Martins3 opened this issue 4 years ago • 8 comments

  • [ ] init
    • [x] it seems no hardware enable is needed, which is kind of unbelievable
    • [ ] hardware_enable_all
    • [ ] kvm_vz_hardware_enable
    • [ ] kvm_vz_vcpu_put
  • [ ] entry setup
    • [ ] remove kvm_host.h : kvm_vcpu and arch_vcpu
    • [ ] register the entry
  • [ ] hypercall / skip one instruction
  • [ ] PF_VCPU
	if (current->flags & PF_VCPU)
		kvm_vz_vcpu_save_wired(vcpu);
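
For context, this is how the mainline MIPS KVM vcpu_put path uses that flag: PF_VCPU is set on current around guest entry (by guest_enter()), so vcpu_put can tell "preempted while guest state is live in hardware" apart from an ordinary ioctl-path vcpu_put. A minimal sketch, assuming dune keeps the same convention (example_vcpu_put is a made-up name; needs <linux/sched.h> for current/PF_VCPU):

/*
 * Sketch only: PF_VCPU is set while the task is inside the guest-run
 * section and cleared on guest exit, so this check distinguishes a
 * context switch away from live guest state from a plain vcpu_put.
 */
static int example_vcpu_put(struct vz_vcpu *vcpu, int cpu)
{
	/*
	 * Only if we were really running guest code are the guest's wired
	 * TLB entries live in hardware and worth saving.
	 */
	if (current->flags & PF_VCPU)
		kvm_vz_vcpu_save_wired(vcpu);

	/* ... save guest CP0 state, timer, etc. ... */
	return 0;
}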

What should be initialized, and how?

Currently, I just want to follow how dune works (rough sketch below):

  • dune_enter => vz_launch => create_vcpu && create_vm (yes, the VM and VCPU are a one-to-one pair)
  • dune_init (this is module-related init)
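
A rough sketch of that call chain, just to pin down the shape. Only dune_enter, vz_launch, create_vm and create_vcpu come from the list above; struct vz_vm, struct dune_config, destroy_vm and all signatures are placeholders:

/* Placeholder signatures; error paths kept minimal on purpose. */
long dune_enter(struct dune_config __user *uconf)
{
	struct vz_vm *vm;
	struct vz_vcpu *vcpu;

	/* one VM and one VCPU per task entering dune, created as a pair */
	vm = create_vm();
	if (IS_ERR(vm))
		return PTR_ERR(vm);

	vcpu = create_vcpu(vm);
	if (IS_ERR(vcpu)) {
		destroy_vm(vm);		/* placeholder cleanup helper */
		return PTR_ERR(vcpu);
	}

	/* switch this task into guest kernel mode and start running */
	return vz_launch(vcpu, uconf);
}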

use Loongson VZ instead of VZ

  • [ ] we will diff the code later

Martins3 avatar Dec 19 '20 03:12 Martins3

static int __noclone vz_run_vcpu(struct vz_vcpu *vcpu)
  • [ ] __noclone (prevents GCC from cloning the function; presumably needed because the guest-entry path contains inline asm that must be entered through exactly this one copy)

Martins3 avatar Dec 19 '20 03:12 Martins3

init

kvm_arch_vcpu_create
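
For reference, mainline arch/mips/kvm/mips.c builds the per-vcpu exception code here. A condensed sketch from memory (sizes, offsets and error handling elided; the Loongson tree may differ):

/* Condensed sketch of mainline kvm_arch_vcpu_create() for MIPS. */
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu = kzalloc(sizeof(*vcpu), GFP_KERNEL);
	void *gebase, *handler, *p;

	/* ... kvm_vcpu_init(), error handling elided ... */

	/*
	 * Allocate a private "guest exception base": the guest's exception
	 * vectors are generated at runtime with uasm into this buffer.
	 */
	gebase = kzalloc(ALIGN(0x4000, PAGE_SIZE), GFP_KERNEL);
	vcpu->arch.guest_ebase = gebase;

	/* handlers are emitted past the vector area */
	handler = gebase + 0x2000;

	/* TLB refill vector at offset 0, general exception vector at 0x180 */
	kvm_mips_build_tlb_refill_exception(gebase, handler);
	kvm_mips_build_exception(gebase + 0x180, handler);

	/* the common exit path that all vectors branch to */
	p = kvm_mips_build_exit(handler);

	/* the guest entry trampoline, called from vcpu_run */
	vcpu->arch.vcpu_run = p;
	kvm_mips_build_vcpu_run(p);

	/* make the freshly written code visible to instruction fetch */
	flush_icache_range((unsigned long)gebase,
			   (unsigned long)gebase + ALIGN(0x4000, PAGE_SIZE));

	return vcpu;
}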

Martins3 avatar Dec 19 '20 06:12 Martins3

/* FPU/MSA context management */
diff --git a/arch/mips/kvm/entry.c b/arch/mips/kvm/entry.c
index 4bd89d8d775b..0397432d8db7 100644
--- a/arch/mips/kvm/entry.c
+++ b/arch/mips/kvm/entry.c
@@ -305,6 +305,7 @@ static void *kvm_mips_build_enter_guest(void *addr)
        memset(labels, 0, sizeof(labels));
        memset(relocs, 0, sizeof(relocs));

+  // TODO: it seems we are writing the saved guest pc back into EPC here
        /* Set Guest EPC */
        UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, pc), K1);
        UASM_i_MTC0(&p, T0, C0_EPC);
@@ -363,6 +364,7 @@ static void *kvm_mips_build_enter_guest(void *addr)

        /* Root ASID Dealias (RAD) */

+  // TODO set asid ?
        /* Save host ASID */
        UASM_i_MFC0(&p, K0, C0_ENTRYHI);
        UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_entryhi),
@@ -462,6 +464,9 @@ static void *kvm_mips_build_enter_guest(void *addr)
        return p;
 }

+// TODO: compare this with `build_loongson3_tlb_refill_handler`.
+// Almost the same, but without check_for_high_segbits. Why?
+// Do we in fact support a 48-bit address space?
 /**
  * kvm_mips_build_tlb_refill_exception() - Assemble TLB refill handler.
  * @addr:      Address to start writing code.
@@ -572,7 +577,7 @@ void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler)
  *
  * Returns:    Next address after end of written function.
  */
-void *kvm_mips_build_exception(void *addr, void *handler)
+void *dune_mips_build_exception(void *addr, void *handler)
 {
        u32 *p = addr;
        struct uasm_label labels[2];
@@ -674,12 +679,17 @@ void *kvm_mips_build_exit(void *addr)
        UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, pc), K1);

        UASM_i_MFC0(&p, K0, C0_BADVADDR);
+  // why save the GPA into guest cp0_badvaddr?
+  // the guest CP0's BadVAddr contains the GVA, so that part seems fairly reasonable
+  //
+  // 1. TODO: the guest has a separate CP0, find the evidence
        UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_badvaddr),
                  K1);

        uasm_i_mfc0(&p, K0, C0_CAUSE);
        uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_cause), K1);

+  // TODO what's gscause
        uasm_i_mfc0(&p, K0, C0_GSCAUSE);
        uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_gscause), K1);

diff --git a/arch/mips/kvm/ls3acomp-vz.c b/arch/mips/kvm/ls3acomp-vz.c
index 5701010f1d4c..589bdf2f725e 100644
--- a/arch/mips/kvm/ls3acomp-vz.c
+++ b/arch/mips/kvm/ls3acomp-vz.c
@@ -2735,6 +2735,9 @@ static int kvm_vz_check_requests(struct kvm_vcpu *vcpu, int cpu)
                        /* This will clobber guest TLB contents too */
                        ret = 1;
                }
+    // TODO: ASID seems to be only related to flushing
+    // - [ ] emulate.c:kvm_mips_change_entryhi
+    //   - [ ] MIPS also implements a shadow MMU
                /*
                 * For Root ASID Dealias (RAD) we don't do anything here, but we
                 * still need the request to ensure we recheck asid_flush_mask.
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index f844c4135735..212280961827 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -458,6 +458,8 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
                                         handler);
        }

+  // TODO: what is the difference between this and the host's normal exit handler?
+  // - what does it contain?
        /* General exit handler */
        p = handler;
        p = kvm_mips_build_exit(p);
@@ -1944,6 +1946,9 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
        if (ret == RESUME_GUEST)
                kvm_timer_callbacks->acquire_htimer(vcpu);

+  // TODO: summarize the relation between the variables er and ret (see the sketch after this diff)
+  // RESUME_HOST : return to the host (an error, or work the host must finish)
+  // RESUME_GUEST : continue into the guest
        if (er == EMULATE_DONE && !(ret & RESUME_HOST))
                kvm_mips_deliver_interrupts(vcpu, cause);

diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index e2840cad409f..913543b014e1 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -181,6 +181,7 @@ static pte_t *kvm_mips_pte_for_gpa(struct kvm *kvm,
                                   struct kvm_mmu_memory_cache *cache,
                                   unsigned long addr)
 {
+  // kvm->arch.gpa_mm
        return kvm_mips_walk_pgd(kvm->arch.gpa_mm.pgd, cache, addr);
 }

@@ -924,6 +925,9 @@ static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
                             bool write_fault,
                             pte_t *out_entry, pte_t *out_buddy)
 {
+  // TODO: yes, this function fixes the broken gpa => pa mapping,
+  // but is the entry then loaded into the TLB here, or is the gpa => pa
+  // translation loaded into the TLB by hardware automatically?
        struct kvm *kvm = vcpu->kvm;
        struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
        gfn_t gfn = gpa >> PAGE_SHIFT;
diff --git a/arch/mips/kvm/vz.c b/arch/mips/kvm/vz.c
index 874960acdc52..988d29494602 100644
--- a/arch/mips/kvm/vz.c
+++ b/arch/mips/kvm/vz.c
@@ -1395,6 +1395,7 @@ static enum emulation_result kvm_trap_vz_no_handler_guest_exit(u32 gexccode,
        return EMULATE_FAIL;
 }

+// TODO Virtualization Manual :  Table 5.3 GuestCtl0 GExcCode values
 static int kvm_trap_vz_handle_guest_exit(struct kvm_vcpu *vcpu)
 {
        u32 *opc = (u32 *) vcpu->arch.pc;
@@ -1411,6 +1412,10 @@ static int kvm_trap_vz_handle_guest_exit(struct kvm_vcpu *vcpu)
                er = kvm_trap_vz_handle_gpsi(cause, opc, vcpu);
                break;
        case MIPS_GCTL0_GEXC_GSFC:
+    // 4.7.8 Guest Software Field Change Exception
+    //
+    // This exception can only be raised by a D/MTC0 instruction executed in guest mode.
+    // Changes to the following CP0 register bitfields always trigger the exception.
                ++vcpu->stat.vz_gsfc_exits;
                er = kvm_trap_vz_handle_gsfc(cause, opc, vcpu);
                break;
@@ -1430,6 +1435,7 @@ static int kvm_trap_vz_handle_guest_exit(struct kvm_vcpu *vcpu)
                break;
        case MIPS_GCTL0_GEXC_GHFC:
                ++vcpu->stat.vz_ghfc_exits;
+    // 4.7.9 Guest Hardware Field Change Exception
                er = kvm_trap_vz_handle_ghfc(cause, opc, vcpu);
                break;
        case MIPS_GCTL0_GEXC_GPA:
@@ -3205,6 +3211,7 @@ static struct kvm_mips_callbacks kvm_vz_callbacks = {
        .vcpu_load = kvm_vz_vcpu_load,
        .vcpu_put = kvm_vz_vcpu_put,
        .vcpu_run = kvm_vz_vcpu_run,
+  // TODO difference between reenter and enter
        .vcpu_reenter = kvm_vz_vcpu_reenter,
 };
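
On the er/ret TODO above: the usual shape in arch/mips/kvm is that each handler reports the emulator's verdict in er, and the exit path maps it onto ret, which kvm_arch_vcpu_ioctl_run uses to decide whether to re-enter the guest. A sketch from memory (exit_disposition is a made-up helper name):

/* Sketch: mapping the emulator's verdict onto the resume decision. */
static int exit_disposition(struct kvm_run *run, enum emulation_result er)
{
	switch (er) {
	case EMULATE_DONE:
		/* the fault was fixed up or the instruction emulated */
		return RESUME_GUEST;
	case EMULATE_DO_MMIO:
		/*
		 * run->exit_reason is already KVM_EXIT_MMIO; the host
		 * (userspace in KVM, dune's own logic here) must finish it.
		 */
		return RESUME_HOST;
	default:
		/* EMULATE_FAIL and friends: give up and return to the host */
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return RESUME_HOST;
	}
}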

Martins3 avatar Dec 19 '20 06:12 Martins3

	/**
	 * Allocate comm page for guest kernel, a TLB will be reserved for
	 * mapping GVA @ 0xFFFF8000 to this page
	 */

	/* COP0 state is mapped into Guest kernel via commpage */
  • [ ] 0xFFFF8000 :-1: feels like a MIPS32 address.
  • [ ] maybe COP0 can be mapped to other places.
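
To make the quoted comment concrete: "reserve a TLB entry" on MIPS means writing one wired entry, with EntryHi holding the 8 KB-aligned virtual page pair and EntryLo0/1 holding the physical frames plus the C/D/V/G bits. A generic illustration only, not the dune/KVM code, assuming 4 KB pages and the root-TLB accessors from <asm/mipsregs.h>:

#include <asm/mipsregs.h>
#include <asm/hazards.h>

/* Generic illustration: pin a single 4 KB mapping into a wired TLB slot. */
static void write_wired_mapping(unsigned long gva, unsigned long pa)
{
	unsigned int idx = read_c0_wired();

	write_c0_wired(idx + 1);		/* grow the wired region by one */
	write_c0_index(idx);			/* target the new wired slot */
	write_c0_entryhi(gva & ~0x1fffUL);	/* VPN2 of the 8 KB page pair */
	/* even page: PFN at bit 6, C=3 (cacheable), D and V set, not global */
	write_c0_entrylo0(((pa >> 12) << 6) | (3 << 3) | (1 << 2) | (1 << 1));
	write_c0_entrylo1(0);			/* odd page left invalid */
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();
}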

Martins3 avatar Dec 19 '20 07:12 Martins3

  • [ ] vzguestid
  • [ ] guest tlb
  • [ ] wired tlb
	/* wired guest TLB entries */
	struct kvm_mips_tlb *wired_tlb;
	unsigned int wired_tlb_limit;
	unsigned int wired_tlb_used;
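
Mainline's kvm_vz_vcpu_save_wired() in arch/mips/kvm/vz.c is the reference for those fields. A simplified sketch from memory, with the fields assumed to live directly on struct vz_vcpu as in the dune port:

/* Simplified sketch of saving the guest's wired TLB entries on vcpu_put. */
static void save_wired_sketch(struct vz_vcpu *vcpu)
{
	/* how many guest TLB entries are currently wired */
	unsigned int wired = read_gc0_wired();
	struct kvm_mips_tlb *tlbs;

	/* grow the backing array if the guest wired more entries */
	if (wired > vcpu->wired_tlb_limit) {
		tlbs = krealloc(vcpu->wired_tlb, wired * sizeof(*tlbs),
				GFP_ATOMIC);
		if (WARN_ON(!tlbs)) {
			wired = vcpu->wired_tlb_limit;	/* save what fits */
		} else {
			vcpu->wired_tlb = tlbs;
			vcpu->wired_tlb_limit = wired;
		}
	}

	/* copy guest TLB entries [0, wired) out of the hardware */
	if (wired)
		kvm_vz_save_guesttlb(vcpu->wired_tlb, 0, wired);

	vcpu->wired_tlb_used = wired;
}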

Martins3 avatar Dec 19 '20 07:12 Martins3

  • [ ] why doesn't VZ have to keep the vcpu consistent with the cpu?
  • [ ] trace vcpu_load and vcpu_put
/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu = get_cpu();
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
}
EXPORT_SYMBOL_GPL(vcpu_load);
  • kvm_arch_vcpu_ioctl_run
    • vcpu_load
    • kvm_mips_callbacks->vcpu_run
    • vcpu_put
	kvm_preempt_ops.sched_in = kvm_sched_in;
	kvm_preempt_ops.sched_out = kvm_sched_out;
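
Those two hooks are what make the vcpu_load/vcpu_put pair track preemption: registered by vcpu_load() above, they put and reload the arch state whenever the vcpu task is scheduled out and back in (possibly on another cpu). Roughly, in virt/kvm/kvm_main.c:

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	/* the task is back on a cpu (maybe a different one): reload state */
	kvm_arch_vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
			  struct task_struct *next)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	/* the task is being switched away: save/put the arch state */
	kvm_arch_vcpu_put(vcpu);
}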

Last question: what should we load from the vcpu onto the cpu?

Martins3 avatar Dec 19 '20 09:12 Martins3

  • [x] create a new vcpu after every fork?
    • the temporary approach is: yes, and dune_conf is not the solution, sthread is.
    • [ ] maybe it's not a problem; maybe it's even a better solution than sthread

Martins3 avatar Dec 19 '20 13:12 Martins3

  • [ ] signal
    • [ ] why should we block signals?
    • [ ] block them in user mode or in the kernel?
	kvm_sigset_activate(vcpu);
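
On the signal question: kvm_sigset_activate() only does something when userspace registered a mask via KVM_SET_SIGNAL_MASK. It installs that mask for the duration of vcpu_run so that signals merely make the run loop return instead of being delivered in the middle of guest entry/exit, and kvm_sigset_deactivate() restores the original mask afterwards. A sketch of the mainline pair, from memory:

void kvm_sigset_activate(struct kvm_vcpu *vcpu)
{
	if (!vcpu->sigset_active)
		return;

	/* swap in the user-supplied mask; stash the old one in real_blocked */
	sigprocmask(SIG_SETMASK, &vcpu->sigset, &current->real_blocked);
}

void kvm_sigset_deactivate(struct kvm_vcpu *vcpu)
{
	if (!vcpu->sigset_active)
		return;

	/* restore the task's original mask saved in real_blocked */
	sigprocmask(SIG_SETMASK, &current->real_blocked, NULL);
	sigemptyset(&current->real_blocked);
}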

Martins3 avatar Dec 19 '20 13:12 Martins3