/*-
* Copyright (c) 2000 The NetBSD Foundation, Inc.
* Copyright (c) 2002, 2006, 2007 YAMAMOTO Takashi,
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by RedBack Networks Inc.
*
* Author: Bill Sommerfeld
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright (c) 1999 Stefan Grefen
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the NetBSD
* Foundation, Inc. and its contributors.
* 4. Neither the name of The NetBSD Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR AND CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* Statically-allocated CPU info for the primary CPU (or the only
* CPU, on uniprocessors). The CPU info list is initialized to
* point at it.
*/
struct cpu_info cpu_info_primary __aligned(CACHE_LINE_SIZE) = {
.ci_dev = 0,
.ci_self = &cpu_info_primary,
.ci_idepth = -1,
.ci_curlwp = &lwp0,
.ci_curldt = -1,
};
struct cpu_info phycpu_info_primary __aligned(CACHE_LINE_SIZE) = {
.ci_dev = 0,
.ci_self = &phycpu_info_primary,
};
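/*
 * Illustrative sketch (assumed declarations): the CPU info lists
 * mentioned above start out pointing at the static primaries, e.g.
 *
 *      struct cpu_info *cpu_info_list = &cpu_info_primary;
 *      struct cpu_info *phycpu_info_list = &phycpu_info_primary;
 *
 * The attach code below then appends additional cpu_info structures
 * to the tail of the list.
 */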
/*
 * If we're the first attached CPU, use the statically allocated
 * primary cpu_info; otherwise allocate a new one and link it onto
 * the physical CPU list.
 */
aprint_naive("\n");
aprint_normal("\n");
if (nphycpu > 0) {
struct cpu_info *tmp;
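/*
 * Over-allocate by CACHE_LINE_SIZE - 1 bytes so the pointer can be
 * rounded up to a cache line boundary below, keeping each cpu_info
 * on its own cache line.
 */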
ptr = (uintptr_t)kmem_zalloc(sizeof(*ci) + CACHE_LINE_SIZE - 1,
KM_SLEEP);
ci = (struct cpu_info *)roundup2(ptr, CACHE_LINE_SIZE);
ci->ci_curldt = -1;
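/* Link the new cpu_info onto the tail of phycpu_info_list. */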
tmp = phycpu_info_list;
while (tmp->ci_next)
tmp = tmp->ci_next;
tmp->ci_next = ci;
} else {
ci = &phycpu_info_primary;
}
/*
* Knowing the size of the largest cache on this CPU, potentially
* re-color our pages.
*/
aprint_debug_dev(ci->ci_dev, "%d page colors\n", ncolors);
uvm_page_recolor(ncolors);
pmap_tlb_cpu_init(ci);
#ifndef __HAVE_DIRECT_MAP
pmap_vpage_cpu_init(ci);
#endif
}
KASSERT(ci->ci_cpuid == ci->ci_index);
#ifdef __x86_64__
/* No user PGD mapped for this CPU yet */
ci->ci_xen_current_user_pgd = 0;
#endif
mutex_init(&ci->ci_kpm_mtx, MUTEX_DEFAULT, IPL_VM);
pmap_reference(pmap_kernel());
ci->ci_pmap = pmap_kernel();
ci->ci_tlbstate = TLBSTATE_STALE;
/*
 * The boot processor may not be attached first, but the
 * initialization below must be done before other processors
 * can be booted.
 */
if (!again) {
atomic_or_32(&ci->ci_flags, CPUF_PRESENT | CPUF_PRIMARY);
/* Basic init. */
cpu_intr_init(ci);
cpu_get_tsc_freq(ci);
cpu_init(ci);
pmap_cpu_init_late(ci);
/* Every processor needs to init its own ipi h/w (similar to lapic) */
xen_ipi_init();
/* Make sure DELAY() is initialized. */
DELAY(1);
again = true;
}
/* further PCB init done later. */
switch (caa->cpu_role) {
case CPU_ROLE_SP:
atomic_or_32(&ci->ci_flags, CPUF_SP);
cpu_identify(ci);
x86_cpu_idle_init();
break;
case CPU_ROLE_BP:
atomic_or_32(&ci->ci_flags, CPUF_BSP);
cpu_identify(ci);
x86_cpu_idle_init();
break;
case CPU_ROLE_AP:
atomic_or_32(&ci->ci_flags, CPUF_AP);
/*
 * If we have FXSAVE/FXRSTOR, use them.
 */
if (cpu_feature[0] & CPUID_FXSR) {
cr4 |= CR4_OSFXSR;
/*
* If we have SSE/SSE2, enable XMM exceptions.
*/
if (cpu_feature[0] & (CPUID_SSE|CPUID_SSE2))
cr4 |= CR4_OSXMMEXCPT;
}
/* If xsave is supported, enable it */
if (cpu_feature[1] & CPUID2_XSAVE && x86_fpu_save >= FPU_SAVE_XSAVE)
cr4 |= CR4_OSXSAVE;
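/* Merge any requested bits into the current CR4; skip the write if none. */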
if (cr4) {
cr4 |= rcr4();
lcr4(cr4);
}
if (x86_fpu_save >= FPU_SAVE_FXSAVE) {
fpuinit_mxcsr_mask();
}
/*
 * Changing the CR4 register may change the CPUID values. For example,
 * setting CR4_OSXSAVE sets CPUID2_OSXSAVE, which lives in
 * ci_feat_val[1], so update it.
 * XXX Values other than ci_feat_val[1] might change as well.
 */
if (cpuid_level >= 1) {
u_int descs[4];
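/*
 * A minimal sketch of the update, assuming the usual x86_cpuid()
 * helper (leaf 1; descs[2] is the %ecx feature word):
 *
 *      x86_cpuid(1, descs);
 *      ci->ci_feat_val[1] = descs[2];
 */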
for (i = 0; i < maxcpus; i++) {
ci = cpu_lookup(i);
if (ci == NULL)
continue;
if (ci->ci_data.cpu_idlelwp == NULL)
continue;
if ((ci->ci_flags & CPUF_PRESENT) == 0)
continue;
cpu_init_idle_lwp(ci);
}
}
static void
cpu_start_secondary(struct cpu_info *ci)
{
int i;
aprint_debug_dev(ci->ci_dev, "starting\n");
ci->ci_curlwp = ci->ci_data.cpu_idlelwp;
if (CPU_STARTUP(ci, (vaddr_t) cpu_hatch) != 0) {
return;
}
/*
* wait for it to become ready
*/
for (i = 100000; (!(ci->ci_flags & CPUF_PRESENT)) && i > 0; i--) {
delay(10);
}
if ((ci->ci_flags & CPUF_PRESENT) == 0) {
aprint_error_dev(ci->ci_dev, "failed to become ready\n");
#if defined(MPDEBUG) && defined(DDB)
printf("dropping into debugger; continue from here to resume boot\n");
Debugger();
#endif
}
CPU_START_CLEANUP(ci);
}
void
cpu_boot_secondary(struct cpu_info *ci)
{
int i;
atomic_or_32(&ci->ci_flags, CPUF_GO);
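/* Wait up to about one second (100000 iterations of 10 us) for CPUF_RUNNING. */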
for (i = 100000; (!(ci->ci_flags & CPUF_RUNNING)) && i > 0; i--) {
delay(10);
}
if ((ci->ci_flags & CPUF_RUNNING) == 0) {
aprint_error_dev(ci->ci_dev, "CPU failed to start\n");
#if defined(MPDEBUG) && defined(DDB)
printf("dropping into debugger; continue from here to resume boot\n");
Debugger();
#endif
}
}
/*
* APs end up here immediately after initialisation and VCPUOP_up in
* mp_cpu_start().
* At this point, we are running in the idle pcb/idle stack of the new
* CPU. This function jumps to the idle loop and starts looking for
* work.
*/
extern void x86_64_tls_switch(struct lwp *);
void
cpu_hatch(void *v)
{
struct cpu_info *ci = (struct cpu_info *)v;
struct pcb *pcb;
int s, i;
/*
 * Set up the "trampoline". On Xen, we set up nearly all cpu context
 * outside a trampoline, so we prototype and call targetip like so:
 * void targetip(struct cpu_info *);
 */
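/*
 * gdt_prepframes: build the list of machine frame numbers backing the
 * GDT pages at 'base' (one entry per page) and clear PTE_W on each, so
 * the guest no longer maps them writable.
 */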
static void
gdt_prepframes(paddr_t *frames, vaddr_t base, uint32_t entries)
{
int i;
for (i = 0; i < entries; i++) {
frames[i] = ((paddr_t)xpmap_ptetomach(
(pt_entry_t *)(base + (i << PAGE_SHIFT)))) >> PAGE_SHIFT;
/* Mark Read-only */
pmap_pte_clearbits(kvtopte(base + (i << PAGE_SHIFT)),
PTE_W);
}
}
#ifdef __x86_64__
extern char *ldtstore;
static void
xen_init_amd64_vcpuctxt(struct cpu_info *ci, struct vcpu_guest_context *initctx,
void targetrip(struct cpu_info *))
{
/* page frames to point at GDT */
extern int gdt_size;
paddr_t frames[16];
psize_t gdt_ents;
/* resume with interrupts off */
vci = ci->ci_vcpu;
vci->evtchn_upcall_mask = 1;
__insn_barrier();
/* resume in kernel-mode */
initctx->flags = VGCF_in_kernel | VGCF_online;
/*
 * Stack and entry points:
 * We arrange for the stack frame for cpu_hatch() to
 * appear as a callee frame of lwp_trampoline(). Being a
 * leaf frame prevents trampling on any of the MD stack setup
 * that x86/vm_machdep.c:cpu_lwp_fork() does for idle_loop().
 */
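/*
 * A minimal sketch of that arrangement (illustrative only; the register
 * fields come from Xen's struct cpu_user_regs, and "idle_stack_top" is a
 * stand-in for the idle lwp's stack pointer):
 *
 *      initctx->user_regs.rip = (vaddr_t)targetrip;
 *      initctx->user_regs.rdi = (uint64_t)ci;       targetrip's argument
 *      initctx->user_regs.rsp = idle_stack_top;     leaf frame, nothing pushed
 */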
/*
 * Map the user address space in kernel space and load the
 * user %cr3.
 */
new_pgd = pmap->pm_pdir;
KASSERT(pmap == ci->ci_pmap);
/* Copy user pmap L4 PDEs (in user addr. range) to per-cpu L4 */
for (i = 0; i < PDIR_SLOT_USERLIM; i++) {
KASSERT(pmap != pmap_kernel() || new_pgd[i] == 0);
if (ci->ci_kpm_pdir[i] != new_pgd[i]) {
xpq_queue_pte_update(l4_pd_ma + i * sizeof(pd_entry_t),
new_pgd[i]);
}
}
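/* Switch the user %cr3 to this pmap's top-level PD and record it for this CPU. */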
xen_set_user_pgd(pmap_pdirpa(pmap, 0));
ci->ci_xen_current_user_pgd = pmap_pdirpa(pmap, 0);
#else
paddr_t l3_pd = xpmap_ptom_masked(ci->ci_pae_l3_pdirpa);
/* don't update the kernel L3 slot */
for (i = 0; i < PDP_SIZE - 1; i++) {
xpq_queue_pte_update(l3_pd + i * sizeof(pd_entry_t),
xpmap_ptom(pmap->pm_pdirpa[i]) | PTE_P);
}
#endif
tlbflush();
/* old pmap no longer visible to xen_kpm_sync() */
if (oldpmap != pmap_kernel()) {
kcpuset_atomic_clear(oldpmap->pm_xen_ptp_cpus, cid);
}
mutex_exit(&ci->ci_kpm_mtx);
}
/*
* pmap_cpu_init_late: perform late per-CPU initialization.
*
* Short note about percpu PDIR pages. Both the PAE and __x86_64__ architectures
* have per-cpu PDIR tables, for two different reasons:
* - on PAE, this is to get around Xen's pagetable setup constraints (multiple
* L3[3]s cannot point to the same L2 - Xen will refuse to pin a table set up
* this way).
* - on __x86_64__, this is for multiple CPUs to map in different user pmaps
* (see cpu_load_pmap()).
*
* What this means for us is that the PDIR of the pmap_kernel() is considered
* to be a canonical "SHADOW" PDIR with the following properties:
* - its recursive mapping points to itself
* - per-cpu recursive mappings point to themselves on __x86_64__
* - per-cpu L4 pages' kernel entries are expected to be in sync with
* the shadow
*/
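/*
 * A minimal sketch of the last point above (illustrative only; "slot"
 * and the iterator are stand-ins): keeping a kernel L4 entry in sync
 * means pushing the shadow's PDE into every CPU's private L4, roughly:
 *
 *      pd_entry_t pde = pmap_kernel()->pm_pdir[slot];
 *      for (CPU_INFO_FOREACH(cii, ci))
 *              xpq_queue_pte_update(xpmap_ptom_masked(ci->ci_kpm_pdirpa) +
 *                  slot * sizeof(pd_entry_t), pde);
 */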
void
pmap_cpu_init_late(struct cpu_info *ci)
{
int i;
/*
 * The BP already has its own PD page, allocated during early
 * MD startup.
 */
/* Initialise L3 entries 0 - 2: point them at pmap_kernel()'s L2 pages */
for (i = 0; i < PDP_SIZE - 1; i++) {
ci->ci_pae_l3_pdir[i] =
xpmap_ptom_masked(pmap_kernel()->pm_pdirpa[i]) | PTE_P;
}
#endif
#ifdef __x86_64__
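/*
 * Ask the hypervisor to validate and pin the per-cpu kernel L4 so it
 * can later be installed as a %cr3.
 */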
xpq_queue_pin_l4_table(xpmap_ptom_masked(ci->ci_kpm_pdirpa));
#else
/*
 * Initialize L3 entry 3. This mapping is shared across all pmaps and is
 * static, i.e. loading a new pmap will not update this entry.
 */
ci->ci_pae_l3_pdir[3] = xpmap_ptom_masked(ci->ci_kpm_pdirpa) | PTE_P;