/*
* Copyright (c) 1998-2003 Michael Shalayeff
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
 * Reserve some bits for chips that don't like to be moved
 * around, e.g. lasi and asp.
 */
ir->ir_rbits = ((1 << 28) | (1 << 27));
/* Remove the reserved lines from the dynamically allocatable set. */
ir->ir_bits &= ~ir->ir_rbits;
#ifdef MULTIPROCESSOR
/* Allocate stack for spin up and FPU emulation. */
TAILQ_INIT(&mlist);
/*
 * One physical page, page-aligned, anywhere in physical memory
 * (low = 0, high = -1L); must succeed now, so waitok = 0.
 */
error = uvm_pglistalloc(PAGE_SIZE, 0, -1L, PAGE_SIZE, 0, &mlist, 1, 0);
if (error) {
aprint_error(": unable to allocate CPU stack!\n");
return;
}
m = TAILQ_FIRST(&mlist);
/*
 * Record the physical address of the hatch stack; the secondary
 * CPU spin-up code runs before a VA mapping is available.
 */
ci->ci_stack = VM_PAGE_TO_PHYS(m);
ci->ci_softc = sc;
/*
 * The CPU whose HPA matches hppa_mcpuhpa is the one we booted on:
 * mark it primary and already running.  Other CPUs are hatched
 * later from cpu_boot_secondary_processors().
 */
if (ci->ci_hpa == hppa_mcpuhpa) {
ci->ci_flags |= CPUF_PRIMARY|CPUF_RUNNING;
} else {
int err;
#ifdef MULTIPROCESSOR
/*
 * Hatch every attached secondary CPU: for each non-primary CPU that
 * has an idle lwp ready, publish its cpu_info through cpu_hatch_info
 * and kick it by writing its external interrupt register (EIR{0}),
 * then poll up to ~10 seconds for it to report CPUF_RUNNING.
 */
void
cpu_boot_secondary_processors(void)
{
struct cpu_info *ci;
struct iomod *cpu;
int i, j;
for (i = 0; i < HPPA_MAXCPUS; i++) {
ci = &cpus[i];
/* Slot never attached. */
if (ci->ci_cpuid == 0)
continue;
/* No idle lwp yet -- nothing for the CPU to run. */
if (ci->ci_data.cpu_idlelwp == NULL)
continue;
/* The boot CPU is already running. */
if (ci->ci_flags & CPUF_PRIMARY)
continue;
/*
 * Release the specified CPU by triggering an EIR{0}.
 *
 * The `load-acquire operation' matching this
 * store-release is somewhere inside the silicon or
 * firmware -- the point is that the store to
 * cpu_hatch_info must happen before writing EIR{0};
 * there is conceptually some magic inside the silicon
 * or firmware that effectively does
 *
 * if (atomic_load_acquire(&cpu->io_eir) == 0) {
 * hw_cpu_spinup_trampoline();
 * }
 *
 * so that hw_cpu_spinup_trampoline correctly sees the
 * value we just stored at cpu_hatch_info.
 */
cpu_hatch_info = ci;
/* The CPU's HPA is the base of its memory-mapped I/O module. */
cpu = (struct iomod *)(ci->ci_hpa);
atomic_store_release(&cpu->io_eir, 0);
/* Wait for CPU to wake up... */
j = 0;
/* Poll CPUF_RUNNING: 10000 iterations x 1 ms = ~10 s timeout. */
while (!(ci->ci_flags & CPUF_RUNNING) && j++ < 10000)
delay(1000);
/*
 * NOTE(review): %i assumes ci_cpuid is int-sized -- verify
 * against the cpuid_t definition for this port.
 */
if (!(ci->ci_flags & CPUF_RUNNING))
printf("failed to hatch cpu %i!\n", ci->ci_cpuid);
}
/*
 * Wait for additional CPUs to spinup.
 *
 * Matches store-release in cpu_boot_secondary_processors.
 */
/*
 * Busy-wait until the primary CPU sets start_secondary_cpu; the
 * acquire ordering makes everything the primary initialized before
 * its store-release visible to this CPU.  NOTE(review): no backoff
 * here -- presumably acceptable since this runs once at boot before
 * the CPU enters the scheduler; confirm against the hatch path.
 */
while (!atomic_load_acquire(&start_secondary_cpu))
;