# Generate CTF (Compact C Type Format) data when MKCTF=yes.
# bmake directives require a leading dot: .if/.endif, not if/endif.
.if ${MKCTF:Uno} == "yes"
CFLAGS+=	-g
# Only need symbols for ctf, strip them after converting to CTF
CTFFLAGS=	-L VERSION
CTFMFLAGS=	-t -L VERSION
# Keep symbols if built with "-g"
.if !empty(COPTS:M*-g*) || ${MKDEBUG:Uno} == "yes"
CTFFLAGS+=	-g
CTFMFLAGS+=	-g
.endif
.endif
# XXX until the kernel is fixed again...
CFLAGS+=	-fno-strict-aliasing
CWARNFLAGS+=	-Wno-pointer-sign -Wno-attributes
CWARNFLAGS+=	-Wno-type-limits
CWARNFLAGS+=	${CC_WNO_ADDRESS_OF_PACKED_MEMBER}

# XXX This is a workaround for platforms that have relative relocations
# that, when relocated by the module loader, result in addresses that
# overflow the size of the relocation (e.g. R_PPC_REL24 in powerpc).
# The real solution to this involves generating trampolines for those
# relocations inside the loader and removing this workaround, as the
# resulting code would be much faster.
.if ${MACHINE_CPU} == "aarch64"
CFLAGS+=	-march=armv8-a+nofp+nosimd
.elif ${MACHINE_CPU} == "arm"
CFLAGS+=	-fno-common -fno-unwind-tables
.elif ${MACHINE_CPU} == "hppa"
CFLAGS+=	-mlong-calls -mno-space-regs -mfast-indirect-calls
.elif ${MACHINE_CPU} == "powerpc"
# -mlongcall / -mno-pltseq are gcc-only; the ${cond:?then:else} modifier
# expands to nothing for other compilers.
CFLAGS+=	${${ACTIVE_CC} == "gcc":? -mlongcall :}
CFLAGS+=	${${ACTIVE_CC} == "gcc" && ${HAVE_GCC:U0} >= 9:? -mno-pltseq :}
.elif ${MACHINE_CPU} == "vax"
CFLAGS+=	-fno-pic
.elif ${MACHINE_CPU} == "riscv"
CFLAGS+=	-mcmodel=medany
.elif ${MACHINE_ARCH} == "mips64eb" && !defined(BSD_MK_COMPAT_FILE)
CFLAGS+=	-mabi=64
AFLAGS+=	-mabi=64
LDFLAGS+=	-Wl,-m,elf64btsmip
.elif ${MACHINE_ARCH} == "mips64el" && !defined(BSD_MK_COMPAT_FILE)
CFLAGS+=	-mabi=64
AFLAGS+=	-mabi=64
LDFLAGS+=	-Wl,-m,elf64ltsmip
.endif
.if ${MACHINE_CPU} == "mips"
# We can't use -msym32 with -mlong-calls as -msym32 forces all addresses
# to be 32-bit which defeats the whole purpose of long calls.
CFLAGS+=	-mlong-calls
.endif
.if ${MACHINE_CPU} == "sparc64"
# force same memory model as rest of the kernel
# (gcc and clang spell the equivalent model differently)
CFLAGS+=	${${ACTIVE_CC} == "gcc":? -mcmodel=medlow :}
CFLAGS+=	${${ACTIVE_CC} == "clang":? -mcmodel=small :}
.endif
# evbppc needs some special help
# NOTE(review): an evbppc (powerpc) guard wrapping a MACHINE_CPU == "arm"
# check can never be true; this looks like two separate sections merged in
# transit -- verify against the upstream bsd.kmodule.mk.
.if ${MACHINE} == "evbppc"
.if ${MACHINE_CPU} == "arm"
# The solution to limited branch space involves generating trampolines for
# those relocations while creating the module, as the resulting code will
# be much faster and simplifies the loader.
ARCHDIR=	$S/modules/arch/${MACHINE_CPU}
ASM_H=		$S/arch/${MACHINE_CPU}/include/asm.h
CLEANFILES+=	tmp.o tmp.S ${KMOD}_tmp.o ${KMOD}_tramp.o ${KMOD}_tramp.S

# Link all objects into one relocatable (tmp.o), then relink it together
# with whatever extra inputs kmodwrap.awk derives from its symbol/reloc
# dump (the $$(...) is shell command substitution, hence the doubled $).
${KMOD}_tmp.o: ${OBJS} ${DPADD}
	${_MKTARGET_LINK}
	${LD} -r -o tmp.o ${OBJS}
	${LD} -r \
	    $$(${OBJDUMP} --syms --reloc tmp.o | \
	    ${TOOL_AWK} -f ${ARCHDIR}/kmodwrap.awk) \
	    -o ${.TARGET} tmp.o

.if defined(PROGDEBUG)
# Mirror each install directory under ${DEBUGDIR} for the .debug companion.
.for i in ${_INST_DIRS}
_DEBUG_INST_DIRS+=	${DEBUGDIR}${i}
.endfor
_INST_DIRS+=	${_DEBUG_INST_DIRS}
_PROGDEBUG:=	${DESTDIR}${DEBUGDIR}${KMODULEDIR}/${PROG}.debug
.endif
# NOTE(review): the two .if blocks opened above (evbppc, arm) had no
# matching .endif anywhere in this file as received; close them here so
# the rest of the file parses unconditionally -- confirm against upstream.
.endif
.endif
# Generate an install rule for the module binary and, when present, its
# debug companion: .for binds _P (install path) and P (built file) pairwise.
.for _P P in ${_PROG} ${PROG} ${_PROGDEBUG} ${PROGDEBUG}
.if ${MKUPDATE} == "no"
# '!' forces the recipe to run even when the target looks up to date.
${_P}! ${P}					# install rule
.if !defined(BUILD) && !make(all) && !make(${P})
${_P}! .MADE					# no build at install
.endif
.else
${_P}: ${P}					# install rule
.if !defined(BUILD) && !make(all) && !make(${P})
${_P}: .MADE					# no build at install
.endif
.endif
	${_MKTARGET_INSTALL}
	dirs=${_INST_DIRS:Q}; \
	for d in $$dirs; do \
		${INSTALL_DIR} ${DESTDIR}$$d; \
	done
	${INSTALL_FILE} -o ${KMODULEOWN} -g ${KMODULEGRP} -m ${KMODULEMODE} \
	    ${.ALLSRC} ${.TARGET}
# NOTE(review): the .for opened above had no matching .endfor in this file
# as received; close it here before the trailing includes.
.endfor
##### Pull in related .mk logic
# Links installed by bsd.links.mk inherit the module's ownership/mode
# unless the includer overrode them ('?=' only sets when unset).
LINKSOWN?=	${KMODULEOWN}
LINKSGRP?=	${KMODULEGRP}
LINKSMODE?=	${KMODULEMODE}

.include <bsd.man.mk>
.include <bsd.links.mk>
.include <bsd.dep.mk>
.include <bsd.clean.mk>