From 033015fbad811f653c8b8bfc9f8fb9943d7104f1 Mon Sep 17 00:00:00 2001
From: zhangyu <zhangyu4@kylinos.cn>
Date: Thu, 27 Feb 2025 14:01:35 +0800
Subject: [PATCH] openEuler-24.03-LTS

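Apply the consolidated PREEMPT_RT rework: ARCH_SUPPORTS_RT for arm, arm64,
powerpc, riscv and x86, lazy rescheduling TIF bits behind HAVE_PREEMPT_AUTO,
the nbcon atomic-console and printk ringbuffer rework, conversion of the
serial drivers to the uart_port_lock() API, and RT-safe locking conversions
in arm VFP, the pseries IOMMU, zram, the AMD display FPU code and i915.
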
---
arch/arm/Kconfig | 6 +-
arch/arm/mm/fault.c | 6 +
arch/arm/vfp/vfpmodule.c | 74 +-
arch/arm64/Kconfig | 1 +
arch/powerpc/Kconfig | 2 +
arch/powerpc/include/asm/stackprotector.h | 7 +-
arch/powerpc/kernel/traps.c | 7 +-
arch/powerpc/kvm/Kconfig | 1 +
arch/powerpc/platforms/pseries/Kconfig | 1 +
arch/powerpc/platforms/pseries/iommu.c | 31 +-
arch/riscv/Kconfig | 2 +
arch/riscv/include/asm/cpufeature.h | 2 -
arch/riscv/include/asm/thread_info.h | 2 +
arch/riscv/kernel/cpufeature.c | 90 +-
arch/riscv/kernel/smpboot.c | 1 -
arch/x86/Kconfig | 2 +
arch/x86/include/asm/thread_info.h | 6 +-
drivers/acpi/processor_idle.c | 2 +-
drivers/block/zram/zram_drv.c | 37 +
drivers/block/zram/zram_drv.h | 3 +
drivers/crypto/ccp/hygon/psp-dev.c | 2 +-
drivers/crypto/ccp/hygon/tdm-dev.c | 2 +-
.../gpu/drm/amd/display/amdgpu_dm/dc_fpu.c | 53 +-
.../drm/amd/display/dc/dcn20/dcn20_resource.c | 10 +-
.../drm/amd/display/dc/dcn21/dcn21_resource.c | 10 +-
.../drm/amd/display/dc/dml/dcn20/dcn20_fpu.c | 23 +-
.../drm/amd/display/dc/dml/dcn20/dcn20_fpu.h | 10 +-
drivers/gpu/drm/i915/Kconfig | 1 -
drivers/gpu/drm/i915/display/intel_crtc.c | 15 +-
drivers/gpu/drm/i915/display/intel_vblank.c | 6 +-
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c | 7 +-
.../drm/i915/gt/intel_execlists_submission.c | 17 +-
drivers/gpu/drm/i915/gt/intel_reset.c | 12 +-
drivers/gpu/drm/i915/gt/uc/intel_guc.h | 2 +-
drivers/gpu/drm/i915/i915_request.c | 2 -
drivers/gpu/drm/i915/i915_trace.h | 6 +-
drivers/gpu/drm/i915/i915_utils.h | 2 +-
drivers/tty/serial/21285.c | 8 +-
drivers/tty/serial/8250/8250_aspeed_vuart.c | 6 +-
drivers/tty/serial/8250/8250_bcm7271.c | 28 +-
drivers/tty/serial/8250/8250_core.c | 54 +-
drivers/tty/serial/8250/8250_dma.c | 8 +-
drivers/tty/serial/8250/8250_dw.c | 8 +-
drivers/tty/serial/8250/8250_exar.c | 4 +-
drivers/tty/serial/8250/8250_fsl.c | 6 +-
drivers/tty/serial/8250/8250_mtk.c | 8 +-
drivers/tty/serial/8250/8250_omap.c | 52 +-
drivers/tty/serial/8250/8250_pci1xxxx.c | 8 +-
drivers/tty/serial/8250/8250_port.c | 259 ++-
drivers/tty/serial/altera_jtaguart.c | 28 +-
drivers/tty/serial/altera_uart.c | 20 +-
drivers/tty/serial/amba-pl010.c | 20 +-
drivers/tty/serial/amba-pl011.c | 14 +-
drivers/tty/serial/apbuart.c | 8 +-
drivers/tty/serial/ar933x_uart.c | 26 +-
drivers/tty/serial/arc_uart.c | 16 +-
drivers/tty/serial/atmel_serial.c | 24 +-
drivers/tty/serial/bcm63xx_uart.c | 22 +-
drivers/tty/serial/cpm_uart.c | 8 +-
drivers/tty/serial/digicolor-usart.c | 18 +-
drivers/tty/serial/dz.c | 32 +-
drivers/tty/serial/fsl_linflexuart.c | 26 +-
drivers/tty/serial/fsl_lpuart.c | 88 +-
drivers/tty/serial/icom.c | 26 +-
drivers/tty/serial/imx.c | 84 +-
drivers/tty/serial/ip22zilog.c | 36 +-
drivers/tty/serial/jsm/jsm_neo.c | 4 +-
drivers/tty/serial/jsm/jsm_tty.c | 16 +-
drivers/tty/serial/liteuart.c | 20 +-
drivers/tty/serial/lpc32xx_hs.c | 26 +-
drivers/tty/serial/ma35d1_serial.c | 22 +-
drivers/tty/serial/mcf.c | 20 +-
drivers/tty/serial/men_z135_uart.c | 8 +-
drivers/tty/serial/meson_uart.c | 30 +-
drivers/tty/serial/milbeaut_usio.c | 16 +-
drivers/tty/serial/mpc52xx_uart.c | 12 +-
drivers/tty/serial/mps2-uart.c | 16 +-
drivers/tty/serial/msm_serial.c | 38 +-
drivers/tty/serial/mvebu-uart.c | 18 +-
drivers/tty/serial/omap-serial.c | 44 +-
drivers/tty/serial/owl-uart.c | 26 +-
drivers/tty/serial/pch_uart.c | 10 +-
drivers/tty/serial/pic32_uart.c | 20 +-
drivers/tty/serial/pmac_zilog.c | 52 +-
drivers/tty/serial/pxa.c | 30 +-
drivers/tty/serial/qcom_geni_serial.c | 8 +-
drivers/tty/serial/rda-uart.c | 34 +-
drivers/tty/serial/rp2.c | 20 +-
drivers/tty/serial/sa1100.c | 20 +-
drivers/tty/serial/samsung_tty.c | 50 +-
drivers/tty/serial/sb1250-duart.c | 12 +-
drivers/tty/serial/sc16is7xx.c | 5 +
drivers/tty/serial/serial-tegra.c | 32 +-
drivers/tty/serial/serial_core.c | 92 +-
drivers/tty/serial/serial_mctrl_gpio.c | 4 +-
drivers/tty/serial/serial_port.c | 4 +-
drivers/tty/serial/serial_txx9.c | 26 +-
drivers/tty/serial/sh-sci.c | 68 +-
drivers/tty/serial/sifive.c | 16 +-
drivers/tty/serial/sprd_serial.c | 30 +-
drivers/tty/serial/st-asc.c | 18 +-
drivers/tty/serial/stm32-usart.c | 38 +-
drivers/tty/serial/sunhv.c | 28 +-
drivers/tty/serial/sunplus-uart.c | 26 +-
drivers/tty/serial/sunsab.c | 34 +-
drivers/tty/serial/sunsu.c | 46 +-
drivers/tty/serial/sunzilog.c | 42 +-
drivers/tty/serial/timbuart.c | 8 +-
drivers/tty/serial/uartlite.c | 18 +-
drivers/tty/serial/ucc_uart.c | 4 +-
drivers/tty/serial/vt8500_serial.c | 8 +-
drivers/tty/serial/xilinx_uartps.c | 56 +-
drivers/tty/tty_io.c | 11 +-
fs/proc/consoles.c | 14 +-
include/linux/bottom_half.h | 2 +
include/linux/console.h | 150 ++
include/linux/entry-common.h | 2 +-
include/linux/entry-kvm.h | 2 +-
include/linux/interrupt.h | 29 +
include/linux/netdevice.h | 4 +
include/linux/printk.h | 30 +-
include/linux/sched.h | 16 +-
include/linux/sched/idle.h | 8 +-
include/linux/sched/rt.h | 4 +
include/linux/serial_8250.h | 6 +
include/linux/serial_core.h | 43 +-
include/linux/thread_info.h | 24 +
include/linux/trace_events.h | 8 +-
kernel/Kconfig.preempt | 17 +-
kernel/entry/common.c | 4 +-
kernel/entry/kvm.c | 2 +-
kernel/futex/pi.c | 87 +-
kernel/futex/requeue.c | 6 +-
kernel/ksysfs.c | 12 +
kernel/locking/lockdep.c | 5 +
kernel/locking/rtmutex.c | 37 +-
kernel/locking/rwbase_rt.c | 8 +
kernel/locking/rwsem.c | 8 +-
kernel/locking/spinlock_rt.c | 6 +
kernel/locking/ww_rt_mutex.c | 2 +-
kernel/panic.c | 9 +
kernel/printk/Makefile | 2 +-
kernel/printk/internal.h | 121 ++
kernel/printk/nbcon.c | 1664 +++++++++++++++++
kernel/printk/printk.c | 750 ++++++--
kernel/printk/printk_ringbuffer.c | 360 +++-
kernel/printk/printk_ringbuffer.h | 54 +-
kernel/printk/printk_safe.c | 12 +
kernel/rcu/rcutorture.c | 6 +
kernel/rcu/tree_stall.h | 5 +
kernel/sched/core.c | 127 +-
kernel/sched/debug.c | 19 +
kernel/sched/fair.c | 49 +-
kernel/sched/features.h | 2 +-
kernel/sched/idle.c | 3 +-
kernel/sched/rt.c | 5 +-
kernel/sched/sched.h | 1 +
kernel/signal.c | 30 +-
kernel/softirq.c | 95 +-
kernel/time/hrtimer.c | 4 +-
kernel/time/tick-sched.c | 2 +-
kernel/time/timer.c | 11 +-
kernel/trace/trace.c | 2 +
kernel/trace/trace_output.c | 16 +-
net/core/dev.c | 39 +-
net/core/skbuff.c | 7 +-
166 files changed, 5015 insertions(+), 1510 deletions(-)
create mode 100644 kernel/printk/nbcon.c

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 749935e2e..46910150c 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -34,6 +34,7 @@ config ARM
select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT if CPU_V7
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_HUGETLBFS if ARM_LPAE
+ select ARCH_SUPPORTS_RT if HAVE_POSIX_CPU_TIMERS_TASK_WORK
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF
select ARCH_USE_MEMTEST
@@ -73,7 +74,7 @@ config ARM
select HAS_IOPORT
select HAVE_ARCH_AUDITSYSCALL if AEABI && !OABI_COMPAT
select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
- select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU
+ select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU && !PREEMPT_RT
select HAVE_ARCH_KFENCE if MMU && !XIP_KERNEL
select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU
select HAVE_ARCH_KASAN if MMU && !XIP_KERNEL
@@ -96,7 +97,7 @@ config ARM
select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
select HAVE_EFFICIENT_UNALIGNED_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && MMU
select HAVE_EXIT_THREAD
- select HAVE_FAST_GUP if ARM_LPAE
+ select HAVE_FAST_GUP if ARM_LPAE && !(PREEMPT_RT && HIGHPTE)
select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
select HAVE_FUNCTION_ERROR_INJECTION
select HAVE_FUNCTION_GRAPH_TRACER
@@ -118,6 +119,7 @@ config ARM
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
+ select HAVE_POSIX_CPU_TIMERS_TASK_WORK if !KVM
select MMU_GATHER_RCU_TABLE_FREE if SMP && ARM_LPAE
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RSEQ
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index fef62e4a9..622a30243 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -404,6 +404,9 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
if (addr < TASK_SIZE)
return do_page_fault(addr, fsr, regs);

+ if (interrupts_enabled(regs))
+ local_irq_enable();
+
if (user_mode(regs))
goto bad_area;

@@ -474,6 +477,9 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
static int
do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
+ if (interrupts_enabled(regs))
+ local_irq_enable();
+
do_bad_area(addr, fsr, regs);
return 0;
}
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 7e8773a2d..9fde36fcb 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -55,6 +55,34 @@ extern unsigned int VFP_arch_feroceon __alias(VFP_arch);
*/
union vfp_state *vfp_current_hw_state[NR_CPUS];

+/*
+ * Claim ownership of the VFP unit.
+ *
+ * The caller may change VFP registers until vfp_unlock() is called.
+ *
+ * local_bh_disable() is used to disable preemption and to disable VFP
+ * processing in softirq context. On PREEMPT_RT kernels local_bh_disable() is
+ * not sufficient because it only serializes soft interrupt related sections
+ * via a local lock, but stays preemptible. Disabling preemption is the right
+ * choice here as bottom half processing is always in thread context on RT
+ * kernels so it implicitly prevents bottom half processing as well.
+ */
+static void vfp_lock(void)
+{
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ local_bh_disable();
+ else
+ preempt_disable();
+}
+
+static void vfp_unlock(void)
+{
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ local_bh_enable();
+ else
+ preempt_enable();
+}
+
/*
* Is 'thread's most up to date state stored in this CPUs hardware?
* Must be called from non-preemptible context.
@@ -240,7 +268,7 @@ static void vfp_panic(char *reason, u32 inst)
/*
* Process bitmask of exception conditions.
*/
-static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_regs *regs)
+static int vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr)
{
int si_code = 0;

@@ -248,8 +276,7 @@ static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_

if (exceptions == VFP_EXCEPTION_ERROR) {
vfp_panic("unhandled bounce", inst);
- vfp_raise_sigfpe(FPE_FLTINV, regs);
- return;
+ return FPE_FLTINV;
}

/*
@@ -277,8 +304,7 @@ static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_
RAISE(FPSCR_OFC, FPSCR_OFE, FPE_FLTOVF);
RAISE(FPSCR_IOC, FPSCR_IOE, FPE_FLTINV);

- if (si_code)
- vfp_raise_sigfpe(si_code, regs);
+ return si_code;
}

/*
@@ -324,6 +350,8 @@ static u32 vfp_emulate_instruction(u32 inst, u32 fpscr, struct pt_regs *regs)
static void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
{
u32 fpscr, orig_fpscr, fpsid, exceptions;
+ int si_code2 = 0;
+ int si_code = 0;

pr_debug("VFP: bounce: trigger %08x fpexc %08x\n", trigger, fpexc);

@@ -369,8 +397,8 @@ static void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
* unallocated VFP instruction but with FPSCR.IXE set and not
* on VFP subarch 1.
*/
- vfp_raise_exceptions(VFP_EXCEPTION_ERROR, trigger, fpscr, regs);
- return;
+ si_code = vfp_raise_exceptions(VFP_EXCEPTION_ERROR, trigger, fpscr);
+ goto exit;
}

/*
@@ -394,14 +422,14 @@ static void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
*/
exceptions = vfp_emulate_instruction(trigger, fpscr, regs);
if (exceptions)
- vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
+ si_code2 = vfp_raise_exceptions(exceptions, trigger, orig_fpscr);

/*
* If there isn't a second FP instruction, exit now. Note that
* the FPEXC.FP2V bit is valid only if FPEXC.EX is 1.
*/
if ((fpexc & (FPEXC_EX | FPEXC_FP2V)) != (FPEXC_EX | FPEXC_FP2V))
- return;
+ goto exit;

/*
* The barrier() here prevents fpinst2 being read
@@ -413,7 +441,13 @@ static void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
emulate:
exceptions = vfp_emulate_instruction(trigger, orig_fpscr, regs);
if (exceptions)
- vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
+ si_code = vfp_raise_exceptions(exceptions, trigger, orig_fpscr);
+exit:
+ vfp_unlock();
+ if (si_code2)
+ vfp_raise_sigfpe(si_code2, regs);
+ if (si_code)
+ vfp_raise_sigfpe(si_code, regs);
}

static void vfp_enable(void *unused)
@@ -512,11 +546,9 @@ static inline void vfp_pm_init(void) { }
*/
void vfp_sync_hwstate(struct thread_info *thread)
{
- unsigned int cpu = get_cpu();
+ vfp_lock();

- local_bh_disable();
-
- if (vfp_state_in_hw(cpu, thread)) {
+ if (vfp_state_in_hw(raw_smp_processor_id(), thread)) {
u32 fpexc = fmrx(FPEXC);

/*
@@ -527,8 +559,7 @@ void vfp_sync_hwstate(struct thread_info *thread)
fmxr(FPEXC, fpexc);
}

- local_bh_enable();
- put_cpu();
+ vfp_unlock();
}

/* Ensure that the thread reloads the hardware VFP state on the next use. */
@@ -683,7 +714,7 @@ static int vfp_support_entry(struct pt_regs *regs, u32 trigger)
if (!user_mode(regs))
return vfp_kmode_exception(regs, trigger);

- local_bh_disable();
+ vfp_lock();
fpexc = fmrx(FPEXC);

/*
@@ -748,6 +779,7 @@ static int vfp_support_entry(struct pt_regs *regs, u32 trigger)
* replay the instruction that trapped.
*/
fmxr(FPEXC, fpexc);
+ vfp_unlock();
} else {
/* Check for synchronous or asynchronous exceptions */
if (!(fpexc & (FPEXC_EX | FPEXC_DEX))) {
@@ -762,17 +794,17 @@ static int vfp_support_entry(struct pt_regs *regs, u32 trigger)
if (!(fpscr & FPSCR_IXE)) {
if (!(fpscr & FPSCR_LENGTH_MASK)) {
pr_debug("not VFP\n");
- local_bh_enable();
+ vfp_unlock();
return -ENOEXEC;
}
fpexc |= FPEXC_DEX;
}
}
bounce: regs->ARM_pc += 4;
+ /* VFP_bounce() will invoke vfp_unlock() */
VFP_bounce(trigger, fpexc, regs);
}

- local_bh_enable();
return 0;
}

@@ -819,7 +851,7 @@ void kernel_neon_begin(void)
unsigned int cpu;
u32 fpexc;

- local_bh_disable();
+ vfp_lock();

/*
* Kernel mode NEON is only allowed outside of hardirq context with
@@ -850,7 +882,7 @@ void kernel_neon_end(void)
{
/* Disable the NEON/VFP unit. */
fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
- local_bh_enable();
+ vfp_unlock();
}
EXPORT_SYMBOL(kernel_neon_end);

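The vfp_lock()/vfp_unlock() helpers above are an instance of a general RT
pattern for sections that must keep out both softirq processing and
preemption. A minimal sketch of the same pattern in isolation (the
my_unit_lock()/my_unit_unlock() names are hypothetical, not part of this
patch):

    /* Sketch of the RT lock-selection pattern used by vfp_lock() above. */
    static void my_unit_lock(void)
    {
    	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
    		local_bh_disable();	/* !RT: excludes softirqs and preemption */
    	else
    		preempt_disable();	/* RT: BH runs in task context, so this suffices */
    }

    static void my_unit_unlock(void)
    {
    	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
    		local_bh_enable();
    	else
    		preempt_enable();
    }
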
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 9da9d58f1..bedb0c9e5 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -103,6 +103,7 @@ config ARM64
select ARCH_SUPPORTS_NUMA_BALANCING
select ARCH_SUPPORTS_PAGE_TABLE_CHECK
select ARCH_SUPPORTS_PER_VMA_LOCK
+ select ARCH_SUPPORTS_RT
select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
select ARCH_WANT_COMPAT_IPC_PARSE_VERSION if COMPAT
select ARCH_WANT_DEFAULT_BPF_JIT
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 2979c9f96..57f52ef31 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -166,6 +166,7 @@ config PPC
select ARCH_STACKWALK
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_DEBUG_PAGEALLOC if PPC_BOOK3S || PPC_8xx || 40x
+ select ARCH_SUPPORTS_RT if HAVE_POSIX_CPU_TIMERS_TASK_WORK
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF if PPC64
select ARCH_USE_MEMTEST
@@ -269,6 +270,7 @@ config PPC
select HAVE_PERF_USER_STACK_DUMP
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RELIABLE_STACKTRACE
+ select HAVE_POSIX_CPU_TIMERS_TASK_WORK if !KVM
select HAVE_RSEQ
select HAVE_SETUP_PER_CPU_AREA if PPC64
select HAVE_SOFTIRQ_ON_OWN_STACK
diff --git a/arch/powerpc/include/asm/stackprotector.h b/arch/powerpc/include/asm/stackprotector.h
index 283c34647..4727f4005 100644
--- a/arch/powerpc/include/asm/stackprotector.h
+++ b/arch/powerpc/include/asm/stackprotector.h
@@ -19,8 +19,13 @@
*/
static __always_inline void boot_init_stack_canary(void)
{
- unsigned long canary = get_random_canary();
+ unsigned long canary;

+#ifndef CONFIG_PREEMPT_RT
+ canary = get_random_canary();
+#else
+ canary = ((unsigned long)&canary) & CANARY_MASK;
+#endif
current->stack_canary = canary;
#ifdef CONFIG_PPC64
get_paca()->canary = canary;
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index f0c07b79a..4c03b1ea2 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -265,12 +265,17 @@ static char *get_mmu_str(void)

static int __die(const char *str, struct pt_regs *regs, long err)
{
+ const char *pr = "";
+
printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);

+ if (IS_ENABLED(CONFIG_PREEMPTION))
+ pr = IS_ENABLED(CONFIG_PREEMPT_RT) ? " PREEMPT_RT" : " PREEMPT";
+
printk("%s PAGE_SIZE=%luK%s%s%s%s%s%s %s\n",
IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN) ? "LE" : "BE",
PAGE_SIZE / 1024, get_mmu_str(),
- IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT" : "",
+ pr,
IS_ENABLED(CONFIG_SMP) ? " SMP" : "",
IS_ENABLED(CONFIG_SMP) ? (" NR_CPUS=" __stringify(NR_CPUS)) : "",
debug_pagealloc_enabled() ? " DEBUG_PAGEALLOC" : "",
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index 902611954..2f188137f 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -224,6 +224,7 @@ config KVM_E500MC
config KVM_MPIC
bool "KVM in-kernel MPIC emulation"
depends on KVM && PPC_E500
+ depends on !PREEMPT_RT
select HAVE_KVM_IRQCHIP
select HAVE_KVM_IRQFD
select HAVE_KVM_IRQ_ROUTING
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 4ebf2ef28..381c3be3b 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -2,6 +2,7 @@
config PPC_PSERIES
depends on PPC64 && PPC_BOOK3S
bool "IBM pSeries & new (POWER5-based) iSeries"
+ select GENERIC_ALLOCATOR
select HAVE_PCSPKR_PLATFORM
select MPIC
select OF_DYNAMIC
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index b1e6d275c..9a8d3970d 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -25,6 +25,7 @@
#include <linux/of_address.h>
#include <linux/iommu.h>
#include <linux/rculist.h>
+#include <linux/local_lock.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/rtas.h>
@@ -206,7 +207,13 @@ static int tce_build_pSeriesLP(unsigned long liobn, long tcenum, long tceshift,
return ret;
}

-static DEFINE_PER_CPU(__be64 *, tce_page);
+struct tce_page {
+ __be64 * page;
+ local_lock_t lock;
+};
+static DEFINE_PER_CPU(struct tce_page, tce_page) = {
+ .lock = INIT_LOCAL_LOCK(lock),
+};

static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
long npages, unsigned long uaddr,
@@ -229,9 +236,10 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
direction, attrs);
}

- local_irq_save(flags); /* to protect tcep and the page behind it */
+ /* to protect tcep and the page behind it */
+ local_lock_irqsave(&tce_page.lock, flags);

- tcep = __this_cpu_read(tce_page);
+ tcep = __this_cpu_read(tce_page.page);

/* This is safe to do since interrupts are off when we're called
* from iommu_alloc{,_sg}()
@@ -240,12 +248,12 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
/* If allocation fails, fall back to the loop implementation */
if (!tcep) {
- local_irq_restore(flags);
+ local_unlock_irqrestore(&tce_page.lock, flags);
return tce_build_pSeriesLP(tbl->it_index, tcenum,
tceshift,
npages, uaddr, direction, attrs);
}
- __this_cpu_write(tce_page, tcep);
+ __this_cpu_write(tce_page.page, tcep);
}

rpn = __pa(uaddr) >> tceshift;
@@ -275,7 +283,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
tcenum += limit;
} while (npages > 0 && !rc);

- local_irq_restore(flags);
+ local_unlock_irqrestore(&tce_page.lock, flags);

if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
ret = (int)rc;
@@ -459,16 +467,17 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
DMA_BIDIRECTIONAL, 0);
}

- local_irq_disable(); /* to protect tcep and the page behind it */
- tcep = __this_cpu_read(tce_page);
+ /* to protect tcep and the page behind it */
+ local_lock_irq(&tce_page.lock);
+ tcep = __this_cpu_read(tce_page.page);

if (!tcep) {
tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
if (!tcep) {
- local_irq_enable();
+ local_unlock_irq(&tce_page.lock);
return -ENOMEM;
}
- __this_cpu_write(tce_page, tcep);
+ __this_cpu_write(tce_page.page, tcep);
}

proto_tce = TCE_PCI_READ | TCE_PCI_WRITE;
@@ -511,7 +520,7 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,

/* error cleanup: caller will clear whole range */

- local_irq_enable();
+ local_unlock_irq(&tce_page.lock);
return rc;
}

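The tce_page change above is the standard conversion from local_irq_save()
to a local_lock for per-CPU data; on RT the lock becomes a per-CPU sleeping
lock instead of hard interrupt disabling. Sketched in isolation (my_buf is
a hypothetical example, not from this patch):

    #include <linux/local_lock.h>

    struct my_buf {
    	void *page;
    	local_lock_t lock;
    };
    static DEFINE_PER_CPU(struct my_buf, my_buf) = {
    	.lock = INIT_LOCAL_LOCK(lock),
    };

    static void my_buf_use(void)
    {
    	unsigned long flags;

    	/* !RT: disables interrupts; RT: takes the per-CPU lock, stays preemptible */
    	local_lock_irqsave(&my_buf.lock, flags);
    	__this_cpu_write(my_buf.page, NULL);
    	local_unlock_irqrestore(&my_buf.lock, flags);
    }
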
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 3be10e723..2a848e2c0 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -49,6 +49,7 @@ config RISCV
select ARCH_SUPPORTS_HUGETLBFS if MMU
select ARCH_SUPPORTS_PAGE_TABLE_CHECK if MMU
select ARCH_SUPPORTS_PER_VMA_LOCK if MMU
+ select ARCH_SUPPORTS_RT
select ARCH_USE_MEMTEST
select ARCH_USE_QUEUED_RWLOCKS
select ARCH_USES_CFI_TRAPS if CFI_CLANG
@@ -137,6 +138,7 @@ config RISCV
select HAVE_PERF_USER_STACK_DUMP
select HAVE_POSIX_CPU_TIMERS_TASK_WORK
select HAVE_PREEMPT_DYNAMIC_KEY if !XIP_KERNEL
+ select HAVE_PREEMPT_AUTO
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RETHOOK if !XIP_KERNEL
select HAVE_RSEQ
diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h
index d0345bd65..23fed53b8 100644
--- a/arch/riscv/include/asm/cpufeature.h
+++ b/arch/riscv/include/asm/cpufeature.h
@@ -30,6 +30,4 @@ DECLARE_PER_CPU(long, misaligned_access_speed);
/* Per-cpu ISA extensions. */
extern struct riscv_isainfo hart_isa[NR_CPUS];

-void check_unaligned_access(int cpu);
-
#endif
diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
index 8c72d1bcd..c59ad9b7a 100644
--- a/arch/riscv/include/asm/thread_info.h
+++ b/arch/riscv/include/asm/thread_info.h
@@ -86,6 +86,7 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
* - pending work-to-be-done flags are in lowest half-word
* - other flags in upper half-word(s)
*/
+#define TIF_ARCH_RESCHED_LAZY 0 /* Lazy rescheduling */
#define TIF_NOTIFY_RESUME 1 /* callback before returning to user */
#define TIF_SIGPENDING 2 /* signal pending */
#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
@@ -100,6 +101,7 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL)
#define _TIF_UPROBE (1 << TIF_UPROBE)
+#define _TIF_ARCH_RESCHED_LAZY (1 << TIF_ARCH_RESCHED_LAZY)

#define _TIF_WORK_MASK \
(_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED | \
diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
index bb5fb2b82..25b6819b6 100644
--- a/arch/riscv/kernel/cpufeature.c
+++ b/arch/riscv/kernel/cpufeature.c
@@ -8,6 +8,7 @@

#include <linux/acpi.h>
#include <linux/bitmap.h>
+#include <linux/cpuhotplug.h>
#include <linux/ctype.h>
#include <linux/log2.h>
#include <linux/memory.h>
@@ -30,6 +31,7 @@

#define MISALIGNED_ACCESS_JIFFIES_LG2 1
#define MISALIGNED_BUFFER_SIZE 0x4000
+#define MISALIGNED_BUFFER_ORDER get_order(MISALIGNED_BUFFER_SIZE)
#define MISALIGNED_COPY_SIZE ((MISALIGNED_BUFFER_SIZE / 2) - 0x80)

unsigned long elf_hwcap __read_mostly;
@@ -575,14 +577,15 @@ unsigned long riscv_get_elf_hwcap(void)
return hwcap;
}

-void check_unaligned_access(int cpu)
+static int check_unaligned_access(void *param)
{
+ int cpu = smp_processor_id();
u64 start_cycles, end_cycles;
u64 word_cycles;
u64 byte_cycles;
int ratio;
unsigned long start_jiffies, now;
- struct page *page;
+ struct page *page = param;
void *dst;
void *src;
long speed = RISCV_HWPROBE_MISALIGNED_SLOW;
@@ -591,12 +594,6 @@ void check_unaligned_access(int cpu)
if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_UNKNOWN)
return;

- page = alloc_pages(GFP_NOWAIT, get_order(MISALIGNED_BUFFER_SIZE));
- if (!page) {
- pr_warn("Can't alloc pages to measure memcpy performance");
- return;
- }
-
/* Make an unaligned destination buffer. */
dst = (void *)((unsigned long)page_address(page) | 0x1);
/* Unalign src as well, but differently (off by 1 + 2 = 3). */
@@ -649,7 +646,7 @@ void check_unaligned_access(int cpu)
pr_warn("cpu%d: rdtime lacks granularity needed to measure unaligned access speed\n",
cpu);

- goto out;
+ return 0;
}

if (word_cycles < byte_cycles)
@@ -663,18 +660,83 @@ void check_unaligned_access(int cpu)
(speed == RISCV_HWPROBE_MISALIGNED_FAST) ? "fast" : "slow");

per_cpu(misaligned_access_speed, cpu) = speed;
+ return 0;
+}

-out:
- __free_pages(page, get_order(MISALIGNED_BUFFER_SIZE));
+static void check_unaligned_access_nonboot_cpu(void *param)
+{
+ unsigned int cpu = smp_processor_id();
+ struct page **pages = param;
+
+ if (smp_processor_id() != 0)
+ check_unaligned_access(pages[cpu]);
}

-static int check_unaligned_access_boot_cpu(void)
+static int riscv_online_cpu(unsigned int cpu)
{
- check_unaligned_access(0);
+ static struct page *buf;
+
+ /* We are already set since the last check */
+ if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_UNKNOWN)
+ return 0;
+
+ buf = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER);
+ if (!buf) {
+ pr_warn("Allocation failure, not measuring misaligned performance\n");
+ return -ENOMEM;
+ }
+
+ check_unaligned_access(buf);
+ __free_pages(buf, MISALIGNED_BUFFER_ORDER);
+ return 0;
+}
+
+/* Measure unaligned access on all CPUs present at boot in parallel. */
+static int check_unaligned_access_all_cpus(void)
+{
+ unsigned int cpu;
+ unsigned int cpu_count = num_possible_cpus();
+ struct page **bufs = kzalloc(cpu_count * sizeof(struct page *),
+ GFP_KERNEL);
+
+ if (!bufs) {
+ pr_warn("Allocation failure, not measuring misaligned performance\n");
+ return 0;
+ }
+
+ /*
+ * Allocate separate buffers for each CPU so there's no fighting over
+ * cache lines.
+ */
+ for_each_cpu(cpu, cpu_online_mask) {
+ bufs[cpu] = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER);
+ if (!bufs[cpu]) {
+ pr_warn("Allocation failure, not measuring misaligned performance\n");
+ goto out;
+ }
+ }
+
+ /* Check everybody except 0, who stays behind to tend jiffies. */
+ on_each_cpu(check_unaligned_access_nonboot_cpu, bufs, 1);
+
+ /* Check core 0. */
+ smp_call_on_cpu(0, check_unaligned_access, bufs[0], true);
+
+ /* Setup hotplug callback for any new CPUs that come online. */
+ cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "riscv:online",
+ riscv_online_cpu, NULL);
+
+out:
+ for_each_cpu(cpu, cpu_online_mask) {
+ if (bufs[cpu])
+ __free_pages(bufs[cpu], MISALIGNED_BUFFER_ORDER);
+ }
+
+ kfree(bufs);
return 0;
}

-arch_initcall(check_unaligned_access_boot_cpu);
+arch_initcall(check_unaligned_access_all_cpus);

#ifdef CONFIG_RISCV_ALTERNATIVE
/*
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
index 1b8da4e40..2cb5e6514 100644
--- a/arch/riscv/kernel/smpboot.c
+++ b/arch/riscv/kernel/smpboot.c
@@ -246,7 +246,6 @@ asmlinkage __visible void smp_callin(void)

numa_add_cpu(curr_cpuid);
set_cpu_online(curr_cpuid, 1);
- check_unaligned_access(curr_cpuid);

if (has_vector()) {
if (riscv_v_setup_vsize())
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 7efe93e4e..f05bebbc1 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -122,6 +122,7 @@ config X86
select ARCH_USES_CFI_TRAPS if X86_64 && CFI_CLANG
select ARCH_SUPPORTS_LTO_CLANG
select ARCH_SUPPORTS_LTO_CLANG_THIN
+ select ARCH_SUPPORTS_RT
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_MEMTEST
select ARCH_USE_QUEUED_RWLOCKS
@@ -279,6 +280,7 @@ config X86
select HAVE_STATIC_CALL
select HAVE_STATIC_CALL_INLINE if HAVE_OBJTOOL
select HAVE_PREEMPT_DYNAMIC_CALL
+ select HAVE_PREEMPT_AUTO
select HAVE_RSEQ
select HAVE_RUST if X86_64
select HAVE_SYSCALL_TRACEPOINTS
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 76e13733b..6c62c6852 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -85,8 +85,9 @@ struct thread_info {
#define TIF_NOTIFY_RESUME 1 /* callback before returning to user */
#define TIF_SIGPENDING 2 /* signal pending */
#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
-#define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/
-#define TIF_SSBD 5 /* Speculative store bypass disable */
+#define TIF_ARCH_RESCHED_LAZY 4 /* Lazy rescheduling */
+#define TIF_SINGLESTEP 5 /* reenable singlestep on user return*/
+#define TIF_SSBD 6 /* Speculative store bypass disable */
#define TIF_SPEC_IB 9 /* Indirect branch speculation mitigation */
#define TIF_SPEC_L1D_FLUSH 10 /* Flush L1D on mm switches (processes) */
#define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */
@@ -109,6 +110,7 @@ struct thread_info {
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+#define _TIF_ARCH_RESCHED_LAZY (1 << TIF_ARCH_RESCHED_LAZY)
#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
#define _TIF_SSBD (1 << TIF_SSBD)
#define _TIF_SPEC_IB (1 << TIF_SPEC_IB)
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index d154b5d77..52f5ce0a9 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -107,7 +107,7 @@ static const struct dmi_system_id processor_power_dmi_table[] = {
*/
static void __cpuidle acpi_safe_halt(void)
{
- if (!tif_need_resched()) {
+ if (!need_resched()) {
raw_safe_halt();
raw_local_irq_disable();
}
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 42aff7991..d905e53f7 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -57,6 +57,41 @@ static void zram_free_page(struct zram *zram, size_t index);
static int zram_read_page(struct zram *zram, struct page *page, u32 index,
struct bio *parent);

+#ifdef CONFIG_PREEMPT_RT
+static void zram_meta_init_table_locks(struct zram *zram, size_t num_pages)
+{
+ size_t index;
+
+ for (index = 0; index < num_pages; index++)
+ spin_lock_init(&zram->table[index].lock);
+}
+
+static int zram_slot_trylock(struct zram *zram, u32 index)
+{
+ int ret;
+
+ ret = spin_trylock(&zram->table[index].lock);
+ if (ret)
+ __set_bit(ZRAM_LOCK, &zram->table[index].flags);
+ return ret;
+}
+
+static void zram_slot_lock(struct zram *zram, u32 index)
+{
+ spin_lock(&zram->table[index].lock);
+ __set_bit(ZRAM_LOCK, &zram->table[index].flags);
+}
+
+static void zram_slot_unlock(struct zram *zram, u32 index)
+{
+ __clear_bit(ZRAM_LOCK, &zram->table[index].flags);
+ spin_unlock(&zram->table[index].lock);
+}
+
+#else
+
+static void zram_meta_init_table_locks(struct zram *zram, size_t num_pages) { }
+
static int zram_slot_trylock(struct zram *zram, u32 index)
{
return bit_spin_trylock(ZRAM_LOCK, &zram->table[index].flags);
@@ -71,6 +106,7 @@ static void zram_slot_unlock(struct zram *zram, u32 index)
{
bit_spin_unlock(ZRAM_LOCK, &zram->table[index].flags);
}
+#endif

static inline bool init_done(struct zram *zram)
{
@@ -1252,6 +1288,7 @@ static bool zram_meta_alloc(struct zram *zram, u64 disksize)

if (!huge_class_size)
huge_class_size = zs_huge_class_size(zram->mem_pool);
+ zram_meta_init_table_locks(zram, num_pages);
return true;
}

diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index d090753f9..833abc17d 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -69,6 +69,9 @@ struct zram_table_entry {
unsigned long element;
};
unsigned long flags;
+#ifdef CONFIG_PREEMPT_RT
+ spinlock_t lock;
+#endif
#ifdef CONFIG_ZRAM_MEMORY_TRACKING
ktime_t ac_time;
#endif
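On PREEMPT_RT, bit_spin_lock() spins with preemption disabled, so the zram
hunks above shadow the ZRAM_LOCK bit with a real spinlock_t per table entry
and keep the flag bit mirrored for existing readers of the flags word. The
two lock variants side by side, condensed from the hunks above:

    #ifdef CONFIG_PREEMPT_RT
    static void zram_slot_lock(struct zram *zram, u32 index)
    {
    	spin_lock(&zram->table[index].lock);		/* sleeping lock on RT */
    	__set_bit(ZRAM_LOCK, &zram->table[index].flags);	/* keep flag readers working */
    }
    #else
    static void zram_slot_lock(struct zram *zram, u32 index)
    {
    	bit_spin_lock(ZRAM_LOCK, &zram->table[index].flags);	/* spins, preemption off */
    }
    #endif
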
diff --git a/drivers/crypto/ccp/hygon/psp-dev.c b/drivers/crypto/ccp/hygon/psp-dev.c
index c150aa163..cdd2d1a39 100644
--- a/drivers/crypto/ccp/hygon/psp-dev.c
+++ b/drivers/crypto/ccp/hygon/psp-dev.c
@@ -17,7 +17,7 @@
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/bsearch.h>
-#include <linux/rwlock.h>
+//#include <linux/rwlock.h>

#include "psp-dev.h"

diff --git a/drivers/crypto/ccp/hygon/tdm-dev.c b/drivers/crypto/ccp/hygon/tdm-dev.c
index 42322c6bd..46aa8b02d 100644
--- a/drivers/crypto/ccp/hygon/tdm-dev.c
+++ b/drivers/crypto/ccp/hygon/tdm-dev.c
@@ -12,7 +12,7 @@
#include <linux/miscdevice.h>
#include <linux/psp-hygon.h>
#include <linux/spinlock.h>
-#include <linux/rwlock.h>
+//#include <linux/rwlock.h>
#include <linux/err.h>
#include <asm/current.h>
#include <linux/kthread.h>
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
index 172aa10a8..4ae472053 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
@@ -60,11 +60,9 @@ static DEFINE_PER_CPU(int, fpu_recursion_depth);
*/
inline void dc_assert_fp_enabled(void)
{
- int *pcpu, depth = 0;
+ int depth;

- pcpu = get_cpu_ptr(&fpu_recursion_depth);
- depth = *pcpu;
- put_cpu_ptr(&fpu_recursion_depth);
+ depth = __this_cpu_read(fpu_recursion_depth);

ASSERT(depth >= 1);
}
@@ -84,33 +82,28 @@ inline void dc_assert_fp_enabled(void)
*/
void dc_fpu_begin(const char *function_name, const int line)
{
- int *pcpu;
+ int depth;

- pcpu = get_cpu_ptr(&fpu_recursion_depth);
- *pcpu += 1;
+ WARN_ON_ONCE(!in_task());
+ preempt_disable();
+ depth = __this_cpu_inc_return(fpu_recursion_depth);

- if (*pcpu == 1) {
+ if (depth == 1) {
#if defined(CONFIG_X86) || defined(CONFIG_LOONGARCH)
- migrate_disable();
kernel_fpu_begin();
#elif defined(CONFIG_PPC64)
- if (cpu_has_feature(CPU_FTR_VSX_COMP)) {
- preempt_disable();
+ if (cpu_has_feature(CPU_FTR_VSX_COMP))
enable_kernel_vsx();
- } else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP)) {
- preempt_disable();
+ else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP))
enable_kernel_altivec();
- } else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE)) {
- preempt_disable();
+ else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE))
enable_kernel_fp();
- }
#elif defined(CONFIG_ARM64)
kernel_neon_begin();
#endif
}

- TRACE_DCN_FPU(true, function_name, line, *pcpu);
- put_cpu_ptr(&fpu_recursion_depth);
+ TRACE_DCN_FPU(true, function_name, line, depth);
}

/**
@@ -125,30 +118,26 @@ void dc_fpu_begin(const char *function_name, const int line)
*/
void dc_fpu_end(const char *function_name, const int line)
{
- int *pcpu;
+ int depth;

- pcpu = get_cpu_ptr(&fpu_recursion_depth);
- *pcpu -= 1;
- if (*pcpu <= 0) {
+ depth = __this_cpu_dec_return(fpu_recursion_depth);
+ if (depth == 0) {
#if defined(CONFIG_X86) || defined(CONFIG_LOONGARCH)
kernel_fpu_end();
- migrate_enable();
#elif defined(CONFIG_PPC64)
- if (cpu_has_feature(CPU_FTR_VSX_COMP)) {
+ if (cpu_has_feature(CPU_FTR_VSX_COMP))
disable_kernel_vsx();
- preempt_enable();
- } else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP)) {
+ else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP))
disable_kernel_altivec();
- preempt_enable();
- } else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE)) {
+ else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE))
disable_kernel_fp();
- preempt_enable();
- }
#elif defined(CONFIG_ARM64)
kernel_neon_end();
#endif
+ } else {
+ WARN_ON_ONCE(depth < 0);
}

- TRACE_DCN_FPU(false, function_name, line, *pcpu);
- put_cpu_ptr(&fpu_recursion_depth);
+ TRACE_DCN_FPU(false, function_name, line, depth);
+ preempt_enable();
}
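The dc_fpu rework above replaces the get_cpu_ptr()/put_cpu_ptr() pair (and
the ad-hoc migrate_disable()/preempt_disable() calls) with one
preempt-disabled region around a per-CPU recursion counter. The begin side,
reduced to its x86 essentials (a sketch only; the PPC64/ARM64 branches and
the tracing call are omitted):

    void dc_fpu_begin_sketch(void)
    {
    	int depth;

    	WARN_ON_ONCE(!in_task());	/* FPU sections must run in task context */
    	preempt_disable();		/* pinned until the matching end call */
    	depth = __this_cpu_inc_return(fpu_recursion_depth);
    	if (depth == 1)			/* only the outermost call grabs the FPU */
    		kernel_fpu_begin();
    }
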
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
|
||
index e0daa4e05..fdbb57fcb 100644
|
||
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
|
||
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
|
||
@@ -2142,9 +2142,17 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
|
||
bool fast_validate)
|
||
{
|
||
bool voltage_supported;
|
||
+ display_e2e_pipe_params_st *pipes;
|
||
+
|
||
+ pipes = kcalloc(dc->res_pool->pipe_count, sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
|
||
+ if (!pipes)
|
||
+ return false;
|
||
+
|
||
DC_FP_START();
|
||
- voltage_supported = dcn20_validate_bandwidth_fp(dc, context, fast_validate);
|
||
+ voltage_supported = dcn20_validate_bandwidth_fp(dc, context, fast_validate, pipes);
|
||
DC_FP_END();
|
||
+
|
||
+ kfree(pipes);
|
||
return voltage_supported;
|
||
}
|
||
|
||
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
|
||
index 24105a5b9..349c22dac 100644
|
||
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
|
||
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
|
||
@@ -954,9 +954,17 @@ static bool dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context,
|
||
bool fast_validate)
|
||
{
|
||
bool voltage_supported;
|
||
+ display_e2e_pipe_params_st *pipes;
|
||
+
|
||
+ pipes = kcalloc(dc->res_pool->pipe_count, sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
|
||
+ if (!pipes)
|
||
+ return false;
|
||
+
|
||
DC_FP_START();
|
||
- voltage_supported = dcn21_validate_bandwidth_fp(dc, context, fast_validate);
|
||
+ voltage_supported = dcn21_validate_bandwidth_fp(dc, context, fast_validate, pipes);
|
||
DC_FP_END();
|
||
+
|
||
+ kfree(pipes);
|
||
return voltage_supported;
|
||
}
|
||
|
||
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
|
||
index 8a5a038fd..68970d6cf 100644
|
||
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
|
||
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
|
||
@@ -2018,7 +2018,7 @@ void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st
|
||
}
|
||
|
||
static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *context,
|
||
- bool fast_validate)
|
||
+ bool fast_validate, display_e2e_pipe_params_st *pipes)
|
||
{
|
||
bool out = false;
|
||
|
||
@@ -2027,7 +2027,6 @@ static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *co
|
||
int vlevel = 0;
|
||
int pipe_split_from[MAX_PIPES];
|
||
int pipe_cnt = 0;
|
||
- display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_ATOMIC);
|
||
DC_LOGGER_INIT(dc->ctx->logger);
|
||
|
||
BW_VAL_TRACE_COUNT();
|
||
@@ -2062,16 +2061,14 @@ static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *co
|
||
out = false;
|
||
|
||
validate_out:
|
||
- kfree(pipes);
|
||
|
||
BW_VAL_TRACE_FINISH();
|
||
|
||
return out;
|
||
}
|
||
|
||
-bool dcn20_validate_bandwidth_fp(struct dc *dc,
|
||
- struct dc_state *context,
|
||
- bool fast_validate)
|
||
+bool dcn20_validate_bandwidth_fp(struct dc *dc, struct dc_state *context,
|
||
+ bool fast_validate, display_e2e_pipe_params_st *pipes)
|
||
{
|
||
bool voltage_supported = false;
|
||
bool full_pstate_supported = false;
|
||
@@ -2090,11 +2087,11 @@ bool dcn20_validate_bandwidth_fp(struct dc *dc,
|
||
ASSERT(context != dc->current_state);
|
||
|
||
if (fast_validate) {
|
||
- return dcn20_validate_bandwidth_internal(dc, context, true);
|
||
+ return dcn20_validate_bandwidth_internal(dc, context, true, pipes);
|
||
}
|
||
|
||
// Best case, we support full UCLK switch latency
|
||
- voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false);
|
||
+ voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false, pipes);
|
||
full_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support;
|
||
|
||
if (context->bw_ctx.dml.soc.dummy_pstate_latency_us == 0 ||
|
||
@@ -2106,7 +2103,8 @@ bool dcn20_validate_bandwidth_fp(struct dc *dc,
|
||
// Fallback: Try to only support G6 temperature read latency
|
||
context->bw_ctx.dml.soc.dram_clock_change_latency_us = context->bw_ctx.dml.soc.dummy_pstate_latency_us;
|
||
|
||
- voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false);
|
||
+ memset(pipes, 0, dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st));
|
||
+ voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false, pipes);
|
||
dummy_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support;
|
||
|
||
if (voltage_supported && (dummy_pstate_supported || !(context->stream_count))) {
|
||
@@ -2311,9 +2309,8 @@ static void dcn21_calculate_wm(struct dc *dc, struct dc_state *context,
|
||
&context->bw_ctx.dml, pipes, pipe_cnt);
|
||
}
|
||
|
||
-bool dcn21_validate_bandwidth_fp(struct dc *dc,
|
||
- struct dc_state *context,
|
||
- bool fast_validate)
|
||
+bool dcn21_validate_bandwidth_fp(struct dc *dc, struct dc_state *context,
|
||
+ bool fast_validate, display_e2e_pipe_params_st *pipes)
|
||
{
|
||
bool out = false;
|
||
|
||
@@ -2322,7 +2319,6 @@ bool dcn21_validate_bandwidth_fp(struct dc *dc,
|
||
int vlevel = 0;
|
||
int pipe_split_from[MAX_PIPES];
|
||
int pipe_cnt = 0;
|
||
- display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_ATOMIC);
|
||
DC_LOGGER_INIT(dc->ctx->logger);
|
||
|
||
BW_VAL_TRACE_COUNT();
|
||
@@ -2362,7 +2358,6 @@ bool dcn21_validate_bandwidth_fp(struct dc *dc,
|
||
out = false;
|
||
|
||
validate_out:
|
||
- kfree(pipes);
|
||
|
||
BW_VAL_TRACE_FINISH();
|
||
|
||
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h
|
||
index c51badf7b..b6c34198d 100644
|
||
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h
|
||
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h
|
||
@@ -61,9 +61,8 @@ void dcn20_update_bounding_box(struct dc *dc,
|
||
unsigned int num_states);
|
||
void dcn20_patch_bounding_box(struct dc *dc,
|
||
struct _vcs_dpi_soc_bounding_box_st *bb);
|
||
-bool dcn20_validate_bandwidth_fp(struct dc *dc,
|
||
- struct dc_state *context,
|
||
- bool fast_validate);
|
||
+bool dcn20_validate_bandwidth_fp(struct dc *dc, struct dc_state *context,
|
||
+ bool fast_validate, display_e2e_pipe_params_st *pipes);
|
||
void dcn20_fpu_set_wm_ranges(int i,
|
||
struct pp_smu_wm_range_sets *ranges,
|
||
struct _vcs_dpi_soc_bounding_box_st *loaded_bb);
|
||
@@ -77,9 +76,8 @@ int dcn21_populate_dml_pipes_from_context(struct dc *dc,
|
||
struct dc_state *context,
|
||
display_e2e_pipe_params_st *pipes,
|
||
bool fast_validate);
|
||
-bool dcn21_validate_bandwidth_fp(struct dc *dc,
|
||
- struct dc_state *context,
|
||
- bool fast_validate);
|
||
+bool dcn21_validate_bandwidth_fp(struct dc *dc, struct dc_state *context, bool
|
||
+ fast_validate, display_e2e_pipe_params_st *pipes);
|
||
void dcn21_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);
|
||
|
||
void dcn21_clk_mgr_set_bw_params_wm_table(struct clk_bw_params *bw_params);
|
||
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
|
||
index ce397a879..98c3f5328 100644
|
||
--- a/drivers/gpu/drm/i915/Kconfig
|
||
+++ b/drivers/gpu/drm/i915/Kconfig
|
||
@@ -3,7 +3,6 @@ config DRM_I915
|
||
tristate "Intel 8xx/9xx/G3x/G4x/HD Graphics"
|
||
depends on DRM
|
||
depends on X86 && PCI
|
||
- depends on !PREEMPT_RT
|
||
select INTEL_GTT if X86
|
||
select INTERVAL_TREE
|
||
# we need shmfs for the swappable backing store, and in particular
|
||
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c
|
||
index cfbfbfed3..da2becfbc 100644
|
||
--- a/drivers/gpu/drm/i915/display/intel_crtc.c
|
||
+++ b/drivers/gpu/drm/i915/display/intel_crtc.c
|
||
@@ -562,7 +562,8 @@ void intel_pipe_update_start(struct intel_atomic_state *state,
|
||
*/
|
||
intel_psr_wait_for_idle_locked(new_crtc_state);
|
||
|
||
- local_irq_disable();
|
||
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
|
||
+ local_irq_disable();
|
||
|
||
crtc->debug.min_vbl = min;
|
||
crtc->debug.max_vbl = max;
|
||
@@ -587,11 +588,13 @@ void intel_pipe_update_start(struct intel_atomic_state *state,
|
||
break;
|
||
}
|
||
|
||
- local_irq_enable();
|
||
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
|
||
+ local_irq_enable();
|
||
|
||
timeout = schedule_timeout(timeout);
|
||
|
||
- local_irq_disable();
|
||
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
|
||
+ local_irq_disable();
|
||
}
|
||
|
||
finish_wait(wq, &wait);
|
||
@@ -624,7 +627,8 @@ void intel_pipe_update_start(struct intel_atomic_state *state,
|
||
return;
|
||
|
||
irq_disable:
|
||
- local_irq_disable();
|
||
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
|
||
+ local_irq_disable();
|
||
}
|
||
|
||
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_VBLANK_EVADE)
|
||
@@ -728,7 +732,8 @@ void intel_pipe_update_end(struct intel_atomic_state *state,
|
||
*/
|
||
intel_vrr_send_push(new_crtc_state);
|
||
|
||
- local_irq_enable();
|
||
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
|
||
+ local_irq_enable();
|
||
|
||
if (intel_vgpu_active(dev_priv))
|
||
return;
|
||
diff --git a/drivers/gpu/drm/i915/display/intel_vblank.c b/drivers/gpu/drm/i915/display/intel_vblank.c
|
||
index f5659ebd0..5b6d2f555 100644
|
||
--- a/drivers/gpu/drm/i915/display/intel_vblank.c
|
||
+++ b/drivers/gpu/drm/i915/display/intel_vblank.c
|
||
@@ -294,7 +294,8 @@ static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
|
||
*/
|
||
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
|
||
|
||
- /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
|
||
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
|
||
+ preempt_disable();
|
||
|
||
/* Get optional system timestamp before query. */
|
||
if (stime)
|
||
@@ -358,7 +359,8 @@ static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
|
||
if (etime)
|
||
*etime = ktime_get();
|
||
|
||
- /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
|
||
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
|
||
+ preempt_enable();
|
||
|
||
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
|
||
|
||
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
|
||
index f2973cd1a..aa77f8601 100644
|
||
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
|
||
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
|
||
@@ -315,7 +315,12 @@ void __intel_breadcrumbs_park(struct intel_breadcrumbs *b)
|
||
return;
|
||
|
||
/* Kick the work once more to drain the signalers, and disarm the irq */
|
||
- irq_work_queue(&b->irq_work);
|
||
+ irq_work_sync(&b->irq_work);
|
||
+ while (READ_ONCE(b->irq_armed) && !atomic_read(&b->active)) {
|
||
+ irq_work_queue(&b->irq_work);
|
||
+ cond_resched();
|
||
+ irq_work_sync(&b->irq_work);
|
||
+ }
|
||
}
|
||
|
||
void intel_breadcrumbs_free(struct kref *kref)
|
||
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
|
||
index 2065be5a1..73d815fc5 100644
|
||
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
|
||
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
|
||
@@ -1303,7 +1303,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
|
||
* and context switches) submission.
|
||
*/
|
||
|
||
- spin_lock(&sched_engine->lock);
|
||
+ spin_lock_irq(&sched_engine->lock);
|
||
|
||
/*
|
||
* If the queue is higher priority than the last
|
||
@@ -1403,7 +1403,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
|
||
* Even if ELSP[1] is occupied and not worthy
|
||
* of timeslices, our queue might be.
|
||
*/
|
||
- spin_unlock(&sched_engine->lock);
|
||
+ spin_unlock_irq(&sched_engine->lock);
|
||
return;
|
||
}
|
||
}
|
||
@@ -1429,7 +1429,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
|
||
|
||
if (last && !can_merge_rq(last, rq)) {
|
||
spin_unlock(&ve->base.sched_engine->lock);
|
||
- spin_unlock(&engine->sched_engine->lock);
|
||
+ spin_unlock_irq(&engine->sched_engine->lock);
|
||
return; /* leave this for another sibling */
|
||
}
|
||
|
||
@@ -1591,7 +1591,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
|
||
*/
|
||
sched_engine->queue_priority_hint = queue_prio(sched_engine);
|
||
i915_sched_engine_reset_on_empty(sched_engine);
|
||
- spin_unlock(&sched_engine->lock);
|
||
+ spin_unlock_irq(&sched_engine->lock);
|
||
|
||
/*
|
||
* We can skip poking the HW if we ended up with exactly the same set
|
||
@@ -1617,13 +1617,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
|
||
}
|
||
}
|
||
|
||
-static void execlists_dequeue_irq(struct intel_engine_cs *engine)
|
||
-{
|
||
- local_irq_disable(); /* Suspend interrupts across request submission */
|
||
- execlists_dequeue(engine);
|
||
- local_irq_enable(); /* flush irq_work (e.g. breadcrumb enabling) */
|
||
-}
|
||
-
|
||
static void clear_ports(struct i915_request **ports, int count)
|
||
{
|
||
memset_p((void **)ports, NULL, count);
|
||
@@ -2478,7 +2471,7 @@ static void execlists_submission_tasklet(struct tasklet_struct *t)
|
||
}
|
||
|
||
if (!engine->execlists.pending[0]) {
|
||
- execlists_dequeue_irq(engine);
|
||
+ execlists_dequeue(engine);
|
||
start_timeslice(engine);
|
||
}
|
||
|
||
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
|
||
index 13fb8e504..b51fb0c97 100644
|
||
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
|
||
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
|
||
@@ -164,13 +164,13 @@ static int i915_do_reset(struct intel_gt *gt,
|
||
/* Assert reset for at least 20 usec, and wait for acknowledgement. */
|
||
pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
|
||
udelay(50);
|
||
- err = wait_for_atomic(i915_in_reset(pdev), 50);
|
||
+ err = _wait_for_atomic(i915_in_reset(pdev), 50, 0);
|
||
|
||
/* Clear the reset request. */
|
||
pci_write_config_byte(pdev, I915_GDRST, 0);
|
||
udelay(50);
|
||
if (!err)
|
||
- err = wait_for_atomic(!i915_in_reset(pdev), 50);
|
||
+ err = _wait_for_atomic(!i915_in_reset(pdev), 50, 0);
|
||
|
||
return err;
|
||
}
|
||
@@ -190,7 +190,7 @@ static int g33_do_reset(struct intel_gt *gt,
|
||
struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev);
|
||
|
||
pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
|
||
- return wait_for_atomic(g4x_reset_complete(pdev), 50);
|
||
+ return _wait_for_atomic(g4x_reset_complete(pdev), 50, 0);
|
||
}
|
||
|
||
static int g4x_do_reset(struct intel_gt *gt,
|
||
@@ -207,7 +207,7 @@ static int g4x_do_reset(struct intel_gt *gt,
|
||
|
||
pci_write_config_byte(pdev, I915_GDRST,
|
||
GRDOM_MEDIA | GRDOM_RESET_ENABLE);
|
||
- ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
|
||
+ ret = _wait_for_atomic(g4x_reset_complete(pdev), 50, 0);
|
||
if (ret) {
|
||
GT_TRACE(gt, "Wait for media reset failed\n");
|
||
goto out;
|
||
@@ -215,7 +215,7 @@ static int g4x_do_reset(struct intel_gt *gt,
|
||
|
||
pci_write_config_byte(pdev, I915_GDRST,
|
||
GRDOM_RENDER | GRDOM_RESET_ENABLE);
|
||
- ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
|
||
+ ret = _wait_for_atomic(g4x_reset_complete(pdev), 50, 0);
|
||
if (ret) {
|
||
GT_TRACE(gt, "Wait for render reset failed\n");
|
||
goto out;
|
||
@@ -785,9 +785,7 @@ int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask)
|
||
reset_mask = wa_14015076503_start(gt, engine_mask, !retry);
|
||
|
||
GT_TRACE(gt, "engine_mask=%x\n", reset_mask);
|
||
- preempt_disable();
|
||
ret = reset(gt, reset_mask, retry);
|
||
- preempt_enable();
|
||
|
||
wa_14015076503_end(gt, reset_mask);
|
||
}
|
||
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index 8dc291ff0..5b8d084c9 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -317,7 +317,7 @@ static inline int intel_guc_send_busy_loop(struct intel_guc *guc,
 {
 	int err;
 	unsigned int sleep_period_ms = 1;
-	bool not_atomic = !in_atomic() && !irqs_disabled();
+	bool not_atomic = !in_atomic() && !irqs_disabled() && !rcu_preempt_depth();
 
 	/*
 	 * FIXME: Have caller pass in if we are in an atomic context to avoid
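Why rcu_preempt_depth() is added: on preemptible kernels, neither in_atomic() nor irqs_disabled() detects an rcu_read_lock() section, yet sleeping there is still forbidden; rcu_preempt_depth() reports the RCU read-lock nesting depth (0 on non-preemptible RCU). Illustrative sketch (not the actual function body) of how the flag is consumed later in the busy loop:

	if (not_atomic)
		msleep(sleep_period_ms);	/* sleeping wait is allowed */
	else
		cpu_relax();			/* must busy-poll instead */
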
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index f59081066..014d02029 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -609,7 +609,6 @@ bool __i915_request_submit(struct i915_request *request)
 
 	RQ_TRACE(request, "\n");
 
-	GEM_BUG_ON(!irqs_disabled());
 	lockdep_assert_held(&engine->sched_engine->lock);
 
 	/*
@@ -718,7 +717,6 @@ void __i915_request_unsubmit(struct i915_request *request)
 	 */
 	RQ_TRACE(request, "\n");
 
-	GEM_BUG_ON(!irqs_disabled());
 	lockdep_assert_held(&engine->sched_engine->lock);
 
 	/*
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index ce1cbee1b..3c51620d0 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -6,6 +6,10 @@
 #if !defined(_I915_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
 #define _I915_TRACE_H_
 
+#ifdef CONFIG_PREEMPT_RT
+#define NOTRACE
+#endif
+
 #include <linux/stringify.h>
 #include <linux/types.h>
 #include <linux/tracepoint.h>
@@ -322,7 +326,7 @@ DEFINE_EVENT(i915_request, i915_request_add,
 	    TP_ARGS(rq)
 );
 
-#if defined(CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS)
+#if defined(CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS) && !defined(NOTRACE)
 DEFINE_EVENT(i915_request, i915_request_guc_submit,
 	     TP_PROTO(struct i915_request *rq),
 	     TP_ARGS(rq)
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index c61066498..48e19e55d 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -288,7 +288,7 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
 #define wait_for(COND, MS)	_wait_for((COND), (MS) * 1000, 10, 1000)
 
 /* If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. */
-#if defined(CONFIG_DRM_I915_DEBUG) && defined(CONFIG_PREEMPT_COUNT)
+#if defined(CONFIG_DRM_I915_DEBUG) && defined(CONFIG_PREEMPT_COUNT) && !defined(CONFIG_PREEMPT_RT)
 # define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) WARN_ON_ONCE((ATOMIC) && !in_atomic())
 #else
 # define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) do { } while (0)
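Sketch of how the check is consumed (simplified; the real _wait_for_atomic() lives just below in i915_utils.h): the WARN fires when a caller claims to be atomic while in_atomic() is false. On PREEMPT_RT spinlocks do not bump the preempt count, so a spinlock holder looks non-atomic and the WARN would misfire; it is therefore compiled out there.

	#define _wait_for_atomic(COND, US, ATOMIC) \
	({ \
		_WAIT_FOR_ATOMIC_CHECK(ATOMIC); \
		/* ... busy-wait on COND for up to US microseconds ... */ \
	})
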
diff --git a/drivers/tty/serial/21285.c b/drivers/tty/serial/21285.c
index d756fcc88..4de0c975e 100644
--- a/drivers/tty/serial/21285.c
+++ b/drivers/tty/serial/21285.c
@@ -185,14 +185,14 @@ static void serial21285_break_ctl(struct uart_port *port, int break_state)
 	unsigned long flags;
 	unsigned int h_lcr;
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 	h_lcr = *CSR_H_UBRLCR;
 	if (break_state)
 		h_lcr |= H_UBRLCR_BREAK;
 	else
 		h_lcr &= ~H_UBRLCR_BREAK;
 	*CSR_H_UBRLCR = h_lcr;
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }
 
 static int serial21285_startup(struct uart_port *port)
@@ -272,7 +272,7 @@ serial21285_set_termios(struct uart_port *port, struct ktermios *termios,
 	if (port->fifosize)
 		h_lcr |= H_UBRLCR_FIFO;
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 
 	/*
 	 * Update the per-port timeout.
@@ -309,7 +309,7 @@ serial21285_set_termios(struct uart_port *port, struct ktermios *termios,
 	*CSR_H_UBRLCR = h_lcr;
 	*CSR_UARTCON = 1;
 
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }
 
 static const char *serial21285_type(struct uart_port *port)
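All of the serial driver hunks from here on are the same mechanical conversion: open-coded locking of port->lock becomes the uart_port_*() helpers. On a kernel without nbcon consoles these are one-to-one wrappers; their upstream shape in <linux/serial_core.h> is:

	static inline void uart_port_lock_irqsave(struct uart_port *up,
						  unsigned long *flags)
	{
		spin_lock_irqsave(&up->lock, *flags);
	}

	static inline void uart_port_unlock_irqrestore(struct uart_port *up,
						       unsigned long flags)
	{
		spin_unlock_irqrestore(&up->lock, flags);
	}

The indirection gives the console code one choke point for port-lock acquisition, which the nbcon console changes in 8250_core.c and 8250_port.c further below rely on.
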
diff --git a/drivers/tty/serial/8250/8250_aspeed_vuart.c b/drivers/tty/serial/8250/8250_aspeed_vuart.c
index 4a9e71b2d..021949f25 100644
--- a/drivers/tty/serial/8250/8250_aspeed_vuart.c
+++ b/drivers/tty/serial/8250/8250_aspeed_vuart.c
@@ -288,9 +288,9 @@ static void aspeed_vuart_set_throttle(struct uart_port *port, bool throttle)
 	struct uart_8250_port *up = up_to_u8250p(port);
 	unsigned long flags;
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 	__aspeed_vuart_set_throttle(up, throttle);
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }
 
 static void aspeed_vuart_throttle(struct uart_port *port)
@@ -340,7 +340,7 @@ static int aspeed_vuart_handle_irq(struct uart_port *port)
 	if (iir & UART_IIR_NO_INT)
 		return 0;
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 
 	lsr = serial_port_in(port, UART_LSR);
 
diff --git a/drivers/tty/serial/8250/8250_bcm7271.c b/drivers/tty/serial/8250/8250_bcm7271.c
index 9afd5979c..db23b3a02 100644
--- a/drivers/tty/serial/8250/8250_bcm7271.c
+++ b/drivers/tty/serial/8250/8250_bcm7271.c
@@ -567,7 +567,7 @@ static irqreturn_t brcmuart_isr(int irq, void *dev_id)
 	if (interrupts == 0)
 		return IRQ_NONE;
 
-	spin_lock_irqsave(&up->lock, flags);
+	uart_port_lock_irqsave(up, &flags);
 
 	/* Clear all interrupts */
 	udma_writel(priv, REGS_DMA_ISR, UDMA_INTR_CLEAR, interrupts);
@@ -581,7 +581,7 @@ static irqreturn_t brcmuart_isr(int irq, void *dev_id)
 	if ((rval | tval) == 0)
 		dev_warn(dev, "Spurious interrupt: 0x%x\n", interrupts);
 
-	spin_unlock_irqrestore(&up->lock, flags);
+	uart_port_unlock_irqrestore(up, flags);
 	return IRQ_HANDLED;
 }
 
@@ -608,10 +608,10 @@ static int brcmuart_startup(struct uart_port *port)
 	 *
 	 * Synchronize UART_IER access against the console.
 	 */
-	spin_lock_irq(&port->lock);
+	uart_port_lock_irq(port);
 	up->ier &= ~UART_IER_RDI;
 	serial_port_out(port, UART_IER, up->ier);
-	spin_unlock_irq(&port->lock);
+	uart_port_unlock_irq(port);
 
 	priv->tx_running = false;
 	priv->dma.rx_dma = NULL;
@@ -629,7 +629,7 @@ static void brcmuart_shutdown(struct uart_port *port)
 	struct brcmuart_priv *priv = up->port.private_data;
 	unsigned long flags;
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 	priv->shutdown = true;
 	if (priv->dma_enabled) {
 		stop_rx_dma(up);
@@ -645,7 +645,7 @@ static void brcmuart_shutdown(struct uart_port *port)
 	 */
 	up->dma = NULL;
 
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 	serial8250_do_shutdown(port);
 }
 
@@ -807,7 +807,7 @@ static int brcmuart_handle_irq(struct uart_port *p)
 	 * interrupt but there is no data ready.
 	 */
 	if (((iir & UART_IIR_ID) == UART_IIR_RX_TIMEOUT) && !(priv->shutdown)) {
-		spin_lock_irqsave(&p->lock, flags);
+		uart_port_lock_irqsave(p, &flags);
 		status = serial_port_in(p, UART_LSR);
 		if ((status & UART_LSR_DR) == 0) {
 
@@ -832,7 +832,7 @@ static int brcmuart_handle_irq(struct uart_port *p)
 
 			handled = 1;
 		}
-		spin_unlock_irqrestore(&p->lock, flags);
+		uart_port_unlock_irqrestore(p, flags);
 		if (handled)
 			return 1;
 	}
@@ -850,7 +850,7 @@ static enum hrtimer_restart brcmuart_hrtimer_func(struct hrtimer *t)
 	if (priv->shutdown)
 		return HRTIMER_NORESTART;
 
-	spin_lock_irqsave(&p->lock, flags);
+	uart_port_lock_irqsave(p, &flags);
 	status = serial_port_in(p, UART_LSR);
 
 	/*
@@ -874,7 +874,7 @@ static enum hrtimer_restart brcmuart_hrtimer_func(struct hrtimer *t)
 		status |= UART_MCR_RTS;
 		serial_port_out(p, UART_MCR, status);
 	}
-	spin_unlock_irqrestore(&p->lock, flags);
+	uart_port_unlock_irqrestore(p, flags);
 	return HRTIMER_NORESTART;
 }
 
@@ -1173,10 +1173,10 @@ static int __maybe_unused brcmuart_suspend(struct device *dev)
 	 * This will prevent resume from enabling RTS before the
 	 * baud rate has been restored.
	 */
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 	priv->saved_mctrl = port->mctrl;
 	port->mctrl &= ~TIOCM_RTS;
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 
 	serial8250_suspend_port(priv->line);
 	clk_disable_unprepare(priv->baud_mux_clk);
@@ -1215,10 +1215,10 @@ static int __maybe_unused brcmuart_resume(struct device *dev)
 
 	if (priv->saved_mctrl & TIOCM_RTS) {
 		/* Restore RTS */
-		spin_lock_irqsave(&port->lock, flags);
+		uart_port_lock_irqsave(port, &flags);
 		port->mctrl |= TIOCM_RTS;
 		port->ops->set_mctrl(port, port->mctrl);
-		spin_unlock_irqrestore(&port->lock, flags);
+		uart_port_unlock_irqrestore(port, flags);
 	}
 
 	return 0;
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 3449f8790..30434718f 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -259,7 +259,7 @@ static void serial8250_backup_timeout(struct timer_list *t)
 	unsigned int iir, ier = 0, lsr;
 	unsigned long flags;
 
-	spin_lock_irqsave(&up->port.lock, flags);
+	uart_port_lock_irqsave(&up->port, &flags);
 
 	/*
 	 * Must disable interrupts or else we risk racing with the interrupt
@@ -292,7 +292,7 @@ static void serial8250_backup_timeout(struct timer_list *t)
 	if (up->port.irq)
 		serial_out(up, UART_IER, ier);
 
-	spin_unlock_irqrestore(&up->port.lock, flags);
+	uart_port_unlock_irqrestore(&up->port, flags);
 
 	/* Standard timer interval plus 0.2s to keep the port running */
 	mod_timer(&up->timer,
@@ -592,6 +592,7 @@ serial8250_register_ports(struct uart_driver *drv, struct device *dev)
 
 #ifdef CONFIG_SERIAL_8250_CONSOLE
 
+#ifdef CONFIG_SERIAL_8250_LEGACY_CONSOLE
 static void univ8250_console_write(struct console *co, const char *s,
 				   unsigned int count)
 {
@@ -599,6 +600,37 @@ static void univ8250_console_write(struct console *co, const char *s,
 
 	serial8250_console_write(up, s, count);
 }
+#else
+static bool univ8250_console_write_atomic(struct console *co,
+					  struct nbcon_write_context *wctxt)
+{
+	struct uart_8250_port *up = &serial8250_ports[co->index];
+
+	return serial8250_console_write_atomic(up, wctxt);
+}
+
+static bool univ8250_console_write_thread(struct console *co,
+					  struct nbcon_write_context *wctxt)
+{
+	struct uart_8250_port *up = &serial8250_ports[co->index];
+
+	return serial8250_console_write_thread(up, wctxt);
+}
+
+static void univ8250_console_driver_enter(struct console *con, unsigned long *flags)
+{
+	struct uart_port *up = &serial8250_ports[con->index].port;
+
+	__uart_port_lock_irqsave(up, flags);
+}
+
+static void univ8250_console_driver_exit(struct console *con, unsigned long flags)
+{
+	struct uart_port *up = &serial8250_ports[con->index].port;
+
+	__uart_port_unlock_irqrestore(up, flags);
+}
+#endif /* CONFIG_SERIAL_8250_LEGACY_CONSOLE */
 
 static int univ8250_console_setup(struct console *co, char *options)
 {
@@ -698,12 +730,20 @@ static int univ8250_console_match(struct console *co, char *name, int idx,
 
 static struct console univ8250_console = {
 	.name		= "ttyS",
+#ifdef CONFIG_SERIAL_8250_LEGACY_CONSOLE
 	.write		= univ8250_console_write,
+	.flags		= CON_PRINTBUFFER | CON_ANYTIME,
+#else
+	.write_atomic	= univ8250_console_write_atomic,
+	.write_thread	= univ8250_console_write_thread,
+	.driver_enter	= univ8250_console_driver_enter,
+	.driver_exit	= univ8250_console_driver_exit,
+	.flags		= CON_PRINTBUFFER | CON_ANYTIME | CON_NBCON,
+#endif
 	.device		= uart_console_device,
 	.setup		= univ8250_console_setup,
 	.exit		= univ8250_console_exit,
 	.match		= univ8250_console_match,
-	.flags		= CON_PRINTBUFFER | CON_ANYTIME,
 	.index		= -1,
 	.data		= &serial8250_reg,
 };
@@ -992,11 +1032,11 @@ static void serial_8250_overrun_backoff_work(struct work_struct *work)
 	struct uart_port *port = &up->port;
 	unsigned long flags;
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 	up->ier |= UART_IER_RLSI | UART_IER_RDI;
 	up->port.read_status_mask |= UART_LSR_DR;
 	serial_out(up, UART_IER, up->ier);
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }
 
 /**
@@ -1194,9 +1234,9 @@ void serial8250_unregister_port(int line)
 	if (uart->em485) {
 		unsigned long flags;
 
-		spin_lock_irqsave(&uart->port.lock, flags);
+		uart_port_lock_irqsave(&uart->port, &flags);
 		serial8250_em485_destroy(uart);
-		spin_unlock_irqrestore(&uart->port.lock, flags);
+		uart_port_unlock_irqrestore(&uart->port, flags);
 	}
 
 	uart_remove_one_port(&serial8250_reg, &uart->port);
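The non-legacy branch above registers an NBCON console: instead of a single write() callback it supplies write_atomic() and write_thread(), each receiving an nbcon_write_context. Hardware access must be bracketed by nbcon_enter_unsafe()/nbcon_exit_unsafe() so a higher-priority printer can take the console over at safe points. A minimal callback skeleton following that pattern (illustrative sketch only, not a driver API):

	static bool my_write_atomic(struct console *con,
				    struct nbcon_write_context *wctxt)
	{
		if (!nbcon_enter_unsafe(wctxt))
			return false;		/* ownership lost, abort */

		/* mask interrupts, emit wctxt->outbuf, restore state */

		return nbcon_exit_unsafe(wctxt);	/* false on takeover */
	}
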
diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c
index 7fa665017..8b30ca8fd 100644
--- a/drivers/tty/serial/8250/8250_dma.c
+++ b/drivers/tty/serial/8250/8250_dma.c
@@ -22,7 +22,7 @@ static void __dma_tx_complete(void *param)
 	dma_sync_single_for_cpu(dma->txchan->device->dev, dma->tx_addr,
 				UART_XMIT_SIZE, DMA_TO_DEVICE);
 
-	spin_lock_irqsave(&p->port.lock, flags);
+	uart_port_lock_irqsave(&p->port, &flags);
 
 	dma->tx_running = 0;
 
@@ -35,7 +35,7 @@ static void __dma_tx_complete(void *param)
 	if (ret || !dma->tx_running)
 		serial8250_set_THRI(p);
 
-	spin_unlock_irqrestore(&p->port.lock, flags);
+	uart_port_unlock_irqrestore(&p->port, flags);
 }
 
 static void __dma_rx_complete(struct uart_8250_port *p)
@@ -70,7 +70,7 @@ static void dma_rx_complete(void *param)
 	struct uart_8250_dma *dma = p->dma;
 	unsigned long flags;
 
-	spin_lock_irqsave(&p->port.lock, flags);
+	uart_port_lock_irqsave(&p->port, &flags);
 	if (dma->rx_running)
 		__dma_rx_complete(p);
 
@@ -80,7 +80,7 @@ static void dma_rx_complete(void *param)
 	 */
 	if (!dma->rx_running && (serial_lsr_in(p) & UART_LSR_DR))
 		p->dma->rx_dma(p);
-	spin_unlock_irqrestore(&p->port.lock, flags);
+	uart_port_unlock_irqrestore(&p->port, flags);
 }
 
 int serial8250_tx_dma(struct uart_8250_port *p)
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index 72f9aab75..b8426ac63 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -263,20 +263,20 @@ static int dw8250_handle_irq(struct uart_port *p)
 	 * so we limit the workaround only to non-DMA mode.
 	 */
 	if (!up->dma && rx_timeout) {
-		spin_lock_irqsave(&p->lock, flags);
+		uart_port_lock_irqsave(p, &flags);
 		status = serial_lsr_in(up);
 
 		if (!(status & (UART_LSR_DR | UART_LSR_BI)))
 			(void) p->serial_in(p, UART_RX);
 
-		spin_unlock_irqrestore(&p->lock, flags);
+		uart_port_unlock_irqrestore(p, flags);
 	}
 
 	/* Manually stop the Rx DMA transfer when acting as flow controller */
 	if (quirks & DW_UART_QUIRK_IS_DMA_FC && up->dma && up->dma->rx_running && rx_timeout) {
-		spin_lock_irqsave(&p->lock, flags);
+		uart_port_lock_irqsave(p, &flags);
 		status = serial_lsr_in(up);
-		spin_unlock_irqrestore(&p->lock, flags);
+		uart_port_unlock_irqrestore(p, flags);
 
 		if (status & (UART_LSR_DR | UART_LSR_BI)) {
 			dw8250_writel_ext(p, RZN1_UART_RDMACR, 0);
diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
index 27430fdd9..17be6ad24 100644
--- a/drivers/tty/serial/8250/8250_exar.c
+++ b/drivers/tty/serial/8250/8250_exar.c
@@ -243,9 +243,9 @@ static int xr17v35x_startup(struct uart_port *port)
 	 *
 	 * Synchronize UART_IER access against the console.
 	 */
-	spin_lock_irq(&port->lock);
+	uart_port_lock_irq(port);
 	serial_port_out(port, UART_IER, 0);
-	spin_unlock_irq(&port->lock);
+	uart_port_unlock_irq(port);
 
 	return serial8250_do_startup(port);
 }
diff --git a/drivers/tty/serial/8250/8250_fsl.c b/drivers/tty/serial/8250/8250_fsl.c
index 6af4e1c12..f522eb502 100644
--- a/drivers/tty/serial/8250/8250_fsl.c
+++ b/drivers/tty/serial/8250/8250_fsl.c
@@ -30,11 +30,11 @@ int fsl8250_handle_irq(struct uart_port *port)
 	unsigned int iir;
 	struct uart_8250_port *up = up_to_u8250p(port);
 
-	spin_lock_irqsave(&up->port.lock, flags);
+	uart_port_lock_irqsave(&up->port, &flags);
 
 	iir = port->serial_in(port, UART_IIR);
 	if (iir & UART_IIR_NO_INT) {
-		spin_unlock_irqrestore(&up->port.lock, flags);
+		uart_port_unlock_irqrestore(&up->port, flags);
 		return 0;
 	}
 
@@ -54,7 +54,7 @@ int fsl8250_handle_irq(struct uart_port *port)
 	if (unlikely(up->lsr_saved_flags & UART_LSR_BI)) {
 		up->lsr_saved_flags &= ~UART_LSR_BI;
 		port->serial_in(port, UART_RX);
-		spin_unlock_irqrestore(&up->port.lock, flags);
+		uart_port_unlock_irqrestore(&up->port, flags);
 		return 1;
 	}
 
diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c
index 28f9a2679..33699e86e 100644
--- a/drivers/tty/serial/8250/8250_mtk.c
+++ b/drivers/tty/serial/8250/8250_mtk.c
@@ -102,7 +102,7 @@ static void mtk8250_dma_rx_complete(void *param)
 	if (data->rx_status == DMA_RX_SHUTDOWN)
 		return;
 
-	spin_lock_irqsave(&up->port.lock, flags);
+	uart_port_lock_irqsave(&up->port, &flags);
 
 	dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
 	total = dma->rx_size - state.residue;
@@ -128,7 +128,7 @@ static void mtk8250_dma_rx_complete(void *param)
 
 	mtk8250_rx_dma(up);
 
-	spin_unlock_irqrestore(&up->port.lock, flags);
+	uart_port_unlock_irqrestore(&up->port, flags);
 }
 
 static void mtk8250_rx_dma(struct uart_8250_port *up)
@@ -372,7 +372,7 @@ mtk8250_set_termios(struct uart_port *port, struct ktermios *termios,
 	 * Ok, we're now changing the port state.  Do it with
 	 * interrupts disabled.
 	 */
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 
 	/*
 	 * Update the per-port timeout.
@@ -420,7 +420,7 @@ mtk8250_set_termios(struct uart_port *port, struct ktermios *termios,
 	if (uart_console(port))
 		up->port.cons->cflag = termios->c_cflag;
 
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 	/* Don't rewrite B0 */
 	if (tty_termios_baud_rate(termios))
 		tty_termios_encode_baud_rate(termios, baud, baud);
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index 9ed62bc7c..e687cd0da 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -405,7 +405,7 @@ static void omap_8250_set_termios(struct uart_port *port,
 	 * interrupts disabled.
 	 */
 	pm_runtime_get_sync(port->dev);
-	spin_lock_irq(&port->lock);
+	uart_port_lock_irq(port);
 
 	/*
 	 * Update the per-port timeout.
@@ -508,7 +508,7 @@ static void omap_8250_set_termios(struct uart_port *port,
 	}
 	omap8250_restore_regs(up);
 
-	spin_unlock_irq(&up->port.lock);
+	uart_port_unlock_irq(&up->port);
 	pm_runtime_mark_last_busy(port->dev);
 	pm_runtime_put_autosuspend(port->dev);
 
@@ -533,7 +533,7 @@ static void omap_8250_pm(struct uart_port *port, unsigned int state,
 	pm_runtime_get_sync(port->dev);
 
 	/* Synchronize UART_IER access against the console. */
-	spin_lock_irq(&port->lock);
+	uart_port_lock_irq(port);
 
 	serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
 	efr = serial_in(up, UART_EFR);
@@ -545,7 +545,7 @@ static void omap_8250_pm(struct uart_port *port, unsigned int state,
 	serial_out(up, UART_EFR, efr);
 	serial_out(up, UART_LCR, 0);
 
-	spin_unlock_irq(&port->lock);
+	uart_port_unlock_irq(port);
 
 	pm_runtime_mark_last_busy(port->dev);
 	pm_runtime_put_autosuspend(port->dev);
@@ -676,7 +676,7 @@ static irqreturn_t omap8250_irq(int irq, void *dev_id)
 		unsigned long delay;
 
 		/* Synchronize UART_IER access against the console. */
-		spin_lock(&port->lock);
+		uart_port_lock(port);
 		up->ier = port->serial_in(port, UART_IER);
 		if (up->ier & (UART_IER_RLSI | UART_IER_RDI)) {
 			port->ops->stop_rx(port);
@@ -686,7 +686,7 @@ static irqreturn_t omap8250_irq(int irq, void *dev_id)
 			 */
 			cancel_delayed_work(&up->overrun_backoff);
 		}
-		spin_unlock(&port->lock);
+		uart_port_unlock(port);
 
 		delay = msecs_to_jiffies(up->overrun_backoff_time_ms);
 		schedule_delayed_work(&up->overrun_backoff, delay);
@@ -733,10 +733,10 @@ static int omap_8250_startup(struct uart_port *port)
 	}
 
 	/* Synchronize UART_IER access against the console. */
-	spin_lock_irq(&port->lock);
+	uart_port_lock_irq(port);
 	up->ier = UART_IER_RLSI | UART_IER_RDI;
 	serial_out(up, UART_IER, up->ier);
-	spin_unlock_irq(&port->lock);
+	uart_port_unlock_irq(port);
 
 #ifdef CONFIG_PM
 	up->capabilities |= UART_CAP_RPM;
@@ -749,9 +749,9 @@ static int omap_8250_startup(struct uart_port *port)
 	serial_out(up, UART_OMAP_WER, priv->wer);
 
 	if (up->dma && !(priv->habit & UART_HAS_EFR2)) {
-		spin_lock_irq(&port->lock);
+		uart_port_lock_irq(port);
 		up->dma->rx_dma(up);
-		spin_unlock_irq(&port->lock);
+		uart_port_unlock_irq(port);
 	}
 
 	enable_irq(up->port.irq);
@@ -777,10 +777,10 @@ static void omap_8250_shutdown(struct uart_port *port)
 	serial_out(up, UART_OMAP_EFR2, 0x0);
 
 	/* Synchronize UART_IER access against the console. */
-	spin_lock_irq(&port->lock);
+	uart_port_lock_irq(port);
 	up->ier = 0;
 	serial_out(up, UART_IER, 0);
-	spin_unlock_irq(&port->lock);
+	uart_port_unlock_irq(port);
 	disable_irq_nosync(up->port.irq);
 	dev_pm_clear_wake_irq(port->dev);
 
@@ -805,10 +805,10 @@ static void omap_8250_throttle(struct uart_port *port)
 
 	pm_runtime_get_sync(port->dev);
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 	port->ops->stop_rx(port);
 	priv->throttled = true;
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 
 	pm_runtime_mark_last_busy(port->dev);
 	pm_runtime_put_autosuspend(port->dev);
@@ -823,14 +823,14 @@ static void omap_8250_unthrottle(struct uart_port *port)
 	pm_runtime_get_sync(port->dev);
 
 	/* Synchronize UART_IER access against the console. */
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 	priv->throttled = false;
 	if (up->dma)
 		up->dma->rx_dma(up);
 	up->ier |= UART_IER_RLSI | UART_IER_RDI;
 	port->read_status_mask |= UART_LSR_DR;
 	serial_out(up, UART_IER, up->ier);
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 
 	pm_runtime_mark_last_busy(port->dev);
 	pm_runtime_put_autosuspend(port->dev);
@@ -974,7 +974,7 @@ static void __dma_rx_complete(void *param)
 	unsigned long flags;
 
 	/* Synchronize UART_IER access against the console. */
-	spin_lock_irqsave(&p->port.lock, flags);
+	uart_port_lock_irqsave(&p->port, &flags);
 
 	/*
 	 * If the tx status is not DMA_COMPLETE, then this is a delayed
@@ -983,7 +983,7 @@ static void __dma_rx_complete(void *param)
 	 */
 	if (dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state) !=
 			DMA_COMPLETE) {
-		spin_unlock_irqrestore(&p->port.lock, flags);
+		uart_port_unlock_irqrestore(&p->port, flags);
 		return;
 	}
 	__dma_rx_do_complete(p);
@@ -994,7 +994,7 @@ static void __dma_rx_complete(void *param)
 		omap_8250_rx_dma(p);
 	}
 
-	spin_unlock_irqrestore(&p->port.lock, flags);
+	uart_port_unlock_irqrestore(&p->port, flags);
 }
 
 static void omap_8250_rx_dma_flush(struct uart_8250_port *p)
@@ -1099,7 +1099,7 @@ static void omap_8250_dma_tx_complete(void *param)
 	dma_sync_single_for_cpu(dma->txchan->device->dev, dma->tx_addr,
 				UART_XMIT_SIZE, DMA_TO_DEVICE);
 
-	spin_lock_irqsave(&p->port.lock, flags);
+	uart_port_lock_irqsave(&p->port, &flags);
 
 	dma->tx_running = 0;
 
@@ -1128,7 +1128,7 @@ static void omap_8250_dma_tx_complete(void *param)
 		serial8250_set_THRI(p);
 	}
 
-	spin_unlock_irqrestore(&p->port.lock, flags);
+	uart_port_unlock_irqrestore(&p->port, flags);
 }
 
 static int omap_8250_tx_dma(struct uart_8250_port *p)
@@ -1294,7 +1294,7 @@ static int omap_8250_dma_handle_irq(struct uart_port *port)
 		return IRQ_HANDLED;
 	}
 
-	spin_lock(&port->lock);
+	uart_port_lock(port);
 
 	status = serial_port_in(port, UART_LSR);
 
@@ -1774,15 +1774,15 @@ static int omap8250_runtime_resume(struct device *dev)
 	up = serial8250_get_port(priv->line);
 
 	if (up && omap8250_lost_context(up)) {
-		spin_lock_irq(&up->port.lock);
+		uart_port_lock_irq(&up->port);
 		omap8250_restore_regs(up);
-		spin_unlock_irq(&up->port.lock);
+		uart_port_unlock_irq(&up->port);
 	}
 
 	if (up && up->dma && up->dma->rxchan && !(priv->habit & UART_HAS_EFR2)) {
-		spin_lock_irq(&up->port.lock);
+		uart_port_lock_irq(&up->port);
 		omap_8250_rx_dma(up);
-		spin_unlock_irq(&up->port.lock);
+		uart_port_unlock_irq(&up->port);
 	}
 
 	priv->latency = priv->calc_latency;
diff --git a/drivers/tty/serial/8250/8250_pci1xxxx.c b/drivers/tty/serial/8250/8250_pci1xxxx.c
index a3b25779d..53e238c8c 100644
--- a/drivers/tty/serial/8250/8250_pci1xxxx.c
+++ b/drivers/tty/serial/8250/8250_pci1xxxx.c
@@ -225,10 +225,10 @@ static bool pci1xxxx_port_suspend(int line)
 	if (port->suspended == 0 && port->dev) {
 		wakeup_mask = readb(up->port.membase + UART_WAKE_MASK_REG);
 
-		spin_lock_irqsave(&port->lock, flags);
+		uart_port_lock_irqsave(port, &flags);
 		port->mctrl &= ~TIOCM_OUT2;
 		port->ops->set_mctrl(port, port->mctrl);
-		spin_unlock_irqrestore(&port->lock, flags);
+		uart_port_unlock_irqrestore(port, flags);
 
 		ret = (wakeup_mask & UART_WAKE_SRCS) != UART_WAKE_SRCS;
 	}
@@ -251,10 +251,10 @@ static void pci1xxxx_port_resume(int line)
 	writeb(UART_WAKE_SRCS, port->membase + UART_WAKE_REG);
 
 	if (port->suspended == 0) {
-		spin_lock_irqsave(&port->lock, flags);
+		uart_port_lock_irqsave(port, &flags);
 		port->mctrl |= TIOCM_OUT2;
 		port->ops->set_mctrl(port, port->mctrl);
-		spin_unlock_irqrestore(&port->lock, flags);
+		uart_port_unlock_irqrestore(port, flags);
 	}
 	mutex_unlock(&tport->mutex);
 }
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index a17803da8..2d4e775cd 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -557,6 +557,11 @@ static int serial8250_em485_init(struct uart_8250_port *p)
 	if (!p->em485)
 		return -ENOMEM;
 
+#ifndef CONFIG_SERIAL_8250_LEGACY_CONSOLE
+	if (uart_console(&p->port))
+		dev_warn(p->port.dev, "no atomic printing for rs485 consoles\n");
+#endif
+
 	hrtimer_init(&p->em485->stop_tx_timer, CLOCK_MONOTONIC,
 		     HRTIMER_MODE_REL);
 	hrtimer_init(&p->em485->start_tx_timer, CLOCK_MONOTONIC,
@@ -689,7 +694,7 @@ static void serial8250_set_sleep(struct uart_8250_port *p, int sleep)
 
 	if (p->capabilities & UART_CAP_SLEEP) {
 		/* Synchronize UART_IER access against the console. */
-		spin_lock_irq(&p->port.lock);
+		uart_port_lock_irq(&p->port);
 		if (p->capabilities & UART_CAP_EFR) {
 			lcr = serial_in(p, UART_LCR);
 			efr = serial_in(p, UART_EFR);
@@ -703,13 +708,17 @@ static void serial8250_set_sleep(struct uart_8250_port *p, int sleep)
 			serial_out(p, UART_EFR, efr);
 			serial_out(p, UART_LCR, lcr);
 		}
-		spin_unlock_irq(&p->port.lock);
+		uart_port_unlock_irq(&p->port);
 	}
 
 	serial8250_rpm_put(p);
 }
 
-static void serial8250_clear_IER(struct uart_8250_port *up)
+/*
+ * Only to be used by write_atomic() and the legacy write(), which do not
+ * require port lock.
+ */
+static void __serial8250_clear_IER(struct uart_8250_port *up)
 {
 	if (up->capabilities & UART_CAP_UUE)
 		serial_out(up, UART_IER, UART_IER_UUE);
@@ -717,6 +726,14 @@ static void serial8250_clear_IER(struct uart_8250_port *up)
 		serial_out(up, UART_IER, 0);
 }
 
+static inline void serial8250_clear_IER(struct uart_8250_port *up)
+{
+	/* Port locked to synchronize UART_IER access against the console. */
+	lockdep_assert_held_once(&up->port.lock);
+
+	__serial8250_clear_IER(up);
+}
+
 #ifdef CONFIG_SERIAL_8250_RSA
 /*
  * Attempts to turn on the RSA FIFO.  Returns zero on failure.
@@ -746,9 +763,9 @@ static void enable_rsa(struct uart_8250_port *up)
 {
 	if (up->port.type == PORT_RSA) {
 		if (up->port.uartclk != SERIAL_RSA_BAUD_BASE * 16) {
-			spin_lock_irq(&up->port.lock);
+			uart_port_lock_irq(&up->port);
 			__enable_rsa(up);
-			spin_unlock_irq(&up->port.lock);
+			uart_port_unlock_irq(&up->port);
 		}
 		if (up->port.uartclk == SERIAL_RSA_BAUD_BASE * 16)
 			serial_out(up, UART_RSA_FRR, 0);
@@ -768,7 +785,7 @@ static void disable_rsa(struct uart_8250_port *up)
 
 	if (up->port.type == PORT_RSA &&
 	    up->port.uartclk == SERIAL_RSA_BAUD_BASE * 16) {
-		spin_lock_irq(&up->port.lock);
+		uart_port_lock_irq(&up->port);
 
 		mode = serial_in(up, UART_RSA_MSR);
 		result = !(mode & UART_RSA_MSR_FIFO);
@@ -781,7 +798,7 @@ static void disable_rsa(struct uart_8250_port *up)
 
 		if (result)
 			up->port.uartclk = SERIAL_RSA_BAUD_BASE_LO * 16;
-		spin_unlock_irq(&up->port.lock);
+		uart_port_unlock_irq(&up->port);
 	}
 }
 #endif /* CONFIG_SERIAL_8250_RSA */
@@ -1172,7 +1189,7 @@ static void autoconfig(struct uart_8250_port *up)
 	 *
 	 * Synchronize UART_IER access against the console.
 	 */
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 
 	up->capabilities = 0;
 	up->bugs = 0;
@@ -1211,7 +1228,7 @@ static void autoconfig(struct uart_8250_port *up)
 			/*
 			 * We failed; there's nothing here
 			 */
-			spin_unlock_irqrestore(&port->lock, flags);
+			uart_port_unlock_irqrestore(port, flags);
 			DEBUG_AUTOCONF("IER test failed (%02x, %02x) ",
 				       scratch2, scratch3);
 			goto out;
@@ -1235,7 +1252,7 @@ static void autoconfig(struct uart_8250_port *up)
 		status1 = serial_in(up, UART_MSR) & UART_MSR_STATUS_BITS;
 		serial8250_out_MCR(up, save_mcr);
 		if (status1 != (UART_MSR_DCD | UART_MSR_CTS)) {
-			spin_unlock_irqrestore(&port->lock, flags);
+			uart_port_unlock_irqrestore(port, flags);
 			DEBUG_AUTOCONF("LOOP test failed (%02x) ",
 				       status1);
 			goto out;
@@ -1304,7 +1321,7 @@ static void autoconfig(struct uart_8250_port *up)
 	serial8250_clear_IER(up);
 
 out_unlock:
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 
 	/*
 	 * Check if the device is a Fintek F81216A
@@ -1341,9 +1358,9 @@ static void autoconfig_irq(struct uart_8250_port *up)
 	probe_irq_off(probe_irq_on());
 	save_mcr = serial8250_in_MCR(up);
 	/* Synchronize UART_IER access against the console. */
-	spin_lock_irq(&port->lock);
+	uart_port_lock_irq(port);
 	save_ier = serial_in(up, UART_IER);
-	spin_unlock_irq(&port->lock);
+	uart_port_unlock_irq(port);
 	serial8250_out_MCR(up, UART_MCR_OUT1 | UART_MCR_OUT2);
 
 	irqs = probe_irq_on();
@@ -1356,9 +1373,9 @@ static void autoconfig_irq(struct uart_8250_port *up)
 			UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2);
 	}
 	/* Synchronize UART_IER access against the console. */
-	spin_lock_irq(&port->lock);
+	uart_port_lock_irq(port);
 	serial_out(up, UART_IER, UART_IER_ALL_INTR);
-	spin_unlock_irq(&port->lock);
+	uart_port_unlock_irq(port);
 	serial_in(up, UART_LSR);
 	serial_in(up, UART_RX);
 	serial_in(up, UART_IIR);
@@ -1369,9 +1386,9 @@ static void autoconfig_irq(struct uart_8250_port *up)
 
 	serial8250_out_MCR(up, save_mcr);
 	/* Synchronize UART_IER access against the console. */
-	spin_lock_irq(&port->lock);
+	uart_port_lock_irq(port);
 	serial_out(up, UART_IER, save_ier);
-	spin_unlock_irq(&port->lock);
+	uart_port_unlock_irq(port);
 
 	if (port->flags & UPF_FOURPORT)
 		outb_p(save_ICP, ICP);
@@ -1436,13 +1453,13 @@ static enum hrtimer_restart serial8250_em485_handle_stop_tx(struct hrtimer *t)
 	unsigned long flags;
 
 	serial8250_rpm_get(p);
-	spin_lock_irqsave(&p->port.lock, flags);
+	uart_port_lock_irqsave(&p->port, &flags);
 	if (em485->active_timer == &em485->stop_tx_timer) {
 		p->rs485_stop_tx(p);
 		em485->active_timer = NULL;
 		em485->tx_stopped = true;
 	}
-	spin_unlock_irqrestore(&p->port.lock, flags);
+	uart_port_unlock_irqrestore(&p->port, flags);
 	serial8250_rpm_put(p);
 
 	return HRTIMER_NORESTART;
@@ -1624,12 +1641,12 @@ static enum hrtimer_restart serial8250_em485_handle_start_tx(struct hrtimer *t)
 	struct uart_8250_port *p = em485->port;
 	unsigned long flags;
 
-	spin_lock_irqsave(&p->port.lock, flags);
+	uart_port_lock_irqsave(&p->port, &flags);
 	if (em485->active_timer == &em485->start_tx_timer) {
 		__start_tx(&p->port);
 		em485->active_timer = NULL;
 	}
-	spin_unlock_irqrestore(&p->port.lock, flags);
+	uart_port_unlock_irqrestore(&p->port, flags);
 
 	return HRTIMER_NORESTART;
 }
@@ -1912,7 +1929,7 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
 	if (iir & UART_IIR_NO_INT)
 		return 0;
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 
 	status = serial_lsr_in(up);
 
@@ -1982,9 +1999,9 @@ static int serial8250_tx_threshold_handle_irq(struct uart_port *port)
 	if ((iir & UART_IIR_ID) == UART_IIR_THRI) {
 		struct uart_8250_port *up = up_to_u8250p(port);
 
-		spin_lock_irqsave(&port->lock, flags);
+		uart_port_lock_irqsave(port, &flags);
 		serial8250_tx_chars(up);
-		spin_unlock_irqrestore(&port->lock, flags);
+		uart_port_unlock_irqrestore(port, flags);
 	}
 
 	iir = serial_port_in(port, UART_IIR);
@@ -1999,10 +2016,10 @@ static unsigned int serial8250_tx_empty(struct uart_port *port)
 
 	serial8250_rpm_get(up);
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 	if (!serial8250_tx_dma_running(up) && uart_lsr_tx_empty(serial_lsr_in(up)))
 		result = TIOCSER_TEMT;
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 
 	serial8250_rpm_put(up);
 
@@ -2064,13 +2081,13 @@ static void serial8250_break_ctl(struct uart_port *port, int break_state)
 	unsigned long flags;
 
 	serial8250_rpm_get(up);
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 	if (break_state == -1)
 		up->lcr |= UART_LCR_SBC;
 	else
 		up->lcr &= ~UART_LCR_SBC;
 	serial_port_out(port, UART_LCR, up->lcr);
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 	serial8250_rpm_put(up);
 }
 
@@ -2205,7 +2222,7 @@ int serial8250_do_startup(struct uart_port *port)
 		 *
 		 * Synchronize UART_IER access against the console.
 		 */
-		spin_lock_irqsave(&port->lock, flags);
+		uart_port_lock_irqsave(port, &flags);
 		up->acr = 0;
 		serial_port_out(port, UART_LCR, UART_LCR_CONF_MODE_B);
 		serial_port_out(port, UART_EFR, UART_EFR_ECB);
@@ -2215,7 +2232,7 @@ int serial8250_do_startup(struct uart_port *port)
 		serial_port_out(port, UART_LCR, UART_LCR_CONF_MODE_B);
 		serial_port_out(port, UART_EFR, UART_EFR_ECB);
 		serial_port_out(port, UART_LCR, 0);
-		spin_unlock_irqrestore(&port->lock, flags);
+		uart_port_unlock_irqrestore(port, flags);
 	}
 
 	if (port->type == PORT_DA830) {
@@ -2224,10 +2241,10 @@ int serial8250_do_startup(struct uart_port *port)
 		 *
 		 * Synchronize UART_IER access against the console.
 		 */
-		spin_lock_irqsave(&port->lock, flags);
+		uart_port_lock_irqsave(port, &flags);
 		serial_port_out(port, UART_IER, 0);
 		serial_port_out(port, UART_DA830_PWREMU_MGMT, 0);
-		spin_unlock_irqrestore(&port->lock, flags);
+		uart_port_unlock_irqrestore(port, flags);
 		mdelay(10);
 
 		/* Enable Tx, Rx and free run mode */
@@ -2341,7 +2358,7 @@ int serial8250_do_startup(struct uart_port *port)
 		 *
 		 * Synchronize UART_IER access against the console.
 		 */
-		spin_lock_irqsave(&port->lock, flags);
+		uart_port_lock_irqsave(port, &flags);
 
 		wait_for_xmitr(up, UART_LSR_THRE);
 		serial_port_out_sync(port, UART_IER, UART_IER_THRI);
@@ -2353,7 +2370,7 @@ int serial8250_do_startup(struct uart_port *port)
 		iir = serial_port_in(port, UART_IIR);
 		serial_port_out(port, UART_IER, 0);
 
-		spin_unlock_irqrestore(&port->lock, flags);
+		uart_port_unlock_irqrestore(port, flags);
 
 		if (port->irqflags & IRQF_SHARED)
 			enable_irq(port->irq);
@@ -2376,7 +2393,7 @@ int serial8250_do_startup(struct uart_port *port)
 	 */
 	serial_port_out(port, UART_LCR, UART_LCR_WLEN8);
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 	if (up->port.flags & UPF_FOURPORT) {
 		if (!up->port.irq)
 			up->port.mctrl |= TIOCM_OUT1;
@@ -2422,7 +2439,7 @@ int serial8250_do_startup(struct uart_port *port)
 	}
 
 dont_test_tx_en:
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 
 	/*
	 * Clear the interrupt registers again for luck, and clear the
@@ -2493,17 +2510,17 @@ void serial8250_do_shutdown(struct uart_port *port)
 	 *
 	 * Synchronize UART_IER access against the console.
 	 */
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 	up->ier = 0;
 	serial_port_out(port, UART_IER, 0);
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 
 	synchronize_irq(port->irq);
 
 	if (up->dma)
 		serial8250_release_dma(up);
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 	if (port->flags & UPF_FOURPORT) {
 		/* reset interrupts on the AST Fourport board */
 		inb((port->iobase & 0xfe0) | 0x1f);
@@ -2512,7 +2529,7 @@ void serial8250_do_shutdown(struct uart_port *port)
 		port->mctrl &= ~TIOCM_OUT2;
 
 	serial8250_set_mctrl(port, port->mctrl);
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 
 	/*
 	 * Disable break condition and FIFOs
@@ -2748,14 +2765,14 @@ void serial8250_update_uartclk(struct uart_port *port, unsigned int uartclk)
 	quot = serial8250_get_divisor(port, baud, &frac);
 
 	serial8250_rpm_get(up);
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 
 	uart_update_timeout(port, termios->c_cflag, baud);
 
 	serial8250_set_divisor(port, baud, quot, frac);
 	serial_port_out(port, UART_LCR, up->lcr);
 
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 	serial8250_rpm_put(up);
 
 out_unlock:
@@ -2792,7 +2809,7 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
 	 * Synchronize UART_IER access against the console.
 	 */
 	serial8250_rpm_get(up);
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 
 	up->lcr = cval;					/* Save computed LCR */
 
@@ -2895,7 +2912,7 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
 		serial_port_out(port, UART_FCR, up->fcr);	/* set fcr */
 	}
 	serial8250_set_mctrl(port, port->mctrl);
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 	serial8250_rpm_put(up);
 
 	/* Don't rewrite B0 */
@@ -2918,15 +2935,15 @@ void serial8250_do_set_ldisc(struct uart_port *port, struct ktermios *termios)
 {
 	if (termios->c_line == N_PPS) {
 		port->flags |= UPF_HARDPPS_CD;
-		spin_lock_irq(&port->lock);
+		uart_port_lock_irq(port);
 		serial8250_enable_ms(port);
-		spin_unlock_irq(&port->lock);
+		uart_port_unlock_irq(port);
 	} else {
 		port->flags &= ~UPF_HARDPPS_CD;
 		if (!UART_ENABLE_MS(port, termios->c_cflag)) {
-			spin_lock_irq(&port->lock);
+			uart_port_lock_irq(port);
 			serial8250_disable_ms(port);
-			spin_unlock_irq(&port->lock);
+			uart_port_unlock_irq(port);
 		}
 	}
 }
@@ -3322,6 +3339,11 @@ static void serial8250_console_putchar(struct uart_port *port, unsigned char ch)
 
 	wait_for_xmitr(up, UART_LSR_THRE);
 	serial_port_out(port, UART_TX, ch);
+
+	if (ch == '\n')
+		up->console_newline_needed = false;
+	else
+		up->console_newline_needed = true;
 }
 
 /*
@@ -3350,6 +3372,7 @@ static void serial8250_console_restore(struct uart_8250_port *up)
 		serial8250_out_MCR(up, up->mcr | UART_MCR_DTR | UART_MCR_RTS);
 }
 
+#ifdef CONFIG_SERIAL_8250_LEGACY_CONSOLE
 /*
 * Print a string to the serial port using the device FIFO
 *
@@ -3400,15 +3423,15 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
 	touch_nmi_watchdog();
 
 	if (oops_in_progress)
-		locked = spin_trylock_irqsave(&port->lock, flags);
+		locked = uart_port_trylock_irqsave(port, &flags);
 	else
-		spin_lock_irqsave(&port->lock, flags);
+		uart_port_lock_irqsave(port, &flags);
 
 	/*
 	 * First save the IER then disable the interrupts
 	 */
 	ier = serial_port_in(port, UART_IER);
-	serial8250_clear_IER(up);
+	__serial8250_clear_IER(up);
 
 	/* check scratch reg to see if port powered off during system sleep */
 	if (up->canary && (up->canary != serial_port_in(port, UART_SCR))) {
@@ -3472,8 +3495,137 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
 		serial8250_modem_status(up);
 
 	if (locked)
-		spin_unlock_irqrestore(&port->lock, flags);
+		uart_port_unlock_irqrestore(port, flags);
 }
+#else
+bool serial8250_console_write_thread(struct uart_8250_port *up,
+				     struct nbcon_write_context *wctxt)
+{
+	struct uart_8250_em485 *em485 = up->em485;
+	struct uart_port *port = &up->port;
+	bool done = false;
+	unsigned int ier;
+
+	touch_nmi_watchdog();
+
+	if (!nbcon_enter_unsafe(wctxt))
+		return false;
+
+	/* First save IER then disable the interrupts. */
+	ier = serial_port_in(port, UART_IER);
+	serial8250_clear_IER(up);
+
+	/* Check scratch reg if port powered off during system sleep. */
+	if (up->canary && (up->canary != serial_port_in(port, UART_SCR))) {
+		serial8250_console_restore(up);
+		up->canary = 0;
+	}
+
+	if (em485) {
+		if (em485->tx_stopped)
+			up->rs485_start_tx(up);
+		mdelay(port->rs485.delay_rts_before_send);
+	}
+
+	if (nbcon_exit_unsafe(wctxt)) {
+		int len = READ_ONCE(wctxt->len);
+		int i;
+
+		/*
+		 * Write out the message. Toggle unsafe for each byte in order
+		 * to give another (higher priority) context the opportunity
+		 * for a friendly takeover. If such a takeover occurs, this
+		 * context must reacquire ownership in order to perform final
+		 * actions (such as re-enabling the interrupts).
+		 *
+		 * IMPORTANT: wctxt->outbuf and wctxt->len are no longer valid
+		 *	      after a reacquire so writing the message must be
+		 *	      aborted.
+		 */
+		for (i = 0; i < len; i++) {
+			if (!nbcon_enter_unsafe(wctxt)) {
+				nbcon_reacquire(wctxt);
+				break;
+			}
+
+			uart_console_write(port, wctxt->outbuf + i, 1, serial8250_console_putchar);
+
+			if (!nbcon_exit_unsafe(wctxt)) {
+				nbcon_reacquire(wctxt);
+				break;
+			}
+		}
+		done = (i == len);
+	} else {
+		nbcon_reacquire(wctxt);
+	}
+
+	while (!nbcon_enter_unsafe(wctxt))
+		nbcon_reacquire(wctxt);
+
+	/* Finally, wait for transmitter to become empty and restore IER. */
+	wait_for_xmitr(up, UART_LSR_BOTH_EMPTY);
+	if (em485) {
+		mdelay(port->rs485.delay_rts_after_send);
+		if (em485->tx_stopped)
+			up->rs485_stop_tx(up);
+	}
+	serial_port_out(port, UART_IER, ier);
+
+	/*
+	 * The receive handling will happen properly because the receive ready
+	 * bit will still be set; it is not cleared on read.  However, modem
+	 * control will not, we must call it if we have saved something in the
+	 * saved flags while processing with interrupts off.
+	 */
+	if (up->msr_saved_flags)
+		serial8250_modem_status(up);
+
+	/* Success if no handover/takeover and message fully printed. */
+	return (nbcon_exit_unsafe(wctxt) && done);
+}
+
+bool serial8250_console_write_atomic(struct uart_8250_port *up,
+				     struct nbcon_write_context *wctxt)
+{
+	struct uart_port *port = &up->port;
+	unsigned int ier;
+
+	/* Atomic console not supported for rs485 mode. */
+	if (up->em485)
+		return false;
+
+	touch_nmi_watchdog();
+
+	if (!nbcon_enter_unsafe(wctxt))
+		return false;
+
+	/*
+	 * First save IER then disable the interrupts. The special variant to
+	 * clear IER is used because atomic printing may occur without holding
+	 * the port lock.
+	 */
+	ier = serial_port_in(port, UART_IER);
+	__serial8250_clear_IER(up);
+
+	/* Check scratch reg if port powered off during system sleep. */
+	if (up->canary && (up->canary != serial_port_in(port, UART_SCR))) {
+		serial8250_console_restore(up);
+		up->canary = 0;
+	}
+
+	if (up->console_newline_needed)
+		uart_console_write(port, "\n", 1, serial8250_console_putchar);
+	uart_console_write(port, wctxt->outbuf, wctxt->len, serial8250_console_putchar);
+
+	/* Finally, wait for transmitter to become empty and restore IER. */
+	wait_for_xmitr(up, UART_LSR_BOTH_EMPTY);
+	serial_port_out(port, UART_IER, ier);
+
+	/* Success if no handover/takeover. */
+	return nbcon_exit_unsafe(wctxt);
+}
+#endif /* CONFIG_SERIAL_8250_LEGACY_CONSOLE */
 
 static unsigned int probe_baud(struct uart_port *port)
 {
@@ -3492,6 +3644,7 @@ static unsigned int probe_baud(struct uart_port *port)
 
 int serial8250_console_setup(struct uart_port *port, char *options, bool probe)
 {
+	struct uart_8250_port *up = up_to_u8250p(port);
 	int baud = 9600;
 	int bits = 8;
 	int parity = 'n';
@@ -3501,6 +3654,8 @@ int serial8250_console_setup(struct uart_port *port, char *options, bool probe)
 	if (!port->iobase && !port->membase)
 		return -ENODEV;
 
+	up->console_newline_needed = false;
+
 	if (options)
 		uart_parse_options(options, &baud, &parity, &bits, &flow);
 	else if (probe)
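The console_newline_needed bookkeeping added above keeps an emergency printout from splicing into a half-emitted line: serial8250_console_putchar() records whether the last byte was '\n', and write_atomic() prepends a newline when it was not. Hypothetical interleaving, for illustration only:

	/*
	 * write_thread():  "[   12.3456] loading firm"   <- taken over mid-line
	 * write_atomic():  "\n[   12.3457] BUG: ..."     <- leading '\n' added,
	 *                  because console_newline_needed was left true by the
	 *                  interrupted writer.
	 */
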
diff --git a/drivers/tty/serial/altera_jtaguart.c b/drivers/tty/serial/altera_jtaguart.c
index 5fab4c978..7090b251d 100644
--- a/drivers/tty/serial/altera_jtaguart.c
+++ b/drivers/tty/serial/altera_jtaguart.c
@@ -147,14 +147,14 @@ static irqreturn_t altera_jtaguart_interrupt(int irq, void *data)
 	isr = (readl(port->membase + ALTERA_JTAGUART_CONTROL_REG) >>
 	       ALTERA_JTAGUART_CONTROL_RI_OFF) & port->read_status_mask;
 
-	spin_lock(&port->lock);
+	uart_port_lock(port);
 
 	if (isr & ALTERA_JTAGUART_CONTROL_RE_MSK)
 		altera_jtaguart_rx_chars(port);
 	if (isr & ALTERA_JTAGUART_CONTROL_WE_MSK)
 		altera_jtaguart_tx_chars(port);
 
-	spin_unlock(&port->lock);
+	uart_port_unlock(port);
 
 	return IRQ_RETVAL(isr);
 }
@@ -180,14 +180,14 @@ static int altera_jtaguart_startup(struct uart_port *port)
 		return ret;
 	}
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 
 	/* Enable RX interrupts now */
 	port->read_status_mask = ALTERA_JTAGUART_CONTROL_RE_MSK;
 	writel(port->read_status_mask,
 	       port->membase + ALTERA_JTAGUART_CONTROL_REG);
 
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 
 	return 0;
 }
@@ -196,14 +196,14 @@ static void altera_jtaguart_shutdown(struct uart_port *port)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 
 	/* Disable all interrupts now */
 	port->read_status_mask = 0;
 	writel(port->read_status_mask,
 	       port->membase + ALTERA_JTAGUART_CONTROL_REG);
 
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 
 	free_irq(port->irq, port);
 }
@@ -264,33 +264,33 @@ static void altera_jtaguart_console_putc(struct uart_port *port, unsigned char c
 	unsigned long flags;
 	u32 status;
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 	while (!altera_jtaguart_tx_space(port, &status)) {
-		spin_unlock_irqrestore(&port->lock, flags);
+		uart_port_unlock_irqrestore(port, flags);
 
 		if ((status & ALTERA_JTAGUART_CONTROL_AC_MSK) == 0) {
 			return;	/* no connection activity */
 		}
 
 		cpu_relax();
-		spin_lock_irqsave(&port->lock, flags);
+		uart_port_lock_irqsave(port, &flags);
 	}
 	writel(c, port->membase + ALTERA_JTAGUART_DATA_REG);
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }
 #else
 static void altera_jtaguart_console_putc(struct uart_port *port, unsigned char c)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 	while (!altera_jtaguart_tx_space(port, NULL)) {
-		spin_unlock_irqrestore(&port->lock, flags);
+		uart_port_unlock_irqrestore(port, flags);
 		cpu_relax();
-		spin_lock_irqsave(&port->lock, flags);
+		uart_port_lock_irqsave(port, &flags);
 	}
 	writel(c, port->membase + ALTERA_JTAGUART_DATA_REG);
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }
 #endif
 
diff --git a/drivers/tty/serial/altera_uart.c b/drivers/tty/serial/altera_uart.c
index a9c419421..77835ac68 100644
--- a/drivers/tty/serial/altera_uart.c
+++ b/drivers/tty/serial/altera_uart.c
@@ -164,13 +164,13 @@ static void altera_uart_break_ctl(struct uart_port *port, int break_state)
 	struct altera_uart *pp = container_of(port, struct altera_uart, port);
 	unsigned long flags;
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 	if (break_state == -1)
 		pp->imr |= ALTERA_UART_CONTROL_TRBK_MSK;
 	else
 		pp->imr &= ~ALTERA_UART_CONTROL_TRBK_MSK;
 	altera_uart_update_ctrl_reg(pp);
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }
 
 static void altera_uart_set_termios(struct uart_port *port,
@@ -187,10 +187,10 @@ static void altera_uart_set_termios(struct uart_port *port,
 	tty_termios_copy_hw(termios, old);
 	tty_termios_encode_baud_rate(termios, baud, baud);
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 	uart_update_timeout(port, termios->c_cflag, baud);
 	altera_uart_writel(port, baudclk, ALTERA_UART_DIVISOR_REG);
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 
 	/*
	 * FIXME: port->read_status_mask and port->ignore_status_mask
@@ -264,12 +264,12 @@ static irqreturn_t altera_uart_interrupt(int irq, void *data)
 
 	isr = altera_uart_readl(port, ALTERA_UART_STATUS_REG) & pp->imr;
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 	if (isr & ALTERA_UART_STATUS_RRDY_MSK)
 		altera_uart_rx_chars(port);
 	if (isr & ALTERA_UART_STATUS_TRDY_MSK)
 		altera_uart_tx_chars(port);
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 
 	return IRQ_RETVAL(isr);
 }
@@ -313,13 +313,13 @@ static int altera_uart_startup(struct uart_port *port)
 		}
 	}
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 
 	/* Enable RX interrupts now */
 	pp->imr = ALTERA_UART_CONTROL_RRDY_MSK;
 	altera_uart_update_ctrl_reg(pp);
 
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 
 	return 0;
 }
@@ -329,13 +329,13 @@ static void altera_uart_shutdown(struct uart_port *port)
 	struct altera_uart *pp = container_of(port, struct altera_uart, port);
 	unsigned long flags;
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 
 	/* Disable all interrupts now */
 	pp->imr = 0;
 	altera_uart_update_ctrl_reg(pp);
 
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 
 	if (port->irq)
 		free_irq(port->irq, port);
diff --git a/drivers/tty/serial/amba-pl010.c b/drivers/tty/serial/amba-pl010.c
index b5a7404cb..eabbf8afc 100644
--- a/drivers/tty/serial/amba-pl010.c
+++ b/drivers/tty/serial/amba-pl010.c
@@ -207,7 +207,7 @@ static irqreturn_t pl010_int(int irq, void *dev_id)
 	unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT;
 	int handled = 0;
 
-	spin_lock(&port->lock);
+	uart_port_lock(port);
 
 	status = readb(port->membase + UART010_IIR);
 	if (status) {
@@ -228,7 +228,7 @@ static irqreturn_t pl010_int(int irq, void *dev_id)
 		handled = 1;
 	}
 
-	spin_unlock(&port->lock);
+	uart_port_unlock(port);
 
 	return IRQ_RETVAL(handled);
 }
@@ -270,14 +270,14 @@ static void pl010_break_ctl(struct uart_port *port, int break_state)
 	unsigned long flags;
 	unsigned int lcr_h;
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 	lcr_h = readb(port->membase + UART010_LCRH);
 	if (break_state == -1)
 		lcr_h |= UART01x_LCRH_BRK;
 	else
 		lcr_h &= ~UART01x_LCRH_BRK;
 	writel(lcr_h, port->membase + UART010_LCRH);
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }
 
 static int pl010_startup(struct uart_port *port)
@@ -385,7 +385,7 @@ pl010_set_termios(struct uart_port *port, struct ktermios *termios,
 	if (port->fifosize > 1)
 		lcr_h |= UART01x_LCRH_FEN;
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 
 	/*
 	 * Update the per-port timeout.
@@ -438,22 +438,22 @@ pl010_set_termios(struct uart_port *port, struct ktermios *termios,
 	writel(lcr_h, port->membase + UART010_LCRH);
 	writel(old_cr, port->membase + UART010_CR);
 
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }
 
 static void pl010_set_ldisc(struct uart_port *port, struct ktermios *termios)
 {
 	if (termios->c_line == N_PPS) {
 		port->flags |= UPF_HARDPPS_CD;
-		spin_lock_irq(&port->lock);
+		uart_port_lock_irq(port);
 		pl010_enable_ms(port);
-		spin_unlock_irq(&port->lock);
+		uart_port_unlock_irq(port);
 	} else {
 		port->flags &= ~UPF_HARDPPS_CD;
 		if (!UART_ENABLE_MS(port, termios->c_cflag)) {
-			spin_lock_irq(&port->lock);
+			uart_port_lock_irq(port);
 			pl010_disable_ms(port);
-			spin_unlock_irq(&port->lock);
+			uart_port_unlock_irq(port);
 		}
 	}
 }
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index bb31ac9ca..1b265d358 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -2335,13 +2335,10 @@ pl011_console_write(struct console *co, const char *s, unsigned int count)

clk_enable(uap->clk);

- local_irq_save(flags);
- if (uap->port.sysrq)
- locked = 0;
- else if (oops_in_progress)
- locked = uart_port_trylock(&uap->port);
- else
- uart_port_lock(&uap->port);
+ if (uap->port.sysrq || oops_in_progress)
+ locked = uart_port_trylock_irqsave(&uap->port, &flags);
+ else
+ uart_port_lock_irqsave(&uap->port, &flags);

/*
* First save the CR then disable the interrupts
@@ -2367,8 +2364,7 @@ pl011_console_write(struct console *co, const char *s, unsigned int count)
pl011_write(old_cr, uap, REG_CR);

if (locked)
- uart_port_unlock(&uap->port);
- local_irq_restore(flags);
+ uart_port_unlock_irqrestore(&uap->port, flags);

clk_disable(uap->clk);
}
diff --git a/drivers/tty/serial/apbuart.c b/drivers/tty/serial/apbuart.c
index d3cb341f2..364599f25 100644
--- a/drivers/tty/serial/apbuart.c
+++ b/drivers/tty/serial/apbuart.c
@@ -133,7 +133,7 @@ static irqreturn_t apbuart_int(int irq, void *dev_id)
struct uart_port *port = dev_id;
unsigned int status;

- spin_lock(&port->lock);
+ uart_port_lock(port);

status = UART_GET_STATUS(port);
if (status & UART_STATUS_DR)
@@ -141,7 +141,7 @@ static irqreturn_t apbuart_int(int irq, void *dev_id)
if (status & UART_STATUS_THE)
apbuart_tx_chars(port);

- spin_unlock(&port->lock);
+ uart_port_unlock(port);

return IRQ_HANDLED;
}
@@ -228,7 +228,7 @@ static void apbuart_set_termios(struct uart_port *port,
if (termios->c_cflag & CRTSCTS)
cr |= UART_CTRL_FL;

- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);

/* Update the per-port timeout. */
uart_update_timeout(port, termios->c_cflag, baud);
@@ -251,7 +251,7 @@ static void apbuart_set_termios(struct uart_port *port,
UART_PUT_SCAL(port, quot);
UART_PUT_CTRL(port, cr);

- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}

static const char *apbuart_type(struct uart_port *port)
diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c
index 924c1a893..ffd234673 100644
--- a/drivers/tty/serial/ar933x_uart.c
+++ b/drivers/tty/serial/ar933x_uart.c
@@ -133,9 +133,9 @@ static unsigned int ar933x_uart_tx_empty(struct uart_port *port)
unsigned long flags;
unsigned int rdata;

- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
rdata = ar933x_uart_read(up, AR933X_UART_DATA_REG);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);

return (rdata & AR933X_UART_DATA_TX_CSR) ? 0 : TIOCSER_TEMT;
}
@@ -220,14 +220,14 @@ static void ar933x_uart_break_ctl(struct uart_port *port, int break_state)
container_of(port, struct ar933x_uart_port, port);
unsigned long flags;

- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);
if (break_state == -1)
ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
AR933X_UART_CS_TX_BREAK);
else
ar933x_uart_rmw_clear(up, AR933X_UART_CS_REG,
AR933X_UART_CS_TX_BREAK);
- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
}

/*
@@ -318,7 +318,7 @@ static void ar933x_uart_set_termios(struct uart_port *port,
* Ok, we're now changing the port state. Do it with
* interrupts disabled.
*/
- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);

/* disable the UART */
ar933x_uart_rmw_clear(up, AR933X_UART_CS_REG,
@@ -352,7 +352,7 @@ static void ar933x_uart_set_termios(struct uart_port *port,
AR933X_UART_CS_IF_MODE_M << AR933X_UART_CS_IF_MODE_S,
AR933X_UART_CS_IF_MODE_DCE << AR933X_UART_CS_IF_MODE_S);

- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);

if (tty_termios_baud_rate(new))
tty_termios_encode_baud_rate(new, baud, baud);
@@ -450,7 +450,7 @@ static irqreturn_t ar933x_uart_interrupt(int irq, void *dev_id)
if ((status & AR933X_UART_CS_HOST_INT) == 0)
return IRQ_NONE;

- spin_lock(&up->port.lock);
+ uart_port_lock(&up->port);

status = ar933x_uart_read(up, AR933X_UART_INT_REG);
status &= ar933x_uart_read(up, AR933X_UART_INT_EN_REG);
@@ -468,7 +468,7 @@ static irqreturn_t ar933x_uart_interrupt(int irq, void *dev_id)
ar933x_uart_tx_chars(up);
}

- spin_unlock(&up->port.lock);
+ uart_port_unlock(&up->port);

return IRQ_HANDLED;
}
@@ -485,7 +485,7 @@ static int ar933x_uart_startup(struct uart_port *port)
if (ret)
return ret;

- spin_lock_irqsave(&up->port.lock, flags);
+ uart_port_lock_irqsave(&up->port, &flags);

/* Enable HOST interrupts */
ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
@@ -498,7 +498,7 @@ static int ar933x_uart_startup(struct uart_port *port)
/* Enable RX interrupts */
ar933x_uart_start_rx_interrupt(up);

- spin_unlock_irqrestore(&up->port.lock, flags);
+ uart_port_unlock_irqrestore(&up->port, flags);

return 0;
}
@@ -632,9 +632,9 @@ static void ar933x_uart_console_write(struct console *co, const char *s,
if (up->port.sysrq)
locked = 0;
else if (oops_in_progress)
- locked = spin_trylock(&up->port.lock);
+ locked = uart_port_trylock(&up->port);
else
- spin_lock(&up->port.lock);
+ uart_port_lock(&up->port);

/*
* First save the IER then disable the interrupts
@@ -654,7 +654,7 @@ static void ar933x_uart_console_write(struct console *co, const char *s,
ar933x_uart_write(up, AR933X_UART_INT_REG, AR933X_UART_INT_ALLINTS);

if (locked)
- spin_unlock(&up->port.lock);
+ uart_port_unlock(&up->port);

local_irq_restore(flags);
}
diff --git a/drivers/tty/serial/arc_uart.c b/drivers/tty/serial/arc_uart.c
index ad4ae19b6..1aa5b2b49 100644
--- a/drivers/tty/serial/arc_uart.c
+++ b/drivers/tty/serial/arc_uart.c
@@ -279,9 +279,9 @@ static irqreturn_t arc_serial_isr(int irq, void *dev_id)
if (status & RXIENB) {

/* already in ISR, no need of xx_irqsave */
- spin_lock(&port->lock);
+ uart_port_lock(port);
arc_serial_rx_chars(port, status);
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
}

if ((status & TXIENB) && (status & TXEMPTY)) {
@@ -291,12 +291,12 @@ static irqreturn_t arc_serial_isr(int irq, void *dev_id)
*/
UART_TX_IRQ_DISABLE(port);

- spin_lock(&port->lock);
+ uart_port_lock(port);

if (!uart_tx_stopped(port))
arc_serial_tx_chars(port);

- spin_unlock(&port->lock);
+ uart_port_unlock(port);
}

return IRQ_HANDLED;
@@ -366,7 +366,7 @@ arc_serial_set_termios(struct uart_port *port, struct ktermios *new,
uartl = hw_val & 0xFF;
uarth = (hw_val >> 8) & 0xFF;

- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);

UART_ALL_IRQ_DISABLE(port);

@@ -391,7 +391,7 @@ arc_serial_set_termios(struct uart_port *port, struct ktermios *new,

uart_update_timeout(port, new->c_cflag, baud);

- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}

static const char *arc_serial_type(struct uart_port *port)
@@ -521,9 +521,9 @@ static void arc_serial_console_write(struct console *co, const char *s,
struct uart_port *port = &arc_uart_ports[co->index].port;
unsigned long flags;

- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
uart_console_write(port, s, count, arc_serial_console_putchar);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}

static struct console arc_console = {
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index bcca5627a..5a3f2fc47 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -861,7 +861,7 @@ static void atmel_complete_tx_dma(void *arg)
struct dma_chan *chan = atmel_port->chan_tx;
unsigned long flags;

- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);

if (chan)
dmaengine_terminate_all(chan);
@@ -893,7 +893,7 @@ static void atmel_complete_tx_dma(void *arg)
atmel_port->tx_done_mask);
}

- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}

static void atmel_release_tx_dma(struct uart_port *port)
@@ -1711,9 +1711,9 @@ static void atmel_tasklet_rx_func(struct tasklet_struct *t)
struct uart_port *port = &atmel_port->uart;

/* The interrupt handler does not take the lock */
- spin_lock(&port->lock);
+ uart_port_lock(port);
atmel_port->schedule_rx(port);
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
}

static void atmel_tasklet_tx_func(struct tasklet_struct *t)
@@ -1723,9 +1723,9 @@ static void atmel_tasklet_tx_func(struct tasklet_struct *t)
struct uart_port *port = &atmel_port->uart;

/* The interrupt handler does not take the lock */
- spin_lock(&port->lock);
+ uart_port_lock(port);
atmel_port->schedule_tx(port);
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
}

static void atmel_init_property(struct atmel_uart_port *atmel_port,
@@ -2175,7 +2175,7 @@ static void atmel_set_termios(struct uart_port *port,
} else
mode |= ATMEL_US_PAR_NONE;

- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);

port->read_status_mask = ATMEL_US_OVRE;
if (termios->c_iflag & INPCK)
@@ -2377,22 +2377,22 @@ static void atmel_set_termios(struct uart_port *port,
else
atmel_disable_ms(port);

- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}

static void atmel_set_ldisc(struct uart_port *port, struct ktermios *termios)
{
if (termios->c_line == N_PPS) {
port->flags |= UPF_HARDPPS_CD;
- spin_lock_irq(&port->lock);
+ uart_port_lock_irq(port);
atmel_enable_ms(port);
- spin_unlock_irq(&port->lock);
+ uart_port_unlock_irq(port);
} else {
port->flags &= ~UPF_HARDPPS_CD;
if (!UART_ENABLE_MS(port, termios->c_cflag)) {
- spin_lock_irq(&port->lock);
+ uart_port_lock_irq(port);
atmel_disable_ms(port);
- spin_unlock_irq(&port->lock);
+ uart_port_unlock_irq(port);
}
}
}
diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c
index 44c27e5ce..b104c36ce 100644
--- a/drivers/tty/serial/bcm63xx_uart.c
+++ b/drivers/tty/serial/bcm63xx_uart.c
@@ -201,7 +201,7 @@ static void bcm_uart_break_ctl(struct uart_port *port, int ctl)
unsigned long flags;
unsigned int val;

- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);

val = bcm_uart_readl(port, UART_CTL_REG);
if (ctl)
@@ -210,7 +210,7 @@ static void bcm_uart_break_ctl(struct uart_port *port, int ctl)
val &= ~UART_CTL_XMITBRK_MASK;
bcm_uart_writel(port, val, UART_CTL_REG);

- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}

/*
@@ -335,7 +335,7 @@ static irqreturn_t bcm_uart_interrupt(int irq, void *dev_id)
unsigned int irqstat;

port = dev_id;
- spin_lock(&port->lock);
+ uart_port_lock(port);

irqstat = bcm_uart_readl(port, UART_IR_REG);
if (irqstat & UART_RX_INT_STAT)
@@ -356,7 +356,7 @@ static irqreturn_t bcm_uart_interrupt(int irq, void *dev_id)
estat & UART_EXTINP_DCD_MASK);
}

- spin_unlock(&port->lock);
+ uart_port_unlock(port);
return IRQ_HANDLED;
}

@@ -454,9 +454,9 @@ static void bcm_uart_shutdown(struct uart_port *port)
{
unsigned long flags;

- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
bcm_uart_writel(port, 0, UART_IR_REG);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);

bcm_uart_disable(port);
bcm_uart_flush(port);
@@ -473,7 +473,7 @@ static void bcm_uart_set_termios(struct uart_port *port, struct ktermios *new,
unsigned long flags;
int tries;

- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);

/* Drain the hot tub fully before we power it off for the winter. */
for (tries = 3; !bcm_uart_tx_empty(port) && tries; tries--)
@@ -549,7 +549,7 @@ static void bcm_uart_set_termios(struct uart_port *port, struct ktermios *new,

uart_update_timeout(port, new->c_cflag, baud);
bcm_uart_enable(port);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}

/*
@@ -715,9 +715,9 @@ static void bcm_console_write(struct console *co, const char *s,
/* bcm_uart_interrupt() already took the lock */
locked = 0;
} else if (oops_in_progress) {
- locked = spin_trylock(&port->lock);
+ locked = uart_port_trylock(port);
} else {
- spin_lock(&port->lock);
+ uart_port_lock(port);
locked = 1;
}

@@ -728,7 +728,7 @@ static void bcm_console_write(struct console *co, const char *s,
wait_for_xmitr(port);

if (locked)
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
local_irq_restore(flags);
}

diff --git a/drivers/tty/serial/cpm_uart.c b/drivers/tty/serial/cpm_uart.c
index 626423022..be4af6eda 100644
--- a/drivers/tty/serial/cpm_uart.c
+++ b/drivers/tty/serial/cpm_uart.c
@@ -569,7 +569,7 @@ static void cpm_uart_set_termios(struct uart_port *port,
if ((termios->c_cflag & CREAD) == 0)
port->read_status_mask &= ~BD_SC_EMPTY;

- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);

if (IS_SMC(pinfo)) {
unsigned int bits = tty_get_frame_size(termios->c_cflag);
@@ -609,7 +609,7 @@ static void cpm_uart_set_termios(struct uart_port *port,
clk_set_rate(pinfo->clk, baud);
else
cpm_setbrg(pinfo->brg - 1, baud);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}

static const char *cpm_uart_type(struct uart_port *port)
@@ -1386,9 +1386,9 @@ static void cpm_uart_console_write(struct console *co, const char *s,
cpm_uart_early_write(pinfo, s, count, true);
local_irq_restore(flags);
} else {
- spin_lock_irqsave(&pinfo->port.lock, flags);
+ uart_port_lock_irqsave(&pinfo->port, &flags);
cpm_uart_early_write(pinfo, s, count, true);
- spin_unlock_irqrestore(&pinfo->port.lock, flags);
+ uart_port_unlock_irqrestore(&pinfo->port, flags);
}
}

diff --git a/drivers/tty/serial/digicolor-usart.c b/drivers/tty/serial/digicolor-usart.c
index 128b5479e..5004125f3 100644
--- a/drivers/tty/serial/digicolor-usart.c
+++ b/drivers/tty/serial/digicolor-usart.c
@@ -133,7 +133,7 @@ static void digicolor_uart_rx(struct uart_port *port)
{
unsigned long flags;

- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);

while (1) {
u8 status, ch, ch_flag;
@@ -172,7 +172,7 @@ static void digicolor_uart_rx(struct uart_port *port)
ch_flag);
}

- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);

tty_flip_buffer_push(&port->state->port);
}
@@ -185,7 +185,7 @@ static void digicolor_uart_tx(struct uart_port *port)
if (digicolor_uart_tx_full(port))
return;

- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);

if (port->x_char) {
writeb_relaxed(port->x_char, port->membase + UA_EMI_REC);
@@ -211,7 +211,7 @@ static void digicolor_uart_tx(struct uart_port *port)
uart_write_wakeup(port);

out:
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}

static irqreturn_t digicolor_uart_int(int irq, void *dev_id)
@@ -333,7 +333,7 @@ static void digicolor_uart_set_termios(struct uart_port *port,
port->ignore_status_mask |= UA_STATUS_OVERRUN_ERR
| UA_STATUS_PARITY_ERR | UA_STATUS_FRAME_ERR;

- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);

uart_update_timeout(port, termios->c_cflag, baud);

@@ -341,7 +341,7 @@ static void digicolor_uart_set_termios(struct uart_port *port,
writeb_relaxed(divisor & 0xff, port->membase + UA_HBAUD_LO);
writeb_relaxed(divisor >> 8, port->membase + UA_HBAUD_HI);

- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}

static const char *digicolor_uart_type(struct uart_port *port)
@@ -398,14 +398,14 @@ static void digicolor_uart_console_write(struct console *co, const char *c,
int locked = 1;

if (oops_in_progress)
- locked = spin_trylock_irqsave(&port->lock, flags);
+ locked = uart_port_trylock_irqsave(port, &flags);
else
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);

uart_console_write(port, c, n, digicolor_uart_console_putchar);

if (locked)
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);

/* Wait for transmitter to become empty */
do {
diff --git a/drivers/tty/serial/dz.c b/drivers/tty/serial/dz.c
index 667f52e83..6df7af9ed 100644
--- a/drivers/tty/serial/dz.c
+++ b/drivers/tty/serial/dz.c
@@ -268,9 +268,9 @@ static inline void dz_transmit_chars(struct dz_mux *mux)
}
/* If nothing to do or stopped or hardware stopped. */
if (uart_circ_empty(xmit) || uart_tx_stopped(&dport->port)) {
- spin_lock(&dport->port.lock);
+ uart_port_lock(&dport->port);
dz_stop_tx(&dport->port);
- spin_unlock(&dport->port.lock);
+ uart_port_unlock(&dport->port);
return;
}

@@ -287,9 +287,9 @@ static inline void dz_transmit_chars(struct dz_mux *mux)

/* Are we are done. */
if (uart_circ_empty(xmit)) {
- spin_lock(&dport->port.lock);
+ uart_port_lock(&dport->port);
dz_stop_tx(&dport->port);
- spin_unlock(&dport->port.lock);
+ uart_port_unlock(&dport->port);
}
}

@@ -415,14 +415,14 @@ static int dz_startup(struct uart_port *uport)
return ret;
}

- spin_lock_irqsave(&dport->port.lock, flags);
+ uart_port_lock_irqsave(&dport->port, &flags);

/* Enable interrupts. */
tmp = dz_in(dport, DZ_CSR);
tmp |= DZ_RIE | DZ_TIE;
dz_out(dport, DZ_CSR, tmp);

- spin_unlock_irqrestore(&dport->port.lock, flags);
+ uart_port_unlock_irqrestore(&dport->port, flags);

return 0;
}
@@ -443,9 +443,9 @@ static void dz_shutdown(struct uart_port *uport)
int irq_guard;
u16 tmp;

- spin_lock_irqsave(&dport->port.lock, flags);
+ uart_port_lock_irqsave(&dport->port, &flags);
dz_stop_tx(&dport->port);
- spin_unlock_irqrestore(&dport->port.lock, flags);
+ uart_port_unlock_irqrestore(&dport->port, flags);

irq_guard = atomic_add_return(-1, &mux->irq_guard);
if (!irq_guard) {
@@ -491,14 +491,14 @@ static void dz_break_ctl(struct uart_port *uport, int break_state)
unsigned long flags;
unsigned short tmp, mask = 1 << dport->port.line;

- spin_lock_irqsave(&uport->lock, flags);
+ uart_port_lock_irqsave(uport, &flags);
tmp = dz_in(dport, DZ_TCR);
if (break_state)
tmp |= mask;
else
tmp &= ~mask;
dz_out(dport, DZ_TCR, tmp);
- spin_unlock_irqrestore(&uport->lock, flags);
+ uart_port_unlock_irqrestore(uport, flags);
}

static int dz_encode_baud_rate(unsigned int baud)
@@ -608,7 +608,7 @@ static void dz_set_termios(struct uart_port *uport, struct ktermios *termios,
if (termios->c_cflag & CREAD)
cflag |= DZ_RXENAB;

- spin_lock_irqsave(&dport->port.lock, flags);
+ uart_port_lock_irqsave(&dport->port, &flags);

uart_update_timeout(uport, termios->c_cflag, baud);

@@ -631,7 +631,7 @@ static void dz_set_termios(struct uart_port *uport, struct ktermios *termios,
if (termios->c_iflag & IGNBRK)
dport->port.ignore_status_mask |= DZ_BREAK;

- spin_unlock_irqrestore(&dport->port.lock, flags);
+ uart_port_unlock_irqrestore(&dport->port, flags);
}

/*
@@ -645,12 +645,12 @@ static void dz_pm(struct uart_port *uport, unsigned int state,
struct dz_port *dport = to_dport(uport);
unsigned long flags;

- spin_lock_irqsave(&dport->port.lock, flags);
+ uart_port_lock_irqsave(&dport->port, &flags);
if (state < 3)
dz_start_tx(&dport->port);
else
dz_stop_tx(&dport->port);
- spin_unlock_irqrestore(&dport->port.lock, flags);
+ uart_port_unlock_irqrestore(&dport->port, flags);
}


@@ -811,7 +811,7 @@ static void dz_console_putchar(struct uart_port *uport, unsigned char ch)
unsigned short csr, tcr, trdy, mask;
int loops = 10000;

- spin_lock_irqsave(&dport->port.lock, flags);
+ uart_port_lock_irqsave(&dport->port, &flags);
csr = dz_in(dport, DZ_CSR);
dz_out(dport, DZ_CSR, csr & ~DZ_TIE);
tcr = dz_in(dport, DZ_TCR);
@@ -819,7 +819,7 @@ static void dz_console_putchar(struct uart_port *uport, unsigned char ch)
mask = tcr;
dz_out(dport, DZ_TCR, mask);
iob();
- spin_unlock_irqrestore(&dport->port.lock, flags);
+ uart_port_unlock_irqrestore(&dport->port, flags);

do {
trdy = dz_in(dport, DZ_CSR);
diff --git a/drivers/tty/serial/fsl_linflexuart.c b/drivers/tty/serial/fsl_linflexuart.c
index 249cb380c..7fa809a40 100644
--- a/drivers/tty/serial/fsl_linflexuart.c
+++ b/drivers/tty/serial/fsl_linflexuart.c
@@ -203,7 +203,7 @@ static irqreturn_t linflex_txint(int irq, void *dev_id)
struct circ_buf *xmit = &sport->state->xmit;
unsigned long flags;

- spin_lock_irqsave(&sport->lock, flags);
+ uart_port_lock_irqsave(sport, &flags);

if (sport->x_char) {
linflex_put_char(sport, sport->x_char);
@@ -217,7 +217,7 @@ static irqreturn_t linflex_txint(int irq, void *dev_id)

linflex_transmit_buffer(sport);
out:
- spin_unlock_irqrestore(&sport->lock, flags);
+ uart_port_unlock_irqrestore(sport, flags);
return IRQ_HANDLED;
}

@@ -230,7 +230,7 @@ static irqreturn_t linflex_rxint(int irq, void *dev_id)
unsigned char rx;
bool brk;

- spin_lock_irqsave(&sport->lock, flags);
+ uart_port_lock_irqsave(sport, &flags);

status = readl(sport->membase + UARTSR);
while (status & LINFLEXD_UARTSR_RMB) {
@@ -266,7 +266,7 @@ static irqreturn_t linflex_rxint(int irq, void *dev_id)
}
}

- spin_unlock_irqrestore(&sport->lock, flags);
+ uart_port_unlock_irqrestore(sport, flags);

tty_flip_buffer_push(port);

@@ -369,11 +369,11 @@ static int linflex_startup(struct uart_port *port)
int ret = 0;
unsigned long flags;

- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);

linflex_setup_watermark(port);

- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);

ret = devm_request_irq(port->dev, port->irq, linflex_int, 0,
DRIVER_NAME, port);
@@ -386,14 +386,14 @@ static void linflex_shutdown(struct uart_port *port)
unsigned long ier;
unsigned long flags;

- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);

/* disable interrupts */
ier = readl(port->membase + LINIER);
ier &= ~(LINFLEXD_LINIER_DRIE | LINFLEXD_LINIER_DTIE);
writel(ier, port->membase + LINIER);

- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);

devm_free_irq(port->dev, port->irq, port);
}
@@ -474,7 +474,7 @@ linflex_set_termios(struct uart_port *port, struct ktermios *termios,
cr &= ~LINFLEXD_UARTCR_PCE;
}

- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);

port->read_status_mask = 0;

@@ -507,7 +507,7 @@ linflex_set_termios(struct uart_port *port, struct ktermios *termios,

writel(cr1, port->membase + LINCR1);

- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}

static const char *linflex_type(struct uart_port *port)
@@ -646,14 +646,14 @@ linflex_console_write(struct console *co, const char *s, unsigned int count)
if (sport->sysrq)
locked = 0;
else if (oops_in_progress)
- locked = spin_trylock_irqsave(&sport->lock, flags);
+ locked = uart_port_trylock_irqsave(sport, &flags);
else
- spin_lock_irqsave(&sport->lock, flags);
+ uart_port_lock_irqsave(sport, &flags);

linflex_string_write(sport, s, count);

if (locked)
- spin_unlock_irqrestore(&sport->lock, flags);
+ uart_port_unlock_irqrestore(sport, flags);
}

/*
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 8bd0f8e45..58c2c460c 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -532,9 +532,9 @@ static void lpuart_dma_tx_complete(void *arg)
struct dma_chan *chan = sport->dma_tx_chan;
unsigned long flags;

- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
if (!sport->dma_tx_in_progress) {
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
return;
}

@@ -543,7 +543,7 @@ static void lpuart_dma_tx_complete(void *arg)

uart_xmit_advance(&sport->port, sport->dma_tx_bytes);
sport->dma_tx_in_progress = false;
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);

if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(&sport->port);
@@ -553,12 +553,12 @@ static void lpuart_dma_tx_complete(void *arg)
return;
}

- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);

if (!lpuart_stopped_or_empty(&sport->port))
lpuart_dma_tx(sport);

- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
}

static dma_addr_t lpuart_dma_datareg_addr(struct lpuart_port *sport)
@@ -651,7 +651,7 @@ static int lpuart_poll_init(struct uart_port *port)

sport->port.fifosize = 0;

- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
/* Disable Rx & Tx */
writeb(0, sport->port.membase + UARTCR2);

@@ -675,7 +675,7 @@ static int lpuart_poll_init(struct uart_port *port)

/* Enable Rx and Tx */
writeb(UARTCR2_RE | UARTCR2_TE, sport->port.membase + UARTCR2);
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);

return 0;
}
@@ -703,7 +703,7 @@ static int lpuart32_poll_init(struct uart_port *port)

sport->port.fifosize = 0;

- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);

/* Disable Rx & Tx */
lpuart32_write(&sport->port, 0, UARTCTRL);
@@ -724,7 +724,7 @@ static int lpuart32_poll_init(struct uart_port *port)

/* Enable Rx and Tx */
lpuart32_write(&sport->port, UARTCTRL_RE | UARTCTRL_TE, UARTCTRL);
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);

return 0;
}
@@ -879,9 +879,9 @@ static unsigned int lpuart32_tx_empty(struct uart_port *port)

static void lpuart_txint(struct lpuart_port *sport)
{
- spin_lock(&sport->port.lock);
+ uart_port_lock(&sport->port);
lpuart_transmit_buffer(sport);
- spin_unlock(&sport->port.lock);
+ uart_port_unlock(&sport->port);
}

static void lpuart_rxint(struct lpuart_port *sport)
@@ -890,7 +890,7 @@ static void lpuart_rxint(struct lpuart_port *sport)
struct tty_port *port = &sport->port.state->port;
unsigned char rx, sr;

- spin_lock(&sport->port.lock);
+ uart_port_lock(&sport->port);

while (!(readb(sport->port.membase + UARTSFIFO) & UARTSFIFO_RXEMPT)) {
flg = TTY_NORMAL;
@@ -956,9 +956,9 @@ static void lpuart_rxint(struct lpuart_port *sport)

static void lpuart32_txint(struct lpuart_port *sport)
{
- spin_lock(&sport->port.lock);
+ uart_port_lock(&sport->port);
lpuart32_transmit_buffer(sport);
- spin_unlock(&sport->port.lock);
+ uart_port_unlock(&sport->port);
}

static void lpuart32_rxint(struct lpuart_port *sport)
@@ -968,7 +968,7 @@ static void lpuart32_rxint(struct lpuart_port *sport)
unsigned long rx, sr;
bool is_break;

- spin_lock(&sport->port.lock);
+ uart_port_lock(&sport->port);

while (!(lpuart32_read(&sport->port, UARTFIFO) & UARTFIFO_RXEMPT)) {
flg = TTY_NORMAL;
@@ -1170,12 +1170,12 @@ static void lpuart_copy_rx_to_tty(struct lpuart_port *sport)

async_tx_ack(sport->dma_rx_desc);

- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);

dmastat = dmaengine_tx_status(chan, sport->dma_rx_cookie, &state);
if (dmastat == DMA_ERROR) {
dev_err(sport->port.dev, "Rx DMA transfer failed!\n");
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
return;
}

@@ -1244,7 +1244,7 @@ static void lpuart_copy_rx_to_tty(struct lpuart_port *sport)
dma_sync_sg_for_device(chan->device->dev, &sport->rx_sgl, 1,
DMA_FROM_DEVICE);

- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);

tty_flip_buffer_push(port);
if (!sport->dma_idle_int)
@@ -1335,9 +1335,9 @@ static void lpuart_timer_func(struct timer_list *t)
mod_timer(&sport->lpuart_timer,
jiffies + sport->dma_rx_timeout);

- if (spin_trylock_irqsave(&sport->port.lock, flags)) {
+ if (uart_port_trylock_irqsave(&sport->port, &flags)) {
sport->last_residue = state.residue;
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
}
}

@@ -1802,14 +1802,14 @@ static void lpuart_hw_setup(struct lpuart_port *sport)
{
unsigned long flags;

- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);

lpuart_setup_watermark_enable(sport);

lpuart_rx_dma_startup(sport);
lpuart_tx_dma_startup(sport);

- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
}

static int lpuart_startup(struct uart_port *port)
@@ -1859,7 +1859,7 @@ static void lpuart32_hw_setup(struct lpuart_port *sport)
{
unsigned long flags;

- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);

lpuart32_hw_disable(sport);

@@ -1869,7 +1869,7 @@ static void lpuart32_hw_setup(struct lpuart_port *sport)
lpuart32_setup_watermark_enable(sport);
lpuart32_configure(sport);

- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
}

static int lpuart32_startup(struct uart_port *port)
@@ -1932,7 +1932,7 @@ static void lpuart_shutdown(struct uart_port *port)
unsigned char temp;
unsigned long flags;

- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);

/* disable Rx/Tx and interrupts */
temp = readb(port->membase + UARTCR2);
@@ -1940,7 +1940,7 @@ static void lpuart_shutdown(struct uart_port *port)
UARTCR2_TIE | UARTCR2_TCIE | UARTCR2_RIE);
writeb(temp, port->membase + UARTCR2);

- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);

lpuart_dma_shutdown(sport);
}
@@ -1952,7 +1952,7 @@ static void lpuart32_shutdown(struct uart_port *port)
unsigned long temp;
unsigned long flags;

- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);

/* clear status */
temp = lpuart32_read(&sport->port, UARTSTAT);
@@ -1969,7 +1969,7 @@ static void lpuart32_shutdown(struct uart_port *port)
UARTCTRL_TIE | UARTCTRL_TCIE | UARTCTRL_RIE | UARTCTRL_SBK);
lpuart32_write(port, temp, UARTCTRL);

- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);

lpuart_dma_shutdown(sport);
}
@@ -2069,7 +2069,7 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios,
if (old && sport->lpuart_dma_rx_use)
lpuart_dma_rx_free(&sport->port);

- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);

sport->port.read_status_mask = 0;
if (termios->c_iflag & INPCK)
@@ -2124,7 +2124,7 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios,
sport->lpuart_dma_rx_use = false;
}

- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
}

static void __lpuart32_serial_setbrg(struct uart_port *port,
@@ -2304,7 +2304,7 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
if (old && sport->lpuart_dma_rx_use)
lpuart_dma_rx_free(&sport->port);

- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);

sport->port.read_status_mask = 0;
if (termios->c_iflag & INPCK)
@@ -2362,7 +2362,7 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
sport->lpuart_dma_rx_use = false;
}

- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
}

static const char *lpuart_type(struct uart_port *port)
@@ -2480,9 +2480,9 @@ lpuart_console_write(struct console *co, const char *s, unsigned int count)
int locked = 1;

if (oops_in_progress)
- locked = spin_trylock_irqsave(&sport->port.lock, flags);
+ locked = uart_port_trylock_irqsave(&sport->port, &flags);
else
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);

/* first save CR2 and then disable interrupts */
cr2 = old_cr2 = readb(sport->port.membase + UARTCR2);
@@ -2498,7 +2498,7 @@ lpuart_console_write(struct console *co, const char *s, unsigned int count)
writeb(old_cr2, sport->port.membase + UARTCR2);

if (locked)
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
}

static void
@@ -2510,9 +2510,9 @@ lpuart32_console_write(struct console *co, const char *s, unsigned int count)
int locked = 1;

if (oops_in_progress)
- locked = spin_trylock_irqsave(&sport->port.lock, flags);
+ locked = uart_port_trylock_irqsave(&sport->port, &flags);
else
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);

/* first save CR2 and then disable interrupts */
cr = old_cr = lpuart32_read(&sport->port, UARTCTRL);
@@ -2528,7 +2528,7 @@ lpuart32_console_write(struct console *co, const char *s, unsigned int count)
lpuart32_write(&sport->port, old_cr, UARTCTRL);

if (locked)
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
}

/*
@@ -3093,7 +3093,7 @@ static int lpuart_suspend(struct device *dev)
uart_suspend_port(&lpuart_reg, &sport->port);

if (lpuart_uport_is_active(sport)) {
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
if (lpuart_is_32(sport)) {
/* disable Rx/Tx and interrupts */
temp = lpuart32_read(&sport->port, UARTCTRL);
@@ -3105,7 +3105,7 @@ static int lpuart_suspend(struct device *dev)
temp &= ~(UARTCR2_TE | UARTCR2_TIE | UARTCR2_TCIE);
writeb(temp, sport->port.membase + UARTCR2);
}
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);

if (sport->lpuart_dma_rx_use) {
/*
@@ -3118,7 +3118,7 @@ static int lpuart_suspend(struct device *dev)
lpuart_dma_rx_free(&sport->port);

/* Disable Rx DMA to use UART port as wakeup source */
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
if (lpuart_is_32(sport)) {
temp = lpuart32_read(&sport->port, UARTBAUD);
lpuart32_write(&sport->port, temp & ~UARTBAUD_RDMAE,
@@ -3127,11 +3127,11 @@ static int lpuart_suspend(struct device *dev)
writeb(readb(sport->port.membase + UARTCR5) &
~UARTCR5_RDMAS, sport->port.membase + UARTCR5);
}
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
}

if (sport->lpuart_dma_tx_use) {
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
if (lpuart_is_32(sport)) {
temp = lpuart32_read(&sport->port, UARTBAUD);
temp &= ~UARTBAUD_TDMAE;
@@ -3141,7 +3141,7 @@ static int lpuart_suspend(struct device *dev)
temp &= ~UARTCR5_TDMAS;
writeb(temp, sport->port.membase + UARTCR5);
}
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
sport->dma_tx_in_progress = false;
dmaengine_terminate_sync(sport->dma_tx_chan);
}
diff --git a/drivers/tty/serial/icom.c b/drivers/tty/serial/icom.c
index 819f957b6..a75eafbcb 100644
--- a/drivers/tty/serial/icom.c
+++ b/drivers/tty/serial/icom.c
@@ -929,7 +929,7 @@ static inline void check_modem_status(struct icom_port *icom_port)
char delta_status;
unsigned char status;

- spin_lock(&icom_port->uart_port.lock);
+ uart_port_lock(&icom_port->uart_port);

/*modem input register */
status = readb(&icom_port->dram->isr);
@@ -951,7 +951,7 @@ static inline void check_modem_status(struct icom_port *icom_port)
port.delta_msr_wait);
old_status = status;
}
- spin_unlock(&icom_port->uart_port.lock);
+ uart_port_unlock(&icom_port->uart_port);
}

static void xmit_interrupt(u16 port_int_reg, struct icom_port *icom_port)
@@ -1093,7 +1093,7 @@ static void process_interrupt(u16 port_int_reg,
struct icom_port *icom_port)
{

- spin_lock(&icom_port->uart_port.lock);
+ uart_port_lock(&icom_port->uart_port);
trace(icom_port, "INTERRUPT", port_int_reg);

if (port_int_reg & (INT_XMIT_COMPLETED | INT_XMIT_DISABLED))
@@ -1102,7 +1102,7 @@ static void process_interrupt(u16 port_int_reg,
if (port_int_reg & INT_RCV_COMPLETED)
recv_interrupt(port_int_reg, icom_port);

- spin_unlock(&icom_port->uart_port.lock);
+ uart_port_unlock(&icom_port->uart_port);
}

static irqreturn_t icom_interrupt(int irq, void *dev_id)
@@ -1186,14 +1186,14 @@ static unsigned int icom_tx_empty(struct uart_port *port)
int ret;
unsigned long flags;

- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
if (le16_to_cpu(icom_port->statStg->xmit[0].flags) &
SA_FLAGS_READY_TO_XMIT)
ret = TIOCSER_TEMT;
else
ret = 0;

- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return ret;
}

@@ -1276,7 +1276,7 @@ static void icom_send_xchar(struct uart_port *port, char ch)

/* wait .1 sec to send char */
for (index = 0; index < 10; index++) {
- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
xdata = readb(&icom_port->dram->xchar);
if (xdata == 0x00) {
trace(icom_port, "QUICK_WRITE", 0);
@@ -1284,10 +1284,10 @@ static void icom_send_xchar(struct uart_port *port, char ch)

/* flush write operation */
xdata = readb(&icom_port->dram->xchar);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
break;
}
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
msleep(10);
}
}
@@ -1307,7 +1307,7 @@ static void icom_break(struct uart_port *port, int break_state)
unsigned char cmdReg;
unsigned long flags;

- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
trace(icom_port, "BREAK", 0);
cmdReg = readb(&icom_port->dram->CmdReg);
if (break_state == -1) {
@@ -1315,7 +1315,7 @@ static void icom_break(struct uart_port *port, int break_state)
} else {
writeb(cmdReg & ~CMD_SND_BREAK, &icom_port->dram->CmdReg);
}
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}

static int icom_open(struct uart_port *port)
@@ -1365,7 +1365,7 @@ static void icom_set_termios(struct uart_port *port, struct ktermios *termios,
unsigned long offset;
unsigned long flags;

- spin_lock_irqsave(&port->lock, flags);
+ uart_port_lock_irqsave(port, &flags);
trace(icom_port, "CHANGE_SPEED", 0);

cflag = termios->c_cflag;
@@ -1516,7 +1516,7 @@ static void icom_set_termios(struct uart_port *port, struct ktermios *termios,
trace(icom_port, "XR_ENAB", 0);
writeb(CMD_XMIT_RCV_ENABLE, &icom_port->dram->CmdReg);

- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}

static const char *icom_type(struct uart_port *port)
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 349d4849b..3934ab8fe 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -586,7 +586,7 @@ static void imx_uart_dma_tx_callback(void *data)
unsigned long flags;
u32 ucr1;

- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);

dma_unmap_sg(sport->port.dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);

@@ -611,7 +611,7 @@ static void imx_uart_dma_tx_callback(void *data)
imx_uart_writel(sport, ucr4, UCR4);
}

- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
}

/* called with port.lock taken and irqs off */
@@ -797,11 +797,11 @@ static irqreturn_t imx_uart_rtsint(int irq, void *dev_id)
struct imx_port *sport = dev_id;
irqreturn_t ret;

- spin_lock(&sport->port.lock);
+ uart_port_lock(&sport->port);

ret = __imx_uart_rtsint(irq, dev_id);

- spin_unlock(&sport->port.lock);
+ uart_port_unlock(&sport->port);

return ret;
}
@@ -810,9 +810,9 @@ static irqreturn_t imx_uart_txint(int irq, void *dev_id)
{
struct imx_port *sport = dev_id;

- spin_lock(&sport->port.lock);
+ uart_port_lock(&sport->port);
imx_uart_transmit_buffer(sport);
- spin_unlock(&sport->port.lock);
+ uart_port_unlock(&sport->port);
return IRQ_HANDLED;
}

@@ -926,11 +926,11 @@ static irqreturn_t imx_uart_rxint(int irq, void *dev_id)
struct imx_port *sport = dev_id;
irqreturn_t ret;

- spin_lock(&sport->port.lock);
+ uart_port_lock(&sport->port);

ret = __imx_uart_rxint(irq, dev_id);

- spin_unlock(&sport->port.lock);
+ uart_port_unlock(&sport->port);

return ret;
}
@@ -993,7 +993,7 @@ static irqreturn_t imx_uart_int(int irq, void *dev_id)
unsigned int usr1, usr2, ucr1, ucr2, ucr3, ucr4;
irqreturn_t ret = IRQ_NONE;

- spin_lock(&sport->port.lock);
+ uart_port_lock(&sport->port);

usr1 = imx_uart_readl(sport, USR1);
usr2 = imx_uart_readl(sport, USR2);
@@ -1063,7 +1063,7 @@ static irqreturn_t imx_uart_int(int irq, void *dev_id)
ret = IRQ_HANDLED;
}

- spin_unlock(&sport->port.lock);
+ uart_port_unlock(&sport->port);

return ret;
}
@@ -1146,7 +1146,7 @@ static void imx_uart_break_ctl(struct uart_port *port, int break_state)
unsigned long flags;
u32 ucr1;

- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);

ucr1 = imx_uart_readl(sport, UCR1) & ~UCR1_SNDBRK;

@@ -1155,7 +1155,7 @@ static void imx_uart_break_ctl(struct uart_port *port, int break_state)

imx_uart_writel(sport, ucr1, UCR1);

- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
}

/*
@@ -1168,9 +1168,9 @@ static void imx_uart_timeout(struct timer_list *t)
unsigned long flags;

if (sport->port.state) {
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
imx_uart_mctrl_check(sport);
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);

mod_timer(&sport->timer, jiffies + MCTRL_TIMEOUT);
}
@@ -1200,9 +1200,9 @@ static void imx_uart_dma_rx_callback(void *data)
status = dmaengine_tx_status(chan, sport->rx_cookie, &state);

if (status == DMA_ERROR) {
- spin_lock(&sport->port.lock);
+ uart_port_lock(&sport->port);
imx_uart_clear_rx_errors(sport);
- spin_unlock(&sport->port.lock);
+ uart_port_unlock(&sport->port);
return;
}

@@ -1231,9 +1231,9 @@ static void imx_uart_dma_rx_callback(void *data)
r_bytes = rx_ring->head - rx_ring->tail;

/* If we received something, check for 0xff flood */
- spin_lock(&sport->port.lock);
+ uart_port_lock(&sport->port);
imx_uart_check_flood(sport, imx_uart_readl(sport, USR2));
- spin_unlock(&sport->port.lock);
+ uart_port_unlock(&sport->port);

if (!(sport->port.ignore_status_mask & URXD_DUMMY_READ)) {

@@ -1491,7 +1491,7 @@ static int imx_uart_startup(struct uart_port *port)
if (!uart_console(port) && imx_uart_dma_init(sport) == 0)
dma_is_inited = 1;

- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);

/* Reset fifo's and state machines */
imx_uart_soft_reset(sport);
@@ -1564,7 +1564,7 @@ static int imx_uart_startup(struct uart_port *port)

imx_uart_disable_loopback_rs485(sport);

- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);

return 0;
}
@@ -1589,21 +1589,21 @@ static void imx_uart_shutdown(struct uart_port *port)
sport->dma_is_rxing = 0;
}

- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
imx_uart_stop_tx(port);
imx_uart_stop_rx(port);
imx_uart_disable_dma(sport);
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
imx_uart_dma_exit(sport);
}

mctrl_gpio_disable_ms(sport->gpios);

- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
ucr2 = imx_uart_readl(sport, UCR2);
ucr2 &= ~(UCR2_TXEN | UCR2_ATEN);
imx_uart_writel(sport, ucr2, UCR2);
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);

/*
* Stop our timer.
@@ -1614,7 +1614,7 @@ static void imx_uart_shutdown(struct uart_port *port)
* Disable all interrupts, port and break condition.
*/

- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);

ucr1 = imx_uart_readl(sport, UCR1);
ucr1 &= ~(UCR1_TRDYEN | UCR1_RRDYEN | UCR1_RTSDEN | UCR1_RXDMAEN |
@@ -1636,7 +1636,7 @@ static void imx_uart_shutdown(struct uart_port *port)
ucr4 &= ~UCR4_TCEN;
imx_uart_writel(sport, ucr4, UCR4);

- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);

clk_disable_unprepare(sport->clk_per);
clk_disable_unprepare(sport->clk_ipg);
@@ -1699,7 +1699,7 @@ imx_uart_set_termios(struct uart_port *port, struct ktermios *termios,
baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 16);
quot = uart_get_divisor(port, baud);

- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);

/*
* Read current UCR2 and save it for future use, then clear all the bits
@@ -1827,7 +1827,7 @@ imx_uart_set_termios(struct uart_port *port, struct ktermios *termios,
if (UART_ENABLE_MS(&sport->port, termios->c_cflag))
imx_uart_enable_ms(&sport->port);

- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
}

static const char *imx_uart_type(struct uart_port *port)
@@ -1889,7 +1889,7 @@ static int imx_uart_poll_init(struct uart_port *port)

imx_uart_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT);

- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);

/*
* Be careful about the order of enabling bits here. First enable the
@@ -1917,7 +1917,7 @@ static int imx_uart_poll_init(struct uart_port *port)
imx_uart_writel(sport, ucr1 | UCR1_RRDYEN, UCR1);
imx_uart_writel(sport, ucr2 | UCR2_ATEN, UCR2);

- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);

return 0;
}
@@ -2037,9 +2037,9 @@ imx_uart_console_write(struct console *co, const char *s, unsigned int count)
if (sport->port.sysrq)
locked = 0;
else if (oops_in_progress)
- locked = spin_trylock_irqsave(&sport->port.lock, flags);
+ locked = uart_port_trylock_irqsave(&sport->port, &flags);
else
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);

/*
* First, save UCR1/2/3 and then disable interrupts
@@ -2067,7 +2067,7 @@ imx_uart_console_write(struct console *co, const char *s, unsigned int count)
imx_uart_ucrs_restore(sport, &old_ucr);

if (locked)
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
}

/*
@@ -2225,10 +2225,10 @@ static enum hrtimer_restart imx_trigger_start_tx(struct hrtimer *t)
struct imx_port *sport = container_of(t, struct imx_port, trigger_start_tx);
unsigned long flags;

- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
if (sport->tx_state == WAIT_AFTER_RTS)
imx_uart_start_tx(&sport->port);
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);

return HRTIMER_NORESTART;
}
@@ -2238,10 +2238,10 @@ static enum hrtimer_restart imx_trigger_stop_tx(struct hrtimer *t)
struct imx_port *sport = container_of(t, struct imx_port, trigger_stop_tx);
unsigned long flags;

- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
if (sport->tx_state == WAIT_AFTER_SEND)
imx_uart_stop_tx(&sport->port);
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);

return HRTIMER_NORESTART;
}
@@ -2508,9 +2508,9 @@ static void imx_uart_restore_context(struct imx_port *sport)
{
unsigned long flags;

- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
if (!sport->context_saved) {
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
return;
}

@@ -2525,7 +2525,7 @@ static void imx_uart_restore_context(struct imx_port *sport)
imx_uart_writel(sport, sport->saved_reg[2], UCR3);
imx_uart_writel(sport, sport->saved_reg[3], UCR4);
sport->context_saved = false;
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
}

static void imx_uart_save_context(struct imx_port *sport)
@@ -2533,7 +2533,7 @@ static void imx_uart_save_context(struct imx_port *sport)
unsigned long flags;

/* Save necessary regs */
- spin_lock_irqsave(&sport->port.lock, flags);
+ uart_port_lock_irqsave(&sport->port, &flags);
sport->saved_reg[0] = imx_uart_readl(sport, UCR1);
sport->saved_reg[1] = imx_uart_readl(sport, UCR2);
sport->saved_reg[2] = imx_uart_readl(sport, UCR3);
@@ -2545,7 +2545,7 @@ static void imx_uart_save_context(struct imx_port *sport)
sport->saved_reg[8] = imx_uart_readl(sport, UBMR);
sport->saved_reg[9] = imx_uart_readl(sport, IMX21_UTS);
sport->context_saved = true;
- spin_unlock_irqrestore(&sport->port.lock, flags);
+ uart_port_unlock_irqrestore(&sport->port, flags);
}

static void imx_uart_enable_wakeup(struct imx_port *sport, bool on)
diff --git a/drivers/tty/serial/ip22zilog.c b/drivers/tty/serial/ip22zilog.c
index 845ff706b..320b29cd4 100644
--- a/drivers/tty/serial/ip22zilog.c
+++ b/drivers/tty/serial/ip22zilog.c
@@ -432,7 +432,7 @@ static irqreturn_t ip22zilog_interrupt(int irq, void *dev_id)
 	unsigned char r3;
 	bool push = false;

-	spin_lock(&up->port.lock);
+	uart_port_lock(&up->port);
 	r3 = read_zsreg(channel, R3);

 	/* Channel A */
@@ -448,7 +448,7 @@ static irqreturn_t ip22zilog_interrupt(int irq, void *dev_id)
 		if (r3 & CHATxIP)
 			ip22zilog_transmit_chars(up, channel);
 	}
-	spin_unlock(&up->port.lock);
+	uart_port_unlock(&up->port);

 	if (push)
 		tty_flip_buffer_push(&up->port.state->port);
@@ -458,7 +458,7 @@ static irqreturn_t ip22zilog_interrupt(int irq, void *dev_id)
 	channel = ZILOG_CHANNEL_FROM_PORT(&up->port);
 	push = false;

-	spin_lock(&up->port.lock);
+	uart_port_lock(&up->port);
 	if (r3 & (CHBEXT | CHBTxIP | CHBRxIP)) {
 		writeb(RES_H_IUS, &channel->control);
 		ZSDELAY();
@@ -471,7 +471,7 @@ static irqreturn_t ip22zilog_interrupt(int irq, void *dev_id)
 		if (r3 & CHBTxIP)
 			ip22zilog_transmit_chars(up, channel);
 	}
-	spin_unlock(&up->port.lock);
+	uart_port_unlock(&up->port);

 	if (push)
 		tty_flip_buffer_push(&up->port.state->port);
@@ -504,11 +504,11 @@ static unsigned int ip22zilog_tx_empty(struct uart_port *port)
 	unsigned char status;
 	unsigned int ret;

-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);

 	status = ip22zilog_read_channel_status(port);

-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);

 	if (status & Tx_BUF_EMP)
 		ret = TIOCSER_TEMT;
@@ -664,7 +664,7 @@ static void ip22zilog_break_ctl(struct uart_port *port, int break_state)
 	else
 		clear_bits |= SND_BRK;

-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);

 	new_reg = (up->curregs[R5] | set_bits) & ~clear_bits;
 	if (new_reg != up->curregs[R5]) {
@@ -674,7 +674,7 @@ static void ip22zilog_break_ctl(struct uart_port *port, int break_state)
 		write_zsreg(channel, R5, up->curregs[R5]);
 	}

-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }

 static void __ip22zilog_reset(struct uart_ip22zilog_port *up)
@@ -735,9 +735,9 @@ static int ip22zilog_startup(struct uart_port *port)
 	if (ZS_IS_CONS(up))
 		return 0;

-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 	__ip22zilog_startup(up);
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 	return 0;
 }

@@ -775,7 +775,7 @@ static void ip22zilog_shutdown(struct uart_port *port)
 	if (ZS_IS_CONS(up))
 		return;

-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);

 	channel = ZILOG_CHANNEL_FROM_PORT(port);

@@ -788,7 +788,7 @@ static void ip22zilog_shutdown(struct uart_port *port)
 	up->curregs[R5] &= ~SND_BRK;
 	ip22zilog_maybe_update_regs(up, channel);

-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }

 /* Shared by TTY driver and serial console setup. The port lock is held
@@ -880,7 +880,7 @@ ip22zilog_set_termios(struct uart_port *port, struct ktermios *termios,

 	baud = uart_get_baud_rate(port, termios, old, 1200, 76800);

-	spin_lock_irqsave(&up->port.lock, flags);
+	uart_port_lock_irqsave(&up->port, &flags);

 	brg = BPS_TO_BRG(baud, ZS_CLOCK / ZS_CLOCK_DIVISOR);

@@ -894,7 +894,7 @@ ip22zilog_set_termios(struct uart_port *port, struct ktermios *termios,
 	ip22zilog_maybe_update_regs(up, ZILOG_CHANNEL_FROM_PORT(port));
 	uart_update_timeout(port, termios->c_cflag, baud);

-	spin_unlock_irqrestore(&up->port.lock, flags);
+	uart_port_unlock_irqrestore(&up->port, flags);
 }

 static const char *ip22zilog_type(struct uart_port *port)
@@ -1016,10 +1016,10 @@ ip22zilog_console_write(struct console *con, const char *s, unsigned int count)
 	struct uart_ip22zilog_port *up = &ip22zilog_port_table[con->index];
 	unsigned long flags;

-	spin_lock_irqsave(&up->port.lock, flags);
+	uart_port_lock_irqsave(&up->port, &flags);
 	uart_console_write(&up->port, s, count, ip22zilog_put_char);
 	udelay(2);
-	spin_unlock_irqrestore(&up->port.lock, flags);
+	uart_port_unlock_irqrestore(&up->port, flags);
 }

 static int __init ip22zilog_console_setup(struct console *con, char *options)
@@ -1034,13 +1034,13 @@ static int __init ip22zilog_console_setup(struct console *con, char *options)

 	printk(KERN_INFO "Console: ttyS%d (IP22-Zilog)\n", con->index);

-	spin_lock_irqsave(&up->port.lock, flags);
+	uart_port_lock_irqsave(&up->port, &flags);

 	up->curregs[R15] |= BRKIE;

 	__ip22zilog_startup(up);

-	spin_unlock_irqrestore(&up->port.lock, flags);
+	uart_port_unlock_irqrestore(&up->port, flags);

 	if (options)
 		uart_parse_options(options, &baud, &parity, &bits, &flow);
diff --git a/drivers/tty/serial/jsm/jsm_neo.c b/drivers/tty/serial/jsm/jsm_neo.c
index 0c78f6627..2bd640428 100644
--- a/drivers/tty/serial/jsm/jsm_neo.c
+++ b/drivers/tty/serial/jsm/jsm_neo.c
@@ -816,9 +816,9 @@ static void neo_parse_isr(struct jsm_board *brd, u32 port)
 			/* Parse any modem signal changes */
 			jsm_dbg(INTR, &ch->ch_bd->pci_dev,
 				"MOD_STAT: sending to parse_modem_sigs\n");
-			spin_lock_irqsave(&ch->uart_port.lock, lock_flags);
+			uart_port_lock_irqsave(&ch->uart_port, &lock_flags);
 			neo_parse_modem(ch, readb(&ch->ch_neo_uart->msr));
-			spin_unlock_irqrestore(&ch->uart_port.lock, lock_flags);
+			uart_port_unlock_irqrestore(&ch->uart_port, lock_flags);
 		}
 	}

diff --git a/drivers/tty/serial/jsm/jsm_tty.c b/drivers/tty/serial/jsm/jsm_tty.c
index 222afc270..ce0fef7e2 100644
--- a/drivers/tty/serial/jsm/jsm_tty.c
+++ b/drivers/tty/serial/jsm/jsm_tty.c
@@ -152,14 +152,14 @@ static void jsm_tty_send_xchar(struct uart_port *port, char ch)
 		container_of(port, struct jsm_channel, uart_port);
 	struct ktermios *termios;

-	spin_lock_irqsave(&port->lock, lock_flags);
+	uart_port_lock_irqsave(port, &lock_flags);
 	termios = &port->state->port.tty->termios;
 	if (ch == termios->c_cc[VSTART])
 		channel->ch_bd->bd_ops->send_start_character(channel);

 	if (ch == termios->c_cc[VSTOP])
 		channel->ch_bd->bd_ops->send_stop_character(channel);
-	spin_unlock_irqrestore(&port->lock, lock_flags);
+	uart_port_unlock_irqrestore(port, lock_flags);
 }

 static void jsm_tty_stop_rx(struct uart_port *port)
@@ -176,13 +176,13 @@ static void jsm_tty_break(struct uart_port *port, int break_state)
 	struct jsm_channel *channel =
 		container_of(port, struct jsm_channel, uart_port);

-	spin_lock_irqsave(&port->lock, lock_flags);
+	uart_port_lock_irqsave(port, &lock_flags);
 	if (break_state == -1)
 		channel->ch_bd->bd_ops->send_break(channel);
 	else
 		channel->ch_bd->bd_ops->clear_break(channel);

-	spin_unlock_irqrestore(&port->lock, lock_flags);
+	uart_port_unlock_irqrestore(port, lock_flags);
 }

 static int jsm_tty_open(struct uart_port *port)
@@ -241,7 +241,7 @@ static int jsm_tty_open(struct uart_port *port)
 	channel->ch_cached_lsr = 0;
 	channel->ch_stops_sent = 0;

-	spin_lock_irqsave(&port->lock, lock_flags);
+	uart_port_lock_irqsave(port, &lock_flags);
 	termios = &port->state->port.tty->termios;
 	channel->ch_c_cflag = termios->c_cflag;
 	channel->ch_c_iflag = termios->c_iflag;
@@ -261,7 +261,7 @@ static int jsm_tty_open(struct uart_port *port)
 	jsm_carrier(channel);

 	channel->ch_open_count++;
-	spin_unlock_irqrestore(&port->lock, lock_flags);
+	uart_port_unlock_irqrestore(port, lock_flags);

 	jsm_dbg(OPEN, &channel->ch_bd->pci_dev, "finish\n");
 	return 0;
@@ -307,7 +307,7 @@ static void jsm_tty_set_termios(struct uart_port *port,
 	struct jsm_channel *channel =
 		container_of(port, struct jsm_channel, uart_port);

-	spin_lock_irqsave(&port->lock, lock_flags);
+	uart_port_lock_irqsave(port, &lock_flags);
 	channel->ch_c_cflag = termios->c_cflag;
 	channel->ch_c_iflag = termios->c_iflag;
 	channel->ch_c_oflag = termios->c_oflag;
@@ -317,7 +317,7 @@ static void jsm_tty_set_termios(struct uart_port *port,

 	channel->ch_bd->bd_ops->param(channel);
 	jsm_carrier(channel);
-	spin_unlock_irqrestore(&port->lock, lock_flags);
+	uart_port_unlock_irqrestore(port, lock_flags);
 }

 static const char *jsm_tty_type(struct uart_port *port)
diff --git a/drivers/tty/serial/liteuart.c b/drivers/tty/serial/liteuart.c
index d881cdd2a..a25ab1efe 100644
--- a/drivers/tty/serial/liteuart.c
+++ b/drivers/tty/serial/liteuart.c
@@ -139,13 +139,13 @@ static irqreturn_t liteuart_interrupt(int irq, void *data)
 	 * if polling, the context would be "in_serving_softirq", so use
 	 * irq[save|restore] spin_lock variants to cover all possibilities
 	 */
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 	isr = litex_read8(port->membase + OFF_EV_PENDING) & uart->irq_reg;
 	if (isr & EV_RX)
 		liteuart_rx_chars(port);
 	if (isr & EV_TX)
 		liteuart_tx_chars(port);
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);

 	return IRQ_RETVAL(isr);
 }
@@ -195,10 +195,10 @@ static int liteuart_startup(struct uart_port *port)
 		}
 	}

-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 	/* only enabling rx irqs during startup */
 	liteuart_update_irq_reg(port, true, EV_RX);
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);

 	if (!port->irq) {
 		timer_setup(&uart->timer, liteuart_timer, 0);
@@ -213,9 +213,9 @@ static void liteuart_shutdown(struct uart_port *port)
 	struct liteuart_port *uart = to_liteuart_port(port);
 	unsigned long flags;

-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 	liteuart_update_irq_reg(port, false, EV_RX | EV_TX);
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);

 	if (port->irq)
 		free_irq(port->irq, port);
@@ -229,13 +229,13 @@ static void liteuart_set_termios(struct uart_port *port, struct ktermios *new,
 	unsigned int baud;
 	unsigned long flags;

-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);

 	/* update baudrate */
 	baud = uart_get_baud_rate(port, new, old, 0, 460800);
 	uart_update_timeout(port, new->c_cflag, baud);

-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }

 static const char *liteuart_type(struct uart_port *port)
@@ -382,9 +382,9 @@ static void liteuart_console_write(struct console *co, const char *s,
 	uart = (struct liteuart_port *)xa_load(&liteuart_array, co->index);
 	port = &uart->port;

-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 	uart_console_write(port, s, count, liteuart_putchar);
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }

 static int liteuart_console_setup(struct console *co, char *options)
diff --git a/drivers/tty/serial/lpc32xx_hs.c b/drivers/tty/serial/lpc32xx_hs.c
index b38fe4728..5149a947b 100644
--- a/drivers/tty/serial/lpc32xx_hs.c
+++ b/drivers/tty/serial/lpc32xx_hs.c
@@ -140,15 +140,15 @@ static void lpc32xx_hsuart_console_write(struct console *co, const char *s,
 	if (up->port.sysrq)
 		locked = 0;
 	else if (oops_in_progress)
-		locked = spin_trylock(&up->port.lock);
+		locked = uart_port_trylock(&up->port);
 	else
-		spin_lock(&up->port.lock);
+		uart_port_lock(&up->port);

 	uart_console_write(&up->port, s, count, lpc32xx_hsuart_console_putchar);
 	wait_for_xmit_empty(&up->port);

 	if (locked)
-		spin_unlock(&up->port.lock);
+		uart_port_unlock(&up->port);
 	local_irq_restore(flags);
 }

@@ -298,7 +298,7 @@ static irqreturn_t serial_lpc32xx_interrupt(int irq, void *dev_id)
 	struct tty_port *tport = &port->state->port;
 	u32 status;

-	spin_lock(&port->lock);
+	uart_port_lock(port);

 	/* Read UART status and clear latched interrupts */
 	status = readl(LPC32XX_HSUART_IIR(port->membase));
@@ -333,7 +333,7 @@ static irqreturn_t serial_lpc32xx_interrupt(int irq, void *dev_id)
 		__serial_lpc32xx_tx(port);
 	}

-	spin_unlock(&port->lock);
+	uart_port_unlock(port);

 	return IRQ_HANDLED;
 }
@@ -404,14 +404,14 @@ static void serial_lpc32xx_break_ctl(struct uart_port *port,
 	unsigned long flags;
 	u32 tmp;

-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 	tmp = readl(LPC32XX_HSUART_CTRL(port->membase));
 	if (break_state != 0)
 		tmp |= LPC32XX_HSU_BREAK;
 	else
 		tmp &= ~LPC32XX_HSU_BREAK;
 	writel(tmp, LPC32XX_HSUART_CTRL(port->membase));
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }

 /* port->lock is not held. */
@@ -421,7 +421,7 @@ static int serial_lpc32xx_startup(struct uart_port *port)
 	unsigned long flags;
 	u32 tmp;

-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);

 	__serial_uart_flush(port);

@@ -441,7 +441,7 @@ static int serial_lpc32xx_startup(struct uart_port *port)

 	lpc32xx_loopback_set(port->mapbase, 0); /* get out of loopback mode */

-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);

 	retval = request_irq(port->irq, serial_lpc32xx_interrupt,
 			     0, MODNAME, port);
@@ -458,7 +458,7 @@ static void serial_lpc32xx_shutdown(struct uart_port *port)
 	u32 tmp;
 	unsigned long flags;

-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);

 	tmp = LPC32XX_HSU_TX_TL8B | LPC32XX_HSU_RX_TL32B |
 		LPC32XX_HSU_OFFSET(20) | LPC32XX_HSU_TMO_INACT_4B;
@@ -466,7 +466,7 @@ static void serial_lpc32xx_shutdown(struct uart_port *port)

 	lpc32xx_loopback_set(port->mapbase, 1); /* go to loopback mode */

-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);

 	free_irq(port->irq, port);
 }
@@ -491,7 +491,7 @@ static void serial_lpc32xx_set_termios(struct uart_port *port,

 	quot = __serial_get_clock_div(port->uartclk, baud);

-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);

 	/* Ignore characters? */
 	tmp = readl(LPC32XX_HSUART_CTRL(port->membase));
@@ -505,7 +505,7 @@ static void serial_lpc32xx_set_termios(struct uart_port *port,

 	uart_update_timeout(port, termios->c_cflag, baud);

-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);

 	/* Don't rewrite B0 */
 	if (tty_termios_baud_rate(termios))
diff --git a/drivers/tty/serial/ma35d1_serial.c b/drivers/tty/serial/ma35d1_serial.c
index 99225f1e0..faccd772c 100644
--- a/drivers/tty/serial/ma35d1_serial.c
+++ b/drivers/tty/serial/ma35d1_serial.c
@@ -269,16 +269,16 @@ static void receive_chars(struct uart_ma35d1_port *up)
 		if (uart_handle_sysrq_char(&up->port, ch))
 			continue;

-		spin_lock(&up->port.lock);
+		uart_port_lock(&up->port);
 		uart_insert_char(&up->port, fsr, MA35_FSR_RX_OVER_IF, ch, flag);
-		spin_unlock(&up->port.lock);
+		uart_port_unlock(&up->port);

 		fsr = serial_in(up, MA35_FSR_REG);
 	} while (!(fsr & MA35_FSR_RX_EMPTY) && (max_count-- > 0));

-	spin_lock(&up->port.lock);
+	uart_port_lock(&up->port);
 	tty_flip_buffer_push(&up->port.state->port);
-	spin_unlock(&up->port.lock);
+	uart_port_unlock(&up->port);
 }

 static irqreturn_t ma35d1serial_interrupt(int irq, void *dev_id)
@@ -364,14 +364,14 @@ static void ma35d1serial_break_ctl(struct uart_port *port, int break_state)
 	unsigned long flags;
 	u32 lcr;

-	spin_lock_irqsave(&up->port.lock, flags);
+	uart_port_lock_irqsave(&up->port, &flags);
 	lcr = serial_in(up, MA35_LCR_REG);
 	if (break_state != 0)
 		lcr |= MA35_LCR_BREAK;
 	else
 		lcr &= ~MA35_LCR_BREAK;
 	serial_out(up, MA35_LCR_REG, lcr);
-	spin_unlock_irqrestore(&up->port.lock, flags);
+	uart_port_unlock_irqrestore(&up->port, flags);
 }

 static int ma35d1serial_startup(struct uart_port *port)
@@ -441,7 +441,7 @@ static void ma35d1serial_set_termios(struct uart_port *port,
 	 * Ok, we're now changing the port state. Do it with
 	 * interrupts disabled.
 	 */
-	spin_lock_irqsave(&up->port.lock, flags);
+	uart_port_lock_irqsave(&up->port, &flags);

 	up->port.read_status_mask = MA35_FSR_RX_OVER_IF;
 	if (termios->c_iflag & INPCK)
@@ -475,7 +475,7 @@ static void ma35d1serial_set_termios(struct uart_port *port,

 	serial_out(up, MA35_LCR_REG, lcr);

-	spin_unlock_irqrestore(&up->port.lock, flags);
+	uart_port_unlock_irqrestore(&up->port, flags);
 }

 static const char *ma35d1serial_type(struct uart_port *port)
@@ -568,9 +568,9 @@ static void ma35d1serial_console_write(struct console *co, const char *s, u32 co
 	if (up->port.sysrq)
 		locked = 0;
 	else if (oops_in_progress)
-		locked = spin_trylock_irqsave(&up->port.lock, flags);
+		locked = uart_port_trylock_irqsave(&up->port, &flags);
 	else
-		spin_lock_irqsave(&up->port.lock, flags);
+		uart_port_lock_irqsave(&up->port, &flags);

 	/*
 	 * First save the IER then disable the interrupts
@@ -584,7 +584,7 @@ static void ma35d1serial_console_write(struct console *co, const char *s, u32 co
 	serial_out(up, MA35_IER_REG, ier);

 	if (locked)
-		spin_unlock_irqrestore(&up->port.lock, flags);
+		uart_port_unlock_irqrestore(&up->port, flags);
 }

 static int __init ma35d1serial_console_setup(struct console *co, char *options)
diff --git a/drivers/tty/serial/mcf.c b/drivers/tty/serial/mcf.c
index aea29b4e6..ee40af20a 100644
--- a/drivers/tty/serial/mcf.c
+++ b/drivers/tty/serial/mcf.c
@@ -135,12 +135,12 @@ static void mcf_break_ctl(struct uart_port *port, int break_state)
 {
 	unsigned long flags;

-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 	if (break_state == -1)
 		writeb(MCFUART_UCR_CMDBREAKSTART, port->membase + MCFUART_UCR);
 	else
 		writeb(MCFUART_UCR_CMDBREAKSTOP, port->membase + MCFUART_UCR);
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }

 /****************************************************************************/
@@ -150,7 +150,7 @@ static int mcf_startup(struct uart_port *port)
 	struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
 	unsigned long flags;

-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);

 	/* Reset UART, get it into known state... */
 	writeb(MCFUART_UCR_CMDRESETRX, port->membase + MCFUART_UCR);
@@ -164,7 +164,7 @@ static int mcf_startup(struct uart_port *port)
 	pp->imr = MCFUART_UIR_RXREADY;
 	writeb(pp->imr, port->membase + MCFUART_UIMR);

-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);

 	return 0;
 }
@@ -176,7 +176,7 @@ static void mcf_shutdown(struct uart_port *port)
 	struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
 	unsigned long flags;

-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);

 	/* Disable all interrupts now */
 	pp->imr = 0;
@@ -186,7 +186,7 @@ static void mcf_shutdown(struct uart_port *port)
 	writeb(MCFUART_UCR_CMDRESETRX, port->membase + MCFUART_UCR);
 	writeb(MCFUART_UCR_CMDRESETTX, port->membase + MCFUART_UCR);

-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }

 /****************************************************************************/
@@ -252,7 +252,7 @@ static void mcf_set_termios(struct uart_port *port, struct ktermios *termios,
 		mr2 |= MCFUART_MR2_TXCTS;
 	}

-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 	if (port->rs485.flags & SER_RS485_ENABLED) {
 		dev_dbg(port->dev, "Setting UART to RS485\n");
 		mr2 |= MCFUART_MR2_TXRTS;
@@ -273,7 +273,7 @@ static void mcf_set_termios(struct uart_port *port, struct ktermios *termios,
 	       port->membase + MCFUART_UCSR);
 	writeb(MCFUART_UCR_RXENABLE | MCFUART_UCR_TXENABLE,
 	       port->membase + MCFUART_UCR);
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }

 /****************************************************************************/
@@ -350,7 +350,7 @@ static irqreturn_t mcf_interrupt(int irq, void *data)

 	isr = readb(port->membase + MCFUART_UISR) & pp->imr;

-	spin_lock(&port->lock);
+	uart_port_lock(port);
 	if (isr & MCFUART_UIR_RXREADY) {
 		mcf_rx_chars(pp);
 		ret = IRQ_HANDLED;
@@ -359,7 +359,7 @@ static irqreturn_t mcf_interrupt(int irq, void *data)
 		mcf_tx_chars(pp);
 		ret = IRQ_HANDLED;
 	}
-	spin_unlock(&port->lock);
+	uart_port_unlock(port);

 	return ret;
 }
diff --git a/drivers/tty/serial/men_z135_uart.c b/drivers/tty/serial/men_z135_uart.c
index d2502aaa3..8048fa542 100644
--- a/drivers/tty/serial/men_z135_uart.c
+++ b/drivers/tty/serial/men_z135_uart.c
@@ -392,7 +392,7 @@ static irqreturn_t men_z135_intr(int irq, void *data)
 	if (!irq_id)
 		goto out;

-	spin_lock(&port->lock);
+	uart_port_lock(port);
 	/* It's save to write to IIR[7:6] RXC[9:8] */
 	iowrite8(irq_id, port->membase + MEN_Z135_STAT_REG);

@@ -418,7 +418,7 @@ static irqreturn_t men_z135_intr(int irq, void *data)
 		handled = true;
 	}

-	spin_unlock(&port->lock);
+	uart_port_unlock(port);
 out:
 	return IRQ_RETVAL(handled);
 }
@@ -708,7 +708,7 @@ static void men_z135_set_termios(struct uart_port *port,

 	baud = uart_get_baud_rate(port, termios, old, 0, uart_freq / 16);

-	spin_lock_irq(&port->lock);
+	uart_port_lock_irq(port);
 	if (tty_termios_baud_rate(termios))
 		tty_termios_encode_baud_rate(termios, baud, baud);

@@ -716,7 +716,7 @@ static void men_z135_set_termios(struct uart_port *port,
 	iowrite32(bd_reg, port->membase + MEN_Z135_BAUD_REG);

 	uart_update_timeout(port, termios->c_cflag, baud);
-	spin_unlock_irq(&port->lock);
+	uart_port_unlock_irq(port);
 }

 static const char *men_z135_type(struct uart_port *port)
diff --git a/drivers/tty/serial/meson_uart.c b/drivers/tty/serial/meson_uart.c
index 9388b9dde..4c1d2089a 100644
--- a/drivers/tty/serial/meson_uart.c
+++ b/drivers/tty/serial/meson_uart.c
@@ -129,14 +129,14 @@ static void meson_uart_shutdown(struct uart_port *port)

 	free_irq(port->irq, port);

-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);

 	val = readl(port->membase + AML_UART_CONTROL);
 	val &= ~AML_UART_RX_EN;
 	val &= ~(AML_UART_RX_INT_EN | AML_UART_TX_INT_EN);
 	writel(val, port->membase + AML_UART_CONTROL);

-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }

 static void meson_uart_start_tx(struct uart_port *port)
@@ -238,7 +238,7 @@ static irqreturn_t meson_uart_interrupt(int irq, void *dev_id)
 {
 	struct uart_port *port = (struct uart_port *)dev_id;

-	spin_lock(&port->lock);
+	uart_port_lock(port);

 	if (!(readl(port->membase + AML_UART_STATUS) & AML_UART_RX_EMPTY))
 		meson_receive_chars(port);
@@ -248,7 +248,7 @@ static irqreturn_t meson_uart_interrupt(int irq, void *dev_id)
 			meson_uart_start_tx(port);
 	}

-	spin_unlock(&port->lock);
+	uart_port_unlock(port);

 	return IRQ_HANDLED;
 }
@@ -284,7 +284,7 @@ static int meson_uart_startup(struct uart_port *port)
 	u32 val;
 	int ret = 0;

-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);

 	val = readl(port->membase + AML_UART_CONTROL);
 	val |= AML_UART_CLEAR_ERR;
@@ -301,7 +301,7 @@ static int meson_uart_startup(struct uart_port *port)
 	val = (AML_UART_RECV_IRQ(1) | AML_UART_XMIT_IRQ(port->fifosize / 2));
 	writel(val, port->membase + AML_UART_MISC);

-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);

 	ret = request_irq(port->irq, meson_uart_interrupt, 0,
 			  port->name, port);
@@ -341,7 +341,7 @@ static void meson_uart_set_termios(struct uart_port *port,
 	unsigned long flags;
 	u32 val;

-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);

 	cflags = termios->c_cflag;
 	iflags = termios->c_iflag;
@@ -405,7 +405,7 @@ static void meson_uart_set_termios(struct uart_port *port,
 					AML_UART_FRAME_ERR;

 	uart_update_timeout(port, termios->c_cflag, baud);
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }

 static int meson_uart_verify_port(struct uart_port *port,
@@ -464,14 +464,14 @@ static int meson_uart_poll_get_char(struct uart_port *port)
 	u32 c;
 	unsigned long flags;

-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);

 	if (readl(port->membase + AML_UART_STATUS) & AML_UART_RX_EMPTY)
 		c = NO_POLL_CHAR;
 	else
 		c = readl(port->membase + AML_UART_RFIFO);

-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);

 	return c;
 }
@@ -482,7 +482,7 @@ static void meson_uart_poll_put_char(struct uart_port *port, unsigned char c)
 	u32 reg;
 	int ret;

-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);

 	/* Wait until FIFO is empty or timeout */
 	ret = readl_poll_timeout_atomic(port->membase + AML_UART_STATUS, reg,
@@ -506,7 +506,7 @@ static void meson_uart_poll_put_char(struct uart_port *port, unsigned char c)
 		dev_err(port->dev, "Timeout waiting for UART TX EMPTY\n");

 out:
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }

 #endif /* CONFIG_CONSOLE_POLL */
@@ -563,9 +563,9 @@ static void meson_serial_port_write(struct uart_port *port, const char *s,
 	if (port->sysrq) {
 		locked = 0;
 	} else if (oops_in_progress) {
-		locked = spin_trylock(&port->lock);
+		locked = uart_port_trylock(port);
 	} else {
-		spin_lock(&port->lock);
+		uart_port_lock(port);
 		locked = 1;
 	}

@@ -577,7 +577,7 @@ static void meson_serial_port_write(struct uart_port *port, const char *s,
 	writel(val, port->membase + AML_UART_CONTROL);

 	if (locked)
-		spin_unlock(&port->lock);
+		uart_port_unlock(port);
 	local_irq_restore(flags);
 }

diff --git a/drivers/tty/serial/milbeaut_usio.c b/drivers/tty/serial/milbeaut_usio.c
index 70a910085..db3b81f2a 100644
--- a/drivers/tty/serial/milbeaut_usio.c
+++ b/drivers/tty/serial/milbeaut_usio.c
@@ -207,9 +207,9 @@ static irqreturn_t mlb_usio_rx_irq(int irq, void *dev_id)
 {
 	struct uart_port *port = dev_id;

-	spin_lock(&port->lock);
+	uart_port_lock(port);
 	mlb_usio_rx_chars(port);
-	spin_unlock(&port->lock);
+	uart_port_unlock(port);

 	return IRQ_HANDLED;
 }
@@ -218,10 +218,10 @@ static irqreturn_t mlb_usio_tx_irq(int irq, void *dev_id)
 {
 	struct uart_port *port = dev_id;

-	spin_lock(&port->lock);
+	uart_port_lock(port);
 	if (readb(port->membase + MLB_USIO_REG_SSR) & MLB_USIO_SSR_TBI)
 		mlb_usio_tx_chars(port);
-	spin_unlock(&port->lock);
+	uart_port_unlock(port);

 	return IRQ_HANDLED;
 }
@@ -267,7 +267,7 @@ static int mlb_usio_startup(struct uart_port *port)
 	escr = readb(port->membase + MLB_USIO_REG_ESCR);
 	if (of_property_read_bool(port->dev->of_node, "auto-flow-control"))
 		escr |= MLB_USIO_ESCR_FLWEN;
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 	writeb(0, port->membase + MLB_USIO_REG_SCR);
 	writeb(escr, port->membase + MLB_USIO_REG_ESCR);
 	writeb(MLB_USIO_SCR_UPCL, port->membase + MLB_USIO_REG_SCR);
@@ -282,7 +282,7 @@ static int mlb_usio_startup(struct uart_port *port)

 	writeb(MLB_USIO_SCR_TXE | MLB_USIO_SCR_RIE | MLB_USIO_SCR_TBIE |
 	       MLB_USIO_SCR_RXE, port->membase + MLB_USIO_REG_SCR);
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);

 	return 0;
 }
@@ -337,7 +337,7 @@ static void mlb_usio_set_termios(struct uart_port *port,
 	else
 		quot = 0;

-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 	uart_update_timeout(port, termios->c_cflag, baud);
 	port->read_status_mask = MLB_USIO_SSR_ORE | MLB_USIO_SSR_RDRF |
 				 MLB_USIO_SSR_TDRE;
@@ -367,7 +367,7 @@ static void mlb_usio_set_termios(struct uart_port *port,
 	writew(BIT(12), port->membase + MLB_USIO_REG_FBYTE);
 	writeb(MLB_USIO_SCR_RIE | MLB_USIO_SCR_RXE | MLB_USIO_SCR_TBIE |
 	       MLB_USIO_SCR_TXE, port->membase + MLB_USIO_REG_SCR);
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }

 static const char *mlb_usio_type(struct uart_port *port)
diff --git a/drivers/tty/serial/mpc52xx_uart.c b/drivers/tty/serial/mpc52xx_uart.c
index 916507b8f..a252465e7 100644
--- a/drivers/tty/serial/mpc52xx_uart.c
+++ b/drivers/tty/serial/mpc52xx_uart.c
@@ -1096,14 +1096,14 @@ static void
 mpc52xx_uart_break_ctl(struct uart_port *port, int ctl)
 {
 	unsigned long flags;
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);

 	if (ctl == -1)
 		psc_ops->command(port, MPC52xx_PSC_START_BRK);
 	else
 		psc_ops->command(port, MPC52xx_PSC_STOP_BRK);

-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }

 static int
@@ -1214,7 +1214,7 @@ mpc52xx_uart_set_termios(struct uart_port *port, struct ktermios *new,
 	}

 	/* Get the lock */
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);

 	/* Do our best to flush TX & RX, so we don't lose anything */
 	/* But we don't wait indefinitely ! */
@@ -1250,7 +1250,7 @@ mpc52xx_uart_set_termios(struct uart_port *port, struct ktermios *new,
 		psc_ops->command(port, MPC52xx_PSC_RX_ENABLE);

 	/* We're all set, release the lock */
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }

 static const char *
@@ -1477,11 +1477,11 @@ mpc52xx_uart_int(int irq, void *dev_id)
 	struct uart_port *port = dev_id;
 	irqreturn_t ret;

-	spin_lock(&port->lock);
+	uart_port_lock(port);

 	ret = psc_ops->handle_irq(port);

-	spin_unlock(&port->lock);
+	uart_port_unlock(port);

 	return ret;
 }
diff --git a/drivers/tty/serial/mps2-uart.c b/drivers/tty/serial/mps2-uart.c
index ea5a7911c..2a4c09f3a 100644
--- a/drivers/tty/serial/mps2-uart.c
+++ b/drivers/tty/serial/mps2-uart.c
@@ -188,12 +188,12 @@ static irqreturn_t mps2_uart_rxirq(int irq, void *data)
 	if (unlikely(!(irqflag & UARTn_INT_RX)))
 		return IRQ_NONE;

-	spin_lock(&port->lock);
+	uart_port_lock(port);

 	mps2_uart_write8(port, UARTn_INT_RX, UARTn_INT);
 	mps2_uart_rx_chars(port);

-	spin_unlock(&port->lock);
+	uart_port_unlock(port);

 	return IRQ_HANDLED;
 }
@@ -206,12 +206,12 @@ static irqreturn_t mps2_uart_txirq(int irq, void *data)
 	if (unlikely(!(irqflag & UARTn_INT_TX)))
 		return IRQ_NONE;

-	spin_lock(&port->lock);
+	uart_port_lock(port);

 	mps2_uart_write8(port, UARTn_INT_TX, UARTn_INT);
 	mps2_uart_tx_chars(port);

-	spin_unlock(&port->lock);
+	uart_port_unlock(port);

 	return IRQ_HANDLED;
 }
@@ -222,7 +222,7 @@ static irqreturn_t mps2_uart_oerrirq(int irq, void *data)
 	struct uart_port *port = data;
 	u8 irqflag = mps2_uart_read8(port, UARTn_INT);

-	spin_lock(&port->lock);
+	uart_port_lock(port);

 	if (irqflag & UARTn_INT_RX_OVERRUN) {
 		struct tty_port *tport = &port->state->port;
@@ -244,7 +244,7 @@ static irqreturn_t mps2_uart_oerrirq(int irq, void *data)
 		handled = IRQ_HANDLED;
 	}

-	spin_unlock(&port->lock);
+	uart_port_unlock(port);

 	return handled;
 }
@@ -356,12 +356,12 @@ mps2_uart_set_termios(struct uart_port *port, struct ktermios *termios,

 	bauddiv = DIV_ROUND_CLOSEST(port->uartclk, baud);

-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);

 	uart_update_timeout(port, termios->c_cflag, baud);
 	mps2_uart_write32(port, bauddiv, UARTn_BAUDDIV);

-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);

 	if (tty_termios_baud_rate(termios))
 		tty_termios_encode_baud_rate(termios, baud, baud);
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index 90953e679..597264b54 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -444,7 +444,7 @@ static void msm_complete_tx_dma(void *args)
 	unsigned int count;
 	u32 val;

-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);

 	/* Already stopped */
 	if (!dma->count)
@@ -476,7 +476,7 @@ static void msm_complete_tx_dma(void *args)

 	msm_handle_tx(port);
 done:
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }

 static int msm_handle_tx_dma(struct msm_port *msm_port, unsigned int count)
@@ -549,7 +549,7 @@ static void msm_complete_rx_dma(void *args)
 	unsigned long flags;
 	u32 val;

-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);

 	/* Already stopped */
 	if (!dma->count)
@@ -587,16 +587,16 @@ static void msm_complete_rx_dma(void *args)
 		if (!(port->read_status_mask & MSM_UART_SR_RX_BREAK))
 			flag = TTY_NORMAL;

-		spin_unlock_irqrestore(&port->lock, flags);
+		uart_port_unlock_irqrestore(port, flags);
 		sysrq = uart_handle_sysrq_char(port, dma->virt[i]);
-		spin_lock_irqsave(&port->lock, flags);
+		uart_port_lock_irqsave(port, &flags);
 		if (!sysrq)
 			tty_insert_flip_char(tport, dma->virt[i], flag);
 	}

 	msm_start_rx_dma(msm_port);
 done:
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);

 	if (count)
 		tty_flip_buffer_push(tport);
@@ -762,9 +762,9 @@ static void msm_handle_rx_dm(struct uart_port *port, unsigned int misr)
 		if (!(port->read_status_mask & MSM_UART_SR_RX_BREAK))
 			flag = TTY_NORMAL;

-		spin_unlock(&port->lock);
+		uart_port_unlock(port);
 		sysrq = uart_handle_sysrq_char(port, buf[i]);
-		spin_lock(&port->lock);
+		uart_port_lock(port);
 		if (!sysrq)
 			tty_insert_flip_char(tport, buf[i], flag);
 	}
@@ -824,9 +824,9 @@ static void msm_handle_rx(struct uart_port *port)
 		else if (sr & MSM_UART_SR_PAR_FRAME_ERR)
 			flag = TTY_FRAME;

-		spin_unlock(&port->lock);
+		uart_port_unlock(port);
 		sysrq = uart_handle_sysrq_char(port, c);
-		spin_lock(&port->lock);
+		uart_port_lock(port);
 		if (!sysrq)
 			tty_insert_flip_char(tport, c, flag);
 	}
@@ -951,7 +951,7 @@ static irqreturn_t msm_uart_irq(int irq, void *dev_id)
 	unsigned int misr;
 	u32 val;

-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 	misr = msm_read(port, MSM_UART_MISR);
 	msm_write(port, 0, MSM_UART_IMR); /* disable interrupt */

@@ -983,7 +983,7 @@ static irqreturn_t msm_uart_irq(int irq, void *dev_id)
 		msm_handle_delta_cts(port);

 	msm_write(port, msm_port->imr, MSM_UART_IMR); /* restore interrupt */
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);

 	return IRQ_HANDLED;
 }
@@ -1128,13 +1128,13 @@ static int msm_set_baud_rate(struct uart_port *port, unsigned int baud,
 	unsigned long flags, rate;

 	flags = *saved_flags;
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);

 	entry = msm_find_best_baud(port, baud, &rate);
 	clk_set_rate(msm_port->clk, rate);
 	baud = rate / 16 / entry->divisor;

-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 	*saved_flags = flags;
 	port->uartclk = rate;

@@ -1266,7 +1266,7 @@ static void msm_set_termios(struct uart_port *port, struct ktermios *termios,
 	unsigned long flags;
 	unsigned int baud, mr;

-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);

 	if (dma->chan) /* Terminate if any */
 		msm_stop_dma(port, dma);
@@ -1338,7 +1338,7 @@ static void msm_set_termios(struct uart_port *port, struct ktermios *termios,
 	/* Try to use DMA */
 	msm_start_rx_dma(msm_port);

-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }

 static const char *msm_type(struct uart_port *port)
@@ -1620,9 +1620,9 @@ static void __msm_console_write(struct uart_port *port, const char *s,
 	if (port->sysrq)
 		locked = 0;
 	else if (oops_in_progress)
-		locked = spin_trylock(&port->lock);
+		locked = uart_port_trylock(port);
 	else
-		spin_lock(&port->lock);
+		uart_port_lock(port);

 	if (is_uartdm)
 		msm_reset_dm_count(port, count);
@@ -1661,7 +1661,7 @@ static void __msm_console_write(struct uart_port *port, const char *s,
 	}

 	if (locked)
-		spin_unlock(&port->lock);
+		uart_port_unlock(port);

 	local_irq_restore(flags);
 }
diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
index ea924e9b9..0255646bc 100644
--- a/drivers/tty/serial/mvebu-uart.c
+++ b/drivers/tty/serial/mvebu-uart.c
@@ -187,9 +187,9 @@ static unsigned int mvebu_uart_tx_empty(struct uart_port *port)
 	unsigned long flags;
 	unsigned int st;

-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 	st = readl(port->membase + UART_STAT);
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);

 	return (st & STAT_TX_EMP) ? TIOCSER_TEMT : 0;
 }
@@ -249,14 +249,14 @@ static void mvebu_uart_break_ctl(struct uart_port *port, int brk)
 	unsigned int ctl;
 	unsigned long flags;

-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 	ctl = readl(port->membase + UART_CTRL(port));
 	if (brk == -1)
 		ctl |= CTRL_SND_BRK_SEQ;
 	else
 		ctl &= ~CTRL_SND_BRK_SEQ;
 	writel(ctl, port->membase + UART_CTRL(port));
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }

 static void mvebu_uart_rx_chars(struct uart_port *port, unsigned int status)
@@ -540,7 +540,7 @@ static void mvebu_uart_set_termios(struct uart_port *port,
 	unsigned long flags;
 	unsigned int baud, min_baud, max_baud;

-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);

 	port->read_status_mask = STAT_RX_RDY(port) | STAT_OVR_ERR |
 		STAT_TX_RDY(port) | STAT_TX_FIFO_FUL;
@@ -589,7 +589,7 @@ static void mvebu_uart_set_termios(struct uart_port *port,
 		uart_update_timeout(port, termios->c_cflag, baud);
 	}

-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }

 static const char *mvebu_uart_type(struct uart_port *port)
@@ -735,9 +735,9 @@ static void mvebu_uart_console_write(struct console *co, const char *s,
 	int locked = 1;

 	if (oops_in_progress)
-		locked = spin_trylock_irqsave(&port->lock, flags);
+		locked = uart_port_trylock_irqsave(port, &flags);
 	else
-		spin_lock_irqsave(&port->lock, flags);
+		uart_port_lock_irqsave(port, &flags);

 	ier = readl(port->membase + UART_CTRL(port)) & CTRL_BRK_INT;
 	intr = readl(port->membase + UART_INTR(port)) &
@@ -758,7 +758,7 @@ static void mvebu_uart_console_write(struct console *co, const char *s,
 	}

 	if (locked)
-		spin_unlock_irqrestore(&port->lock, flags);
+		uart_port_unlock_irqrestore(port, flags);
 }

 static int mvebu_uart_console_setup(struct console *co, char *options)
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index 135a838f5..1097fca22 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -390,10 +390,10 @@ static void serial_omap_throttle(struct uart_port *port)
 	struct uart_omap_port *up = to_uart_omap_port(port);
 	unsigned long flags;

-	spin_lock_irqsave(&up->port.lock, flags);
+	uart_port_lock_irqsave(&up->port, &flags);
 	up->ier &= ~(UART_IER_RLSI | UART_IER_RDI);
 	serial_out(up, UART_IER, up->ier);
-	spin_unlock_irqrestore(&up->port.lock, flags);
+	uart_port_unlock_irqrestore(&up->port, flags);
 }

 static void serial_omap_unthrottle(struct uart_port *port)
@@ -401,10 +401,10 @@ static void serial_omap_unthrottle(struct uart_port *port)
 	struct uart_omap_port *up = to_uart_omap_port(port);
 	unsigned long flags;

-	spin_lock_irqsave(&up->port.lock, flags);
+	uart_port_lock_irqsave(&up->port, &flags);
 	up->ier |= UART_IER_RLSI | UART_IER_RDI;
 	serial_out(up, UART_IER, up->ier);
-	spin_unlock_irqrestore(&up->port.lock, flags);
+	uart_port_unlock_irqrestore(&up->port, flags);
 }

 static unsigned int check_modem_status(struct uart_omap_port *up)
@@ -527,7 +527,7 @@ static irqreturn_t serial_omap_irq(int irq, void *dev_id)
 	irqreturn_t ret = IRQ_NONE;
 	int max_count = 256;

-	spin_lock(&up->port.lock);
+	uart_port_lock(&up->port);

 	do {
 		iir = serial_in(up, UART_IIR);
@@ -563,7 +563,7 @@ static irqreturn_t serial_omap_irq(int irq, void *dev_id)
 		}
 	} while (max_count--);

-	spin_unlock(&up->port.lock);
+	uart_port_unlock(&up->port);

 	tty_flip_buffer_push(&up->port.state->port);

@@ -579,9 +579,9 @@ static unsigned int serial_omap_tx_empty(struct uart_port *port)
 	unsigned int ret = 0;

 	dev_dbg(up->port.dev, "serial_omap_tx_empty+%d\n", up->port.line);
-	spin_lock_irqsave(&up->port.lock, flags);
+	uart_port_lock_irqsave(&up->port, &flags);
 	ret = serial_in(up, UART_LSR) & UART_LSR_TEMT ? TIOCSER_TEMT : 0;
-	spin_unlock_irqrestore(&up->port.lock, flags);
+	uart_port_unlock_irqrestore(&up->port, flags);

 	return ret;
 }
@@ -647,13 +647,13 @@ static void serial_omap_break_ctl(struct uart_port *port, int break_state)
 	unsigned long flags;

 	dev_dbg(up->port.dev, "serial_omap_break_ctl+%d\n", up->port.line);
-	spin_lock_irqsave(&up->port.lock, flags);
+	uart_port_lock_irqsave(&up->port, &flags);
 	if (break_state == -1)
 		up->lcr |= UART_LCR_SBC;
 	else
 		up->lcr &= ~UART_LCR_SBC;
 	serial_out(up, UART_LCR, up->lcr);
-	spin_unlock_irqrestore(&up->port.lock, flags);
+	uart_port_unlock_irqrestore(&up->port, flags);
 }

 static int serial_omap_startup(struct uart_port *port)
@@ -701,13 +701,13 @@ static int serial_omap_startup(struct uart_port *port)
 	 * Now, initialize the UART
 	 */
 	serial_out(up, UART_LCR, UART_LCR_WLEN8);
-	spin_lock_irqsave(&up->port.lock, flags);
+	uart_port_lock_irqsave(&up->port, &flags);
 	/*
 	 * Most PC uarts need OUT2 raised to enable interrupts.
 	 */
 	up->port.mctrl |= TIOCM_OUT2;
 	serial_omap_set_mctrl(&up->port, up->port.mctrl);
-	spin_unlock_irqrestore(&up->port.lock, flags);
+	uart_port_unlock_irqrestore(&up->port, flags);

 	up->msr_saved_flags = 0;
 	/*
@@ -742,10 +742,10 @@ static void serial_omap_shutdown(struct uart_port *port)
 	up->ier = 0;
 	serial_out(up, UART_IER, 0);

-	spin_lock_irqsave(&up->port.lock, flags);
+	uart_port_lock_irqsave(&up->port, &flags);
 	up->port.mctrl &= ~TIOCM_OUT2;
 	serial_omap_set_mctrl(&up->port, up->port.mctrl);
-	spin_unlock_irqrestore(&up->port.lock, flags);
+	uart_port_unlock_irqrestore(&up->port, flags);

 	/*
 	 * Disable break condition and FIFOs
@@ -815,7 +815,7 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
 	 * Ok, we're now changing the port state. Do it with
 	 * interrupts disabled.
 	 */
-	spin_lock_irqsave(&up->port.lock, flags);
+	uart_port_lock_irqsave(&up->port, &flags);

 	/*
 	 * Update the per-port timeout.
@@ -1013,7 +1013,7 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,

 	serial_omap_set_mctrl(&up->port, up->port.mctrl);

-	spin_unlock_irqrestore(&up->port.lock, flags);
+	uart_port_unlock_irqrestore(&up->port, flags);
 	dev_dbg(up->port.dev, "serial_omap_set_termios+%d\n", up->port.line);
 }

@@ -1212,13 +1212,10 @@ serial_omap_console_write(struct console *co, const char *s,
 	unsigned int ier;
 	int locked = 1;

-	local_irq_save(flags);
-	if (up->port.sysrq)
-		locked = 0;
-	else if (oops_in_progress)
-		locked = spin_trylock(&up->port.lock);
+	if (up->port.sysrq || oops_in_progress)
+		locked = uart_port_trylock_irqsave(&up->port, &flags);
 	else
-		spin_lock(&up->port.lock);
+		uart_port_lock_irqsave(&up->port, &flags);

 	/*
 	 * First save the IER then disable the interrupts
@@ -1245,8 +1242,7 @@ serial_omap_console_write(struct console *co, const char *s,
 	check_modem_status(up);

 	if (locked)
-		spin_unlock(&up->port.lock);
-	local_irq_restore(flags);
+		uart_port_unlock_irqrestore(&up->port, flags);
 }

 static int __init
diff --git a/drivers/tty/serial/owl-uart.c b/drivers/tty/serial/owl-uart.c
index e99970a94..919f5e5aa 100644
--- a/drivers/tty/serial/owl-uart.c
+++ b/drivers/tty/serial/owl-uart.c
@@ -125,12 +125,12 @@ static unsigned int owl_uart_tx_empty(struct uart_port *port)
 	u32 val;
 	unsigned int ret;

-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);

 	val = owl_uart_read(port, OWL_UART_STAT);
 	ret = (val & OWL_UART_STAT_TFES) ? TIOCSER_TEMT : 0;

-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);

 	return ret;
 }
@@ -232,7 +232,7 @@ static irqreturn_t owl_uart_irq(int irq, void *dev_id)
 	unsigned long flags;
 	u32 stat;

-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);

 	stat = owl_uart_read(port, OWL_UART_STAT);

@@ -246,7 +246,7 @@ static irqreturn_t owl_uart_irq(int irq, void *dev_id)
 	stat |= OWL_UART_STAT_RIP | OWL_UART_STAT_TIP;
 	owl_uart_write(port, stat, OWL_UART_STAT);

-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);

 	return IRQ_HANDLED;
 }
@@ -256,14 +256,14 @@ static void owl_uart_shutdown(struct uart_port *port)
 	u32 val;
 	unsigned long flags;

-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);

 	val = owl_uart_read(port, OWL_UART_CTL);
 	val &= ~(OWL_UART_CTL_TXIE | OWL_UART_CTL_RXIE
 		| OWL_UART_CTL_TXDE | OWL_UART_CTL_RXDE | OWL_UART_CTL_EN);
 	owl_uart_write(port, val, OWL_UART_CTL);

-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);

 	free_irq(port->irq, port);
 }
@@ -279,7 +279,7 @@ static int owl_uart_startup(struct uart_port *port)
 	if (ret)
 		return ret;

-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);

 	val = owl_uart_read(port, OWL_UART_STAT);
 	val |= OWL_UART_STAT_RIP | OWL_UART_STAT_TIP
@@ -291,7 +291,7 @@ static int owl_uart_startup(struct uart_port *port)
 	val |= OWL_UART_CTL_EN;
 	owl_uart_write(port, val, OWL_UART_CTL);

-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);

 	return 0;
 }
@@ -311,7 +311,7 @@ static void owl_uart_set_termios(struct uart_port *port,
 	u32 ctl;
 	unsigned long flags;

-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);

 	ctl = owl_uart_read(port, OWL_UART_CTL);

@@ -371,7 +371,7 @@ static void owl_uart_set_termios(struct uart_port *port,

 	uart_update_timeout(port, termios->c_cflag, baud);

-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }

 static void owl_uart_release_port(struct uart_port *port)
@@ -515,9 +515,9 @@ static void owl_uart_port_write(struct uart_port *port, const char *s,
 	if (port->sysrq)
 		locked = 0;
 	else if (oops_in_progress)
-		locked = spin_trylock(&port->lock);
+		locked = uart_port_trylock(port);
 	else {
-		spin_lock(&port->lock);
+		uart_port_lock(port);
 		locked = 1;
 	}

@@ -541,7 +541,7 @@ static void owl_uart_port_write(struct uart_port *port, const char *s,
 	owl_uart_write(port, old_ctl, OWL_UART_CTL);

 	if (locked)
-		spin_unlock(&port->lock);
+		uart_port_unlock(port);

 	local_irq_restore(flags);
 }
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index cc83b772b..436cc6d52 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -1347,7 +1347,7 @@ static void pch_uart_set_termios(struct uart_port *port,
 	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);

 	spin_lock_irqsave(&priv->lock, flags);
-	spin_lock(&port->lock);
+	uart_port_lock(port);

 	uart_update_timeout(port, termios->c_cflag, baud);
 	rtn = pch_uart_hal_set_line(priv, baud, parity, bits, stb);
@@ -1360,7 +1360,7 @@ static void pch_uart_set_termios(struct uart_port *port,
 		tty_termios_encode_baud_rate(termios, baud, baud);

 out:
-	spin_unlock(&port->lock);
+	uart_port_unlock(port);
 	spin_unlock_irqrestore(&priv->lock, flags);
 }

@@ -1581,10 +1581,10 @@ pch_console_write(struct console *co, const char *s, unsigned int count)
 		port_locked = 0;
 	} else if (oops_in_progress) {
 		priv_locked = spin_trylock(&priv->lock);
-		port_locked = spin_trylock(&priv->port.lock);
+		port_locked = uart_port_trylock(&priv->port);
 	} else {
 		spin_lock(&priv->lock);
-		spin_lock(&priv->port.lock);
+		uart_port_lock(&priv->port);
 	}

 	/*
@@ -1604,7 +1604,7 @@ pch_console_write(struct console *co, const char *s, unsigned int count)
 	iowrite8(ier, priv->membase + UART_IER);

 	if (port_locked)
-		spin_unlock(&priv->port.lock);
+		uart_port_unlock(&priv->port);
 	if (priv_locked)
 		spin_unlock(&priv->lock);
 	local_irq_restore(flags);
diff --git a/drivers/tty/serial/pic32_uart.c b/drivers/tty/serial/pic32_uart.c
index e308d5022..3a95bf5d5 100644
--- a/drivers/tty/serial/pic32_uart.c
+++ b/drivers/tty/serial/pic32_uart.c
@@ -243,7 +243,7 @@ static void pic32_uart_break_ctl(struct uart_port *port, int ctl)
 	struct pic32_sport *sport = to_pic32_sport(port);
 	unsigned long flags;

-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);

 	if (ctl)
 		pic32_uart_writel(sport, PIC32_SET(PIC32_UART_STA),
@@ -252,7 +252,7 @@ static void pic32_uart_break_ctl(struct uart_port *port, int ctl)
 		pic32_uart_writel(sport, PIC32_CLR(PIC32_UART_STA),
 				PIC32_UART_STA_UTXBRK);

-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }

 /* get port type in string format */
@@ -274,7 +274,7 @@ static void pic32_uart_do_rx(struct uart_port *port)
 	 */
 	max_count = PIC32_UART_RX_FIFO_DEPTH;

-	spin_lock(&port->lock);
+	uart_port_lock(port);

 	tty = &port->state->port;

@@ -331,7 +331,7 @@ static void pic32_uart_do_rx(struct uart_port *port)

 	} while (--max_count);

-	spin_unlock(&port->lock);
+	uart_port_unlock(port);

 	tty_flip_buffer_push(tty);
 }
@@ -410,9 +410,9 @@ static irqreturn_t pic32_uart_tx_interrupt(int irq, void *dev_id)
 	struct uart_port *port = dev_id;
 	unsigned long flags;

-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 	pic32_uart_do_tx(port);
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);

 	return IRQ_HANDLED;
 }
@@ -580,9 +580,9 @@ static void pic32_uart_shutdown(struct uart_port *port)
 	unsigned long flags;

 	/* disable uart */
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 	pic32_uart_dsbl_and_mask(port);
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 	clk_disable_unprepare(sport->clk);

 	/* free all 3 interrupts for this UART */
@@ -604,7 +604,7 @@ static void pic32_uart_set_termios(struct uart_port *port,
 	unsigned int quot;
 	unsigned long flags;

-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);

 	/* disable uart and mask all interrupts while changing speed */
 	pic32_uart_dsbl_and_mask(port);
@@ -672,7 +672,7 @@ static void pic32_uart_set_termios(struct uart_port *port,
 	/* enable uart */
 	pic32_uart_en_and_unmask(port);

-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }

 /* serial core request to claim uart iomem */
diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
index 29bc80d39..77691fbbf 100644
--- a/drivers/tty/serial/pmac_zilog.c
+++ b/drivers/tty/serial/pmac_zilog.c
@@ -245,9 +245,9 @@ static bool pmz_receive_chars(struct uart_pmac_port *uap)
 #endif /* USE_CTRL_O_SYSRQ */
 	if (uap->port.sysrq) {
 		int swallow;
-		spin_unlock(&uap->port.lock);
+		uart_port_unlock(&uap->port);
 		swallow = uart_handle_sysrq_char(&uap->port, ch);
-		spin_lock(&uap->port.lock);
+		uart_port_lock(&uap->port);
 		if (swallow)
 			goto next_char;
 	}
@@ -421,7 +421,7 @@ static irqreturn_t pmz_interrupt(int irq, void *dev_id)
 	uap_a = pmz_get_port_A(uap);
 	uap_b = uap_a->mate;
 
-	spin_lock(&uap_a->port.lock);
+	uart_port_lock(&uap_a->port);
 	r3 = read_zsreg(uap_a, R3);
 
 	/* Channel A */
@@ -442,14 +442,14 @@ static irqreturn_t pmz_interrupt(int irq, void *dev_id)
 		rc = IRQ_HANDLED;
 	}
 skip_a:
-	spin_unlock(&uap_a->port.lock);
+	uart_port_unlock(&uap_a->port);
 	if (push)
 		tty_flip_buffer_push(&uap->port.state->port);
 
 	if (!uap_b)
 		goto out;
 
-	spin_lock(&uap_b->port.lock);
+	uart_port_lock(&uap_b->port);
 	push = false;
 	if (r3 & (CHBEXT | CHBTxIP | CHBRxIP)) {
 		if (!ZS_IS_OPEN(uap_b)) {
@@ -467,7 +467,7 @@ static irqreturn_t pmz_interrupt(int irq, void *dev_id)
 		rc = IRQ_HANDLED;
 	}
 skip_b:
-	spin_unlock(&uap_b->port.lock);
+	uart_port_unlock(&uap_b->port);
 	if (push)
 		tty_flip_buffer_push(&uap->port.state->port);
 
@@ -483,9 +483,9 @@ static inline u8 pmz_peek_status(struct uart_pmac_port *uap)
 	unsigned long flags;
 	u8 status;
 
-	spin_lock_irqsave(&uap->port.lock, flags);
+	uart_port_lock_irqsave(&uap->port, &flags);
 	status = read_zsreg(uap, R0);
-	spin_unlock_irqrestore(&uap->port.lock, flags);
+	uart_port_unlock_irqrestore(&uap->port, flags);
 
 	return status;
 }
@@ -671,7 +671,7 @@ static void pmz_break_ctl(struct uart_port *port, int break_state)
 	else
 		clear_bits |= SND_BRK;
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 
 	new_reg = (uap->curregs[R5] | set_bits) & ~clear_bits;
 	if (new_reg != uap->curregs[R5]) {
@@ -679,7 +679,7 @@ static void pmz_break_ctl(struct uart_port *port, int break_state)
 		write_zsreg(uap, R5, uap->curregs[R5]);
 	}
 
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }
 
 #ifdef CONFIG_PPC_PMAC
@@ -851,18 +851,18 @@ static void pmz_irda_reset(struct uart_pmac_port *uap)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&uap->port.lock, flags);
+	uart_port_lock_irqsave(&uap->port, &flags);
 	uap->curregs[R5] |= DTR;
 	write_zsreg(uap, R5, uap->curregs[R5]);
 	zssync(uap);
-	spin_unlock_irqrestore(&uap->port.lock, flags);
+	uart_port_unlock_irqrestore(&uap->port, flags);
 	msleep(110);
 
-	spin_lock_irqsave(&uap->port.lock, flags);
+	uart_port_lock_irqsave(&uap->port, &flags);
 	uap->curregs[R5] &= ~DTR;
 	write_zsreg(uap, R5, uap->curregs[R5]);
 	zssync(uap);
-	spin_unlock_irqrestore(&uap->port.lock, flags);
+	uart_port_unlock_irqrestore(&uap->port, flags);
 	msleep(10);
 }
 
@@ -882,9 +882,9 @@ static int pmz_startup(struct uart_port *port)
 	 * initialize the chip
 	 */
 	if (!ZS_IS_CONS(uap)) {
-		spin_lock_irqsave(&port->lock, flags);
+		uart_port_lock_irqsave(port, &flags);
 		pwr_delay = __pmz_startup(uap);
-		spin_unlock_irqrestore(&port->lock, flags);
+		uart_port_unlock_irqrestore(port, flags);
 	}
 	sprintf(uap->irq_name, PMACZILOG_NAME"%d", uap->port.line);
 	if (request_irq(uap->port.irq, pmz_interrupt, IRQF_SHARED,
@@ -907,9 +907,9 @@ static int pmz_startup(struct uart_port *port)
 		pmz_irda_reset(uap);
 
 	/* Enable interrupt requests for the channel */
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 	pmz_interrupt_control(uap, 1);
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 
 	return 0;
 }
@@ -919,7 +919,7 @@ static void pmz_shutdown(struct uart_port *port)
 	struct uart_pmac_port *uap = to_pmz(port);
 	unsigned long flags;
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 
 	/* Disable interrupt requests for the channel */
 	pmz_interrupt_control(uap, 0);
@@ -934,19 +934,19 @@ static void pmz_shutdown(struct uart_port *port)
 		pmz_maybe_update_regs(uap);
 	}
 
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 
 	/* Release interrupt handler */
 	free_irq(uap->port.irq, uap);
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 
 	uap->flags &= ~PMACZILOG_FLAG_IS_OPEN;
 
 	if (!ZS_IS_CONS(uap))
 		pmz_set_scc_power(uap, 0);	/* Shut the chip down */
 
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }
 
 /* Shared by TTY driver and serial console setup. The port lock is held
@@ -1233,7 +1233,7 @@ static void pmz_set_termios(struct uart_port *port, struct ktermios *termios,
 	struct uart_pmac_port *uap = to_pmz(port);
 	unsigned long flags;
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 
 	/* Disable IRQs on the port */
 	pmz_interrupt_control(uap, 0);
@@ -1245,7 +1245,7 @@ static void pmz_set_termios(struct uart_port *port, struct ktermios *termios,
 	if (ZS_IS_OPEN(uap))
 		pmz_interrupt_control(uap, 1);
 
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }
 
 static const char *pmz_type(struct uart_port *port)
@@ -1882,7 +1882,7 @@ static void pmz_console_write(struct console *con, const char *s, unsigned int c
 	struct uart_pmac_port *uap = &pmz_ports[con->index];
 	unsigned long flags;
 
-	spin_lock_irqsave(&uap->port.lock, flags);
+	uart_port_lock_irqsave(&uap->port, &flags);
 
 	/* Turn of interrupts and enable the transmitter. */
 	write_zsreg(uap, R1, uap->curregs[1] & ~TxINT_ENAB);
@@ -1894,7 +1894,7 @@ static void pmz_console_write(struct console *con, const char *s, unsigned int c
 	write_zsreg(uap, R1, uap->curregs[1]);
 	/* Don't disable the transmitter. */
 
-	spin_unlock_irqrestore(&uap->port.lock, flags);
+	uart_port_unlock_irqrestore(&uap->port, flags);
 }
 
 /*
diff --git a/drivers/tty/serial/pxa.c b/drivers/tty/serial/pxa.c
index 73c60f5ea..46e70e155 100644
--- a/drivers/tty/serial/pxa.c
+++ b/drivers/tty/serial/pxa.c
@@ -225,14 +225,14 @@ static inline irqreturn_t serial_pxa_irq(int irq, void *dev_id)
 	iir = serial_in(up, UART_IIR);
 	if (iir & UART_IIR_NO_INT)
 		return IRQ_NONE;
-	spin_lock(&up->port.lock);
+	uart_port_lock(&up->port);
 	lsr = serial_in(up, UART_LSR);
 	if (lsr & UART_LSR_DR)
 		receive_chars(up, &lsr);
 	check_modem_status(up);
 	if (lsr & UART_LSR_THRE)
 		transmit_chars(up);
-	spin_unlock(&up->port.lock);
+	uart_port_unlock(&up->port);
 	return IRQ_HANDLED;
 }
 
@@ -242,9 +242,9 @@ static unsigned int serial_pxa_tx_empty(struct uart_port *port)
 	unsigned long flags;
 	unsigned int ret;
 
-	spin_lock_irqsave(&up->port.lock, flags);
+	uart_port_lock_irqsave(&up->port, &flags);
 	ret = serial_in(up, UART_LSR) & UART_LSR_TEMT ? TIOCSER_TEMT : 0;
-	spin_unlock_irqrestore(&up->port.lock, flags);
+	uart_port_unlock_irqrestore(&up->port, flags);
 
 	return ret;
 }
@@ -295,13 +295,13 @@ static void serial_pxa_break_ctl(struct uart_port *port, int break_state)
 	struct uart_pxa_port *up = (struct uart_pxa_port *)port;
 	unsigned long flags;
 
-	spin_lock_irqsave(&up->port.lock, flags);
+	uart_port_lock_irqsave(&up->port, &flags);
 	if (break_state == -1)
 		up->lcr |= UART_LCR_SBC;
 	else
 		up->lcr &= ~UART_LCR_SBC;
 	serial_out(up, UART_LCR, up->lcr);
-	spin_unlock_irqrestore(&up->port.lock, flags);
+	uart_port_unlock_irqrestore(&up->port, flags);
 }
 
 static int serial_pxa_startup(struct uart_port *port)
@@ -346,10 +346,10 @@ static int serial_pxa_startup(struct uart_port *port)
 	 */
 	serial_out(up, UART_LCR, UART_LCR_WLEN8);
 
-	spin_lock_irqsave(&up->port.lock, flags);
+	uart_port_lock_irqsave(&up->port, &flags);
 	up->port.mctrl |= TIOCM_OUT2;
 	serial_pxa_set_mctrl(&up->port, up->port.mctrl);
-	spin_unlock_irqrestore(&up->port.lock, flags);
+	uart_port_unlock_irqrestore(&up->port, flags);
 
 	/*
 	 * Finally, enable interrupts.  Note: Modem status interrupts
@@ -383,10 +383,10 @@ static void serial_pxa_shutdown(struct uart_port *port)
 	up->ier = 0;
 	serial_out(up, UART_IER, 0);
 
-	spin_lock_irqsave(&up->port.lock, flags);
+	uart_port_lock_irqsave(&up->port, &flags);
 	up->port.mctrl &= ~TIOCM_OUT2;
 	serial_pxa_set_mctrl(&up->port, up->port.mctrl);
-	spin_unlock_irqrestore(&up->port.lock, flags);
+	uart_port_unlock_irqrestore(&up->port, flags);
 
 	/*
 	 * Disable break condition and FIFOs
@@ -434,7 +434,7 @@ serial_pxa_set_termios(struct uart_port *port, struct ktermios *termios,
 	 * Ok, we're now changing the port state.  Do it with
 	 * interrupts disabled.
 	 */
-	spin_lock_irqsave(&up->port.lock, flags);
+	uart_port_lock_irqsave(&up->port, &flags);
 
 	/*
 	 * Ensure the port will be enabled.
@@ -504,7 +504,7 @@ serial_pxa_set_termios(struct uart_port *port, struct ktermios *termios,
 	up->lcr = cval;					/* Save LCR */
 	serial_pxa_set_mctrl(&up->port, up->port.mctrl);
 	serial_out(up, UART_FCR, fcr);
-	spin_unlock_irqrestore(&up->port.lock, flags);
+	uart_port_unlock_irqrestore(&up->port, flags);
 }
 
 static void
@@ -608,9 +608,9 @@ serial_pxa_console_write(struct console *co, const char *s, unsigned int count)
 	if (up->port.sysrq)
 		locked = 0;
 	else if (oops_in_progress)
-		locked = spin_trylock(&up->port.lock);
+		locked = uart_port_trylock(&up->port);
 	else
-		spin_lock(&up->port.lock);
+		uart_port_lock(&up->port);
 
 	/*
	 * First save the IER then disable the interrupts
@@ -628,7 +628,7 @@ serial_pxa_console_write(struct console *co, const char *s, unsigned int count)
 	serial_out(up, UART_IER, ier);
 
 	if (locked)
-		spin_unlock(&up->port.lock);
+		uart_port_unlock(&up->port);
 	local_irq_restore(flags);
 	clk_disable(up->clk);
 
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index f820a09cb..a7db08431 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -492,9 +492,9 @@ static void qcom_geni_serial_console_write(struct console *co, const char *s,
 
 	uport = &port->uport;
 	if (oops_in_progress)
-		locked = spin_trylock_irqsave(&uport->lock, flags);
+		locked = uart_port_trylock_irqsave(uport, &flags);
 	else
-		spin_lock_irqsave(&uport->lock, flags);
+		uart_port_lock_irqsave(uport, &flags);
 
 	geni_status = readl(uport->membase + SE_GENI_STATUS);
 
@@ -530,7 +530,7 @@ static void qcom_geni_serial_console_write(struct console *co, const char *s,
 	qcom_geni_serial_setup_tx(uport, port->tx_remaining);
 
 	if (locked)
-		spin_unlock_irqrestore(&uport->lock, flags);
+		uart_port_unlock_irqrestore(uport, flags);
 }
 
 static void handle_rx_console(struct uart_port *uport, u32 bytes, bool drop)
@@ -992,7 +992,7 @@ static irqreturn_t qcom_geni_serial_isr(int isr, void *dev)
 	if (uport->suspended)
 		return IRQ_NONE;
 
-	spin_lock(&uport->lock);
+	uart_port_lock(uport);
 
 	m_irq_status = readl(uport->membase + SE_GENI_M_IRQ_STATUS);
 	s_irq_status = readl(uport->membase + SE_GENI_S_IRQ_STATUS);
diff --git a/drivers/tty/serial/rda-uart.c b/drivers/tty/serial/rda-uart.c
index be5c842b5..d824c8318 100644
--- a/drivers/tty/serial/rda-uart.c
+++ b/drivers/tty/serial/rda-uart.c
@@ -139,12 +139,12 @@ static unsigned int rda_uart_tx_empty(struct uart_port *port)
 	unsigned int ret;
 	u32 val;
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 
 	val = rda_uart_read(port, RDA_UART_STATUS);
 	ret = (val & RDA_UART_TX_FIFO_MASK) ? TIOCSER_TEMT : 0;
 
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 
 	return ret;
 }
@@ -246,7 +246,7 @@ static void rda_uart_set_termios(struct uart_port *port,
 	unsigned int baud;
 	u32 irq_mask;
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 
 	baud = uart_get_baud_rate(port, termios, old, 9600, port->uartclk / 4);
 	rda_uart_change_baudrate(rda_port, baud);
@@ -325,7 +325,7 @@ static void rda_uart_set_termios(struct uart_port *port,
 	/* update the per-port timeout */
 	uart_update_timeout(port, termios->c_cflag, baud);
 
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }
 
 static void rda_uart_send_chars(struct uart_port *port)
@@ -408,7 +408,7 @@ static irqreturn_t rda_interrupt(int irq, void *dev_id)
 	unsigned long flags;
 	u32 val, irq_mask;
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 
 	/* Clear IRQ cause */
 	val = rda_uart_read(port, RDA_UART_IRQ_CAUSE);
@@ -425,7 +425,7 @@ static irqreturn_t rda_interrupt(int irq, void *dev_id)
 		rda_uart_send_chars(port);
 	}
 
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 
 	return IRQ_HANDLED;
 }
@@ -436,16 +436,16 @@ static int rda_uart_startup(struct uart_port *port)
 	int ret;
 	u32 val;
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 	rda_uart_write(port, 0, RDA_UART_IRQ_MASK);
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 
 	ret = request_irq(port->irq, rda_interrupt, IRQF_NO_SUSPEND,
 			  "rda-uart", port);
 	if (ret)
 		return ret;
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 
 	val = rda_uart_read(port, RDA_UART_CTRL);
 	val |= RDA_UART_ENABLE;
@@ -456,7 +456,7 @@ static int rda_uart_startup(struct uart_port *port)
 	val |= (RDA_UART_RX_DATA_AVAILABLE | RDA_UART_RX_TIMEOUT);
 	rda_uart_write(port, val, RDA_UART_IRQ_MASK);
 
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 
 	return 0;
 }
@@ -466,7 +466,7 @@ static void rda_uart_shutdown(struct uart_port *port)
 	unsigned long flags;
 	u32 val;
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 
 	rda_uart_stop_tx(port);
 	rda_uart_stop_rx(port);
@@ -475,7 +475,7 @@ static void rda_uart_shutdown(struct uart_port *port)
 	val &= ~RDA_UART_ENABLE;
 	rda_uart_write(port, val, RDA_UART_CTRL);
 
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }
 
 static const char *rda_uart_type(struct uart_port *port)
@@ -515,7 +515,7 @@ static void rda_uart_config_port(struct uart_port *port, int flags)
 		rda_uart_request_port(port);
 	}
 
-	spin_lock_irqsave(&port->lock, irq_flags);
+	uart_port_lock_irqsave(port, &irq_flags);
 
 	/* Clear mask, so no surprise interrupts. */
 	rda_uart_write(port, 0, RDA_UART_IRQ_MASK);
@@ -523,7 +523,7 @@ static void rda_uart_config_port(struct uart_port *port, int flags)
 	/* Clear status register */
 	rda_uart_write(port, 0, RDA_UART_STATUS);
 
-	spin_unlock_irqrestore(&port->lock, irq_flags);
+	uart_port_unlock_irqrestore(port, irq_flags);
 }
 
 static void rda_uart_release_port(struct uart_port *port)
@@ -597,9 +597,9 @@ static void rda_uart_port_write(struct uart_port *port, const char *s,
 	if (port->sysrq) {
 		locked = 0;
 	} else if (oops_in_progress) {
-		locked = spin_trylock(&port->lock);
+		locked = uart_port_trylock(port);
 	} else {
-		spin_lock(&port->lock);
+		uart_port_lock(port);
 		locked = 1;
 	}
 
@@ -615,7 +615,7 @@ static void rda_uart_port_write(struct uart_port *port, const char *s,
 	rda_uart_write(port, old_irq_mask, RDA_UART_IRQ_MASK);
 
 	if (locked)
-		spin_unlock(&port->lock);
+		uart_port_unlock(port);
 
 	local_irq_restore(flags);
 }
diff --git a/drivers/tty/serial/rp2.c b/drivers/tty/serial/rp2.c
index 5a1de6044..45be2b881 100644
--- a/drivers/tty/serial/rp2.c
+++ b/drivers/tty/serial/rp2.c
@@ -276,9 +276,9 @@ static unsigned int rp2_uart_tx_empty(struct uart_port *port)
 	 * But the TXEMPTY bit doesn't seem to work unless the TX IRQ is
 	 * enabled.
 	 */
-	spin_lock_irqsave(&up->port.lock, flags);
+	uart_port_lock_irqsave(&up->port, &flags);
 	tx_fifo_bytes = readw(up->base + RP2_TX_FIFO_COUNT);
-	spin_unlock_irqrestore(&up->port.lock, flags);
+	uart_port_unlock_irqrestore(&up->port, flags);
 
 	return tx_fifo_bytes ? 0 : TIOCSER_TEMT;
 }
@@ -323,10 +323,10 @@ static void rp2_uart_break_ctl(struct uart_port *port, int break_state)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 	rp2_rmw(port_to_up(port), RP2_TXRX_CTL, RP2_TXRX_CTL_BREAK_m,
 		break_state ? RP2_TXRX_CTL_BREAK_m : 0);
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }
 
 static void rp2_uart_enable_ms(struct uart_port *port)
@@ -383,7 +383,7 @@ static void rp2_uart_set_termios(struct uart_port *port, struct ktermios *new,
 	if (tty_termios_baud_rate(new))
 		tty_termios_encode_baud_rate(new, baud, baud);
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 
 	/* ignore all characters if CREAD is not set */
 	port->ignore_status_mask = (new->c_cflag & CREAD) ? 0 : RP2_DUMMY_READ;
@@ -391,7 +391,7 @@ static void rp2_uart_set_termios(struct uart_port *port, struct ktermios *new,
 	__rp2_uart_set_termios(up, new->c_cflag, new->c_iflag, baud_div);
 	uart_update_timeout(port, new->c_cflag, baud);
 
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }
 
 static void rp2_rx_chars(struct rp2_uart_port *up)
@@ -440,7 +440,7 @@ static void rp2_ch_interrupt(struct rp2_uart_port *up)
 {
 	u32 status;
 
-	spin_lock(&up->port.lock);
+	uart_port_lock(&up->port);
 
 	/*
 	 * The IRQ status bits are clear-on-write.  Other status bits in
@@ -456,7 +456,7 @@ static void rp2_ch_interrupt(struct rp2_uart_port *up)
 	if (status & RP2_CHAN_STAT_MS_CHANGED_MASK)
 		wake_up_interruptible(&up->port.state->port.delta_msr_wait);
 
-	spin_unlock(&up->port.lock);
+	uart_port_unlock(&up->port);
 }
 
 static int rp2_asic_interrupt(struct rp2_card *card, unsigned int asic_id)
@@ -516,10 +516,10 @@ static void rp2_uart_shutdown(struct uart_port *port)
 
 	rp2_uart_break_ctl(port, 0);
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 	rp2_mask_ch_irq(up, up->idx, 0);
 	rp2_rmw(up, RP2_CHAN_STAT, 0, 0);
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }
 
 static const char *rp2_uart_type(struct uart_port *port)
diff --git a/drivers/tty/serial/sa1100.c b/drivers/tty/serial/sa1100.c
index ad011f1e2..be7bcd75d 100644
--- a/drivers/tty/serial/sa1100.c
+++ b/drivers/tty/serial/sa1100.c
@@ -115,9 +115,9 @@ static void sa1100_timeout(struct timer_list *t)
 	unsigned long flags;
 
 	if (sport->port.state) {
-		spin_lock_irqsave(&sport->port.lock, flags);
+		uart_port_lock_irqsave(&sport->port, &flags);
 		sa1100_mctrl_check(sport);
-		spin_unlock_irqrestore(&sport->port.lock, flags);
+		uart_port_unlock_irqrestore(&sport->port, flags);
 
 		mod_timer(&sport->timer, jiffies + MCTRL_TIMEOUT);
 	}
@@ -247,7 +247,7 @@ static irqreturn_t sa1100_int(int irq, void *dev_id)
 	struct sa1100_port *sport = dev_id;
 	unsigned int status, pass_counter = 0;
 
-	spin_lock(&sport->port.lock);
+	uart_port_lock(&sport->port);
 	status = UART_GET_UTSR0(sport);
 	status &= SM_TO_UTSR0(sport->port.read_status_mask) | ~UTSR0_TFS;
 	do {
@@ -276,7 +276,7 @@ static irqreturn_t sa1100_int(int irq, void *dev_id)
 		status &= SM_TO_UTSR0(sport->port.read_status_mask) |
 			  ~UTSR0_TFS;
 	} while (status & (UTSR0_TFS | UTSR0_RFS | UTSR0_RID));
-	spin_unlock(&sport->port.lock);
+	uart_port_unlock(&sport->port);
 
 	return IRQ_HANDLED;
 }
@@ -321,14 +321,14 @@ static void sa1100_break_ctl(struct uart_port *port, int break_state)
 	unsigned long flags;
 	unsigned int utcr3;
 
-	spin_lock_irqsave(&sport->port.lock, flags);
+	uart_port_lock_irqsave(&sport->port, &flags);
 	utcr3 = UART_GET_UTCR3(sport);
 	if (break_state == -1)
 		utcr3 |= UTCR3_BRK;
 	else
 		utcr3 &= ~UTCR3_BRK;
 	UART_PUT_UTCR3(sport, utcr3);
-	spin_unlock_irqrestore(&sport->port.lock, flags);
+	uart_port_unlock_irqrestore(&sport->port, flags);
 }
 
 static int sa1100_startup(struct uart_port *port)
@@ -354,9 +354,9 @@ static int sa1100_startup(struct uart_port *port)
 	/*
 	 * Enable modem status interrupts
 	 */
-	spin_lock_irq(&sport->port.lock);
+	uart_port_lock_irq(&sport->port);
 	sa1100_enable_ms(&sport->port);
-	spin_unlock_irq(&sport->port.lock);
+	uart_port_unlock_irq(&sport->port);
 
 	return 0;
 }
@@ -423,7 +423,7 @@ sa1100_set_termios(struct uart_port *port, struct ktermios *termios,
 
 	del_timer_sync(&sport->timer);
 
-	spin_lock_irqsave(&sport->port.lock, flags);
+	uart_port_lock_irqsave(&sport->port, &flags);
 
 	sport->port.read_status_mask &= UTSR0_TO_SM(UTSR0_TFS);
 	sport->port.read_status_mask |= UTSR1_TO_SM(UTSR1_ROR);
@@ -485,7 +485,7 @@ sa1100_set_termios(struct uart_port *port, struct ktermios *termios,
 	if (UART_ENABLE_MS(&sport->port, termios->c_cflag))
 		sa1100_enable_ms(&sport->port);
 
-	spin_unlock_irqrestore(&sport->port.lock, flags);
+	uart_port_unlock_irqrestore(&sport->port, flags);
 }
 
 static const char *sa1100_type(struct uart_port *port)
diff --git a/drivers/tty/serial/samsung_tty.c b/drivers/tty/serial/samsung_tty.c
index 5a4d88e13..a82b65155 100644
--- a/drivers/tty/serial/samsung_tty.c
+++ b/drivers/tty/serial/samsung_tty.c
@@ -248,7 +248,7 @@ static void s3c24xx_serial_rx_enable(struct uart_port *port)
 	unsigned int ucon, ufcon;
 	int count = 10000;
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 
 	while (--count && !s3c24xx_serial_txempty_nofifo(port))
 		udelay(100);
@@ -262,7 +262,7 @@ static void s3c24xx_serial_rx_enable(struct uart_port *port)
 	wr_regl(port, S3C2410_UCON, ucon);
 
 	ourport->rx_enabled = 1;
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }
 
 static void s3c24xx_serial_rx_disable(struct uart_port *port)
@@ -271,14 +271,14 @@ static void s3c24xx_serial_rx_disable(struct uart_port *port)
 	unsigned long flags;
 	unsigned int ucon;
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 
 	ucon = rd_regl(port, S3C2410_UCON);
 	ucon &= ~S3C2410_UCON_RXIRQMODE;
 	wr_regl(port, S3C2410_UCON, ucon);
 
 	ourport->rx_enabled = 0;
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }
 
 static void s3c24xx_serial_stop_tx(struct uart_port *port)
@@ -344,7 +344,7 @@ static void s3c24xx_serial_tx_dma_complete(void *args)
 			dma->tx_transfer_addr, dma->tx_size,
 			DMA_TO_DEVICE);
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 
 	uart_xmit_advance(port, count);
 	ourport->tx_in_progress = 0;
@@ -353,7 +353,7 @@ static void s3c24xx_serial_tx_dma_complete(void *args)
 		uart_write_wakeup(port);
 
 	s3c24xx_serial_start_next_tx(ourport);
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }
 
 static void enable_tx_dma(struct s3c24xx_uart_port *ourport)
@@ -619,7 +619,7 @@ static void s3c24xx_serial_rx_dma_complete(void *args)
 	received  = dma->rx_bytes_requested - state.residue;
 	async_tx_ack(dma->rx_desc);
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 
 	if (received)
 		s3c24xx_uart_copy_rx_to_tty(ourport, t, received);
@@ -631,7 +631,7 @@ static void s3c24xx_serial_rx_dma_complete(void *args)
 
 	s3c64xx_start_rx_dma(ourport);
 
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }
 
 static void s3c64xx_start_rx_dma(struct s3c24xx_uart_port *ourport)
@@ -722,7 +722,7 @@ static irqreturn_t s3c24xx_serial_rx_chars_dma(void *dev_id)
 	utrstat = rd_regl(port, S3C2410_UTRSTAT);
 	rd_regl(port, S3C2410_UFSTAT);
 
-	spin_lock(&port->lock);
+	uart_port_lock(port);
 
 	if (!(utrstat & S3C2410_UTRSTAT_TIMEOUT)) {
 		s3c64xx_start_rx_dma(ourport);
@@ -751,7 +751,7 @@ static irqreturn_t s3c24xx_serial_rx_chars_dma(void *dev_id)
 	wr_regl(port, S3C2410_UTRSTAT, S3C2410_UTRSTAT_TIMEOUT);
 
 finish:
-	spin_unlock(&port->lock);
+	uart_port_unlock(port);
 
 	return IRQ_HANDLED;
 }
@@ -849,9 +849,9 @@ static irqreturn_t s3c24xx_serial_rx_chars_pio(void *dev_id)
 	struct s3c24xx_uart_port *ourport = dev_id;
 	struct uart_port *port = &ourport->port;
 
-	spin_lock(&port->lock);
+	uart_port_lock(port);
 	s3c24xx_serial_rx_drain_fifo(ourport);
-	spin_unlock(&port->lock);
+	uart_port_unlock(port);
 
 	return IRQ_HANDLED;
 }
@@ -932,11 +932,11 @@ static irqreturn_t s3c24xx_serial_tx_irq(int irq, void *id)
 	struct s3c24xx_uart_port *ourport = id;
 	struct uart_port *port = &ourport->port;
 
-	spin_lock(&port->lock);
+	uart_port_lock(port);
 
 	s3c24xx_serial_tx_chars(ourport);
 
-	spin_unlock(&port->lock);
+	uart_port_unlock(port);
 	return IRQ_HANDLED;
 }
 
@@ -1032,7 +1032,7 @@ static void s3c24xx_serial_break_ctl(struct uart_port *port, int break_state)
 	unsigned long flags;
 	unsigned int ucon;
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 
 	ucon = rd_regl(port, S3C2410_UCON);
 
@@ -1043,7 +1043,7 @@ static void s3c24xx_serial_break_ctl(struct uart_port *port, int break_state)
 
 	wr_regl(port, S3C2410_UCON, ucon);
 
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }
 
 static int s3c24xx_serial_request_dma(struct s3c24xx_uart_port *p)
@@ -1302,7 +1302,7 @@ static int s3c64xx_serial_startup(struct uart_port *port)
 	ourport->rx_enabled = 1;
 	ourport->tx_enabled = 0;
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 
 	ufcon = rd_regl(port, S3C2410_UFCON);
 	ufcon |= S3C2410_UFCON_RESETRX | S5PV210_UFCON_RXTRIG8;
@@ -1312,7 +1312,7 @@ static int s3c64xx_serial_startup(struct uart_port *port)
 
 	enable_rx_pio(ourport);
 
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 
 	/* Enable Rx Interrupt */
 	s3c24xx_clear_bit(port, S3C64XX_UINTM_RXD, S3C64XX_UINTM);
@@ -1340,7 +1340,7 @@ static int apple_s5l_serial_startup(struct uart_port *port)
 	ourport->rx_enabled = 1;
 	ourport->tx_enabled = 0;
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 
 	ufcon = rd_regl(port, S3C2410_UFCON);
 	ufcon |= S3C2410_UFCON_RESETRX | S5PV210_UFCON_RXTRIG8;
@@ -1350,7 +1350,7 @@ static int apple_s5l_serial_startup(struct uart_port *port)
 
 	enable_rx_pio(ourport);
 
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 
 	/* Enable Rx Interrupt */
 	s3c24xx_set_bit(port, APPLE_S5L_UCON_RXTHRESH_ENA, S3C2410_UCON);
@@ -1625,7 +1625,7 @@ static void s3c24xx_serial_set_termios(struct uart_port *port,
 		ulcon |= S3C2410_LCON_PNONE;
 	}
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 
 	dev_dbg(port->dev,
 		"setting ulcon to %08x, brddiv to %d, udivslot %08x\n",
@@ -1683,7 +1683,7 @@ static void s3c24xx_serial_set_termios(struct uart_port *port,
 	if ((termios->c_cflag & CREAD) == 0)
 		port->ignore_status_mask |= RXSTAT_DUMMY_READ;
 
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }
 
 static const char *s3c24xx_serial_type(struct uart_port *port)
@@ -2375,14 +2375,14 @@ s3c24xx_serial_console_write(struct console *co, const char *s,
 	if (cons_uart->sysrq)
 		locked = false;
 	else if (oops_in_progress)
-		locked = spin_trylock_irqsave(&cons_uart->lock, flags);
+		locked = uart_port_trylock_irqsave(cons_uart, &flags);
 	else
-		spin_lock_irqsave(&cons_uart->lock, flags);
+		uart_port_lock_irqsave(cons_uart, &flags);
 
 	uart_console_write(cons_uart, s, count, s3c24xx_serial_console_putchar);
 
 	if (locked)
-		spin_unlock_irqrestore(&cons_uart->lock, flags);
+		uart_port_unlock_irqrestore(cons_uart, flags);
 }
 
 /* Shouldn't be __init, as it can be instantiated from other module */
diff --git a/drivers/tty/serial/sb1250-duart.c b/drivers/tty/serial/sb1250-duart.c
index f3cd69346..dbec29d9a 100644
--- a/drivers/tty/serial/sb1250-duart.c
+++ b/drivers/tty/serial/sb1250-duart.c
@@ -610,7 +610,7 @@ static void sbd_set_termios(struct uart_port *uport, struct ktermios *termios,
 	else
 		aux &= ~M_DUART_CTS_CHNG_ENA;
 
-	spin_lock(&uport->lock);
+	uart_port_lock(uport);
 
 	if (sport->tx_stopped)
 		command |= M_DUART_TX_DIS;
@@ -632,7 +632,7 @@ static void sbd_set_termios(struct uart_port *uport, struct ktermios *termios,
 
 	write_sbdchn(sport, R_DUART_CMD, command);
 
-	spin_unlock(&uport->lock);
+	uart_port_unlock(uport);
 }
 
 
@@ -839,22 +839,22 @@ static void sbd_console_write(struct console *co, const char *s,
 	unsigned int mask;
 
 	/* Disable transmit interrupts and enable the transmitter. */
-	spin_lock_irqsave(&uport->lock, flags);
+	uart_port_lock_irqsave(uport, &flags);
 	mask = read_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2));
 	write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2),
 		     mask & ~M_DUART_IMR_TX);
 	write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_EN);
-	spin_unlock_irqrestore(&uport->lock, flags);
+	uart_port_unlock_irqrestore(uport, flags);
 
 	uart_console_write(&sport->port, s, count, sbd_console_putchar);
 
 	/* Restore transmit interrupts and the transmitter enable. */
-	spin_lock_irqsave(&uport->lock, flags);
+	uart_port_lock_irqsave(uport, &flags);
 	sbd_line_drain(sport);
 	if (sport->tx_stopped)
 		write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_DIS);
 	write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2), mask);
-	spin_unlock_irqrestore(&uport->lock, flags);
+	uart_port_unlock_irqrestore(uport, flags);
 }
 
 static int __init sbd_console_setup(struct console *co, char *options)
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index 673aeda71..a227b3d3a 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -810,6 +810,7 @@ static void sc16is7xx_tx_proc(struct kthread_work *ws)
 {
 	struct uart_port *port = &(to_sc16is7xx_one(ws, tx_work)->port);
 	struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
+	unsigned long flags;
 
 	if ((port->rs485.flags & SER_RS485_ENABLED) &&
 	    (port->rs485.delay_rts_before_send > 0))
@@ -818,6 +819,10 @@ static void sc16is7xx_tx_proc(struct kthread_work *ws)
 	mutex_lock(&one->efr_lock);
 	sc16is7xx_handle_tx(port);
 	mutex_unlock(&one->efr_lock);
+
+	uart_port_lock_irqsave(port, &flags);
+	sc16is7xx_ier_set(port, SC16IS7XX_IER_THRI_BIT);
+	uart_port_unlock_irqrestore(port, flags);
 }
 
 static void sc16is7xx_reconf_rs485(struct uart_port *port)
diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
index d4ec943cb..6d4006b41 100644
--- a/drivers/tty/serial/serial-tegra.c
+++ b/drivers/tty/serial/serial-tegra.c
@@ -411,7 +411,7 @@ static int tegra_set_baudrate(struct tegra_uart_port *tup, unsigned int baud)
 		divisor = DIV_ROUND_CLOSEST(rate, baud * 16);
 	}
 
-	spin_lock_irqsave(&tup->uport.lock, flags);
+	uart_port_lock_irqsave(&tup->uport, &flags);
 	lcr = tup->lcr_shadow;
 	lcr |= UART_LCR_DLAB;
 	tegra_uart_write(tup, lcr, UART_LCR);
@@ -424,7 +424,7 @@ static int tegra_set_baudrate(struct tegra_uart_port *tup, unsigned int baud)
 
 	/* Dummy read to ensure the write is posted */
 	tegra_uart_read(tup, UART_SCR);
-	spin_unlock_irqrestore(&tup->uport.lock, flags);
+	uart_port_unlock_irqrestore(&tup->uport, flags);
 
 	tup->current_baud = baud;
 
@@ -522,13 +522,13 @@ static void tegra_uart_tx_dma_complete(void *args)
 	dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
 	count = tup->tx_bytes_requested - state.residue;
 	async_tx_ack(tup->tx_dma_desc);
-	spin_lock_irqsave(&tup->uport.lock, flags);
+	uart_port_lock_irqsave(&tup->uport, &flags);
 	uart_xmit_advance(&tup->uport, count);
 	tup->tx_in_progress = 0;
 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
 		uart_write_wakeup(&tup->uport);
 	tegra_uart_start_next_tx(tup);
-	spin_unlock_irqrestore(&tup->uport.lock, flags);
+	uart_port_unlock_irqrestore(&tup->uport, flags);
 }
 
 static int tegra_uart_start_tx_dma(struct tegra_uart_port *tup,
@@ -598,13 +598,13 @@ static unsigned int tegra_uart_tx_empty(struct uart_port *u)
 	unsigned int ret = 0;
 	unsigned long flags;
 
-	spin_lock_irqsave(&u->lock, flags);
+	uart_port_lock_irqsave(u, &flags);
 	if (!tup->tx_in_progress) {
 		unsigned long lsr = tegra_uart_read(tup, UART_LSR);
 		if ((lsr & TX_EMPTY_STATUS) == TX_EMPTY_STATUS)
 			ret = TIOCSER_TEMT;
 	}
-	spin_unlock_irqrestore(&u->lock, flags);
+	uart_port_unlock_irqrestore(u, flags);
 	return ret;
 }
 
@@ -727,7 +727,7 @@ static void tegra_uart_rx_dma_complete(void *args)
 	struct dma_tx_state state;
 	enum dma_status status;
 
-	spin_lock_irqsave(&u->lock, flags);
+	uart_port_lock_irqsave(u, &flags);
 
 	status = dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
 
@@ -749,7 +749,7 @@ static void tegra_uart_rx_dma_complete(void *args)
 		set_rts(tup, true);
 
 done:
-	spin_unlock_irqrestore(&u->lock, flags);
+	uart_port_unlock_irqrestore(u, flags);
 }
 
 static void tegra_uart_terminate_rx_dma(struct tegra_uart_port *tup)
@@ -836,7 +836,7 @@ static irqreturn_t tegra_uart_isr(int irq, void *data)
 	bool is_rx_int = false;
 	unsigned long flags;
 
-	spin_lock_irqsave(&u->lock, flags);
+	uart_port_lock_irqsave(u, &flags);
 	while (1) {
 		iir = tegra_uart_read(tup, UART_IIR);
 		if (iir & UART_IIR_NO_INT) {
@@ -852,7 +852,7 @@ static irqreturn_t tegra_uart_isr(int irq, void *data)
 	} else if (is_rx_start) {
 		tegra_uart_start_rx_dma(tup);
 	}
-	spin_unlock_irqrestore(&u->lock, flags);
+	uart_port_unlock_irqrestore(u, flags);
 	return IRQ_HANDLED;
 }
 
@@ -969,11 +969,11 @@ static void tegra_uart_hw_deinit(struct tegra_uart_port *tup)
 		}
 	}
 
-	spin_lock_irqsave(&tup->uport.lock, flags);
+	uart_port_lock_irqsave(&tup->uport, &flags);
 	/* Reset the Rx and Tx FIFOs */
 	tegra_uart_fifo_reset(tup, UART_FCR_CLEAR_XMIT | UART_FCR_CLEAR_RCVR);
 	tup->current_baud = 0;
-	spin_unlock_irqrestore(&tup->uport.lock, flags);
+	uart_port_unlock_irqrestore(&tup->uport, flags);
 
 	tup->rx_in_progress = 0;
 	tup->tx_in_progress = 0;
@@ -1292,7 +1292,7 @@ static void tegra_uart_set_termios(struct uart_port *u,
 	int ret;
 
 	max_divider *= 16;
-	spin_lock_irqsave(&u->lock, flags);
+	uart_port_lock_irqsave(u, &flags);
 
 	/* Changing configuration, it is safe to stop any rx now */
 	if (tup->rts_active)
@@ -1341,7 +1341,7 @@ static void tegra_uart_set_termios(struct uart_port *u,
 	baud = uart_get_baud_rate(u, termios, oldtermios,
 			parent_clk_rate/max_divider,
 			parent_clk_rate/16);
-	spin_unlock_irqrestore(&u->lock, flags);
+	uart_port_unlock_irqrestore(u, flags);
 	ret = tegra_set_baudrate(tup, baud);
 	if (ret < 0) {
 		dev_err(tup->uport.dev, "Failed to set baud rate\n");
@@ -1349,7 +1349,7 @@ static void tegra_uart_set_termios(struct uart_port *u,
 	}
 	if (tty_termios_baud_rate(termios))
 		tty_termios_encode_baud_rate(termios, baud, baud);
-	spin_lock_irqsave(&u->lock, flags);
+	uart_port_lock_irqsave(u, &flags);
 
 	/* Flow control */
 	if (termios->c_cflag & CRTSCTS) {
@@ -1382,7 +1382,7 @@ static void tegra_uart_set_termios(struct uart_port *u,
 	if (termios->c_iflag & IGNBRK)
 		tup->uport.ignore_status_mask |= UART_LSR_BI;
 
-	spin_unlock_irqrestore(&u->lock, flags);
+	uart_port_unlock_irqrestore(u, flags);
 }
 
 static const char *tegra_uart_type(struct uart_port *u)
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 77ff27268..8c0b29ff9 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -79,7 +79,7 @@ static inline void uart_port_deref(struct uart_port *uport)
 	({ \
 		struct uart_port *__uport = uart_port_ref(state); \
 		if (__uport) \
-			spin_lock_irqsave(&__uport->lock, flags); \
+			uart_port_lock_irqsave(__uport, &flags); \
 		__uport; \
 	})
 
@@ -87,7 +87,7 @@ static inline void uart_port_deref(struct uart_port *uport)
 	({ \
 		struct uart_port *__uport = uport; \
 		if (__uport) { \
-			spin_unlock_irqrestore(&__uport->lock, flags); \
+			uart_port_unlock_irqrestore(__uport, flags); \
 			uart_port_deref(__uport); \
 		} \
 	})
@@ -179,12 +179,12 @@ uart_update_mctrl(struct uart_port *port, unsigned int set, unsigned int clear)
 	unsigned long flags;
 	unsigned int old;
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 	old = port->mctrl;
 	port->mctrl = (old & ~clear) | set;
 	if (old != port->mctrl && !(port->rs485.flags & SER_RS485_ENABLED))
 		port->ops->set_mctrl(port, port->mctrl);
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }
 
 #define uart_set_mctrl(port, set)	uart_update_mctrl(port, set, 0)
@@ -219,7 +219,7 @@ static void uart_change_line_settings(struct tty_struct *tty, struct uart_state
 	/*
 	 * Set modem status enables based on termios cflag
 	 */
-	spin_lock_irq(&uport->lock);
+	uart_port_lock_irq(uport);
 	if (termios->c_cflag & CRTSCTS)
 		uport->status |= UPSTAT_CTS_ENABLE;
 	else
@@ -240,7 +240,7 @@ static void uart_change_line_settings(struct tty_struct *tty, struct uart_state
 		else
 			__uart_start(state);
 	}
-	spin_unlock_irq(&uport->lock);
+	uart_port_unlock_irq(uport);
 }
 
 /*
@@ -717,11 +717,11 @@ static void uart_send_xchar(struct tty_struct *tty, char ch)
 	if (port->ops->send_xchar)
 		port->ops->send_xchar(port, ch);
 	else {
-		spin_lock_irqsave(&port->lock, flags);
+		uart_port_lock_irqsave(port, &flags);
 		port->x_char = ch;
 		if (ch)
 			port->ops->start_tx(port);
-		spin_unlock_irqrestore(&port->lock, flags);
+		uart_port_unlock_irqrestore(port, flags);
 	}
 	uart_port_deref(port);
 }
@@ -1107,10 +1107,10 @@ static int uart_tiocmget(struct tty_struct *tty)
 		goto out;
 
 	if (!tty_io_error(tty)) {
-		spin_lock_irq(&uport->lock);
+		uart_port_lock_irq(uport);
 		result = uport->mctrl;
 		result |= uport->ops->get_mctrl(uport);
-		spin_unlock_irq(&uport->lock);
+		uart_port_unlock_irq(uport);
 	}
 out:
 	mutex_unlock(&port->mutex);
@@ -1246,16 +1246,16 @@ static int uart_wait_modem_status(struct uart_state *state, unsigned long arg)
 	uport = uart_port_ref(state);
 	if (!uport)
 		return -EIO;
-	spin_lock_irq(&uport->lock);
+	uart_port_lock_irq(uport);
 	memcpy(&cprev, &uport->icount, sizeof(struct uart_icount));
 	uart_enable_ms(uport);
-	spin_unlock_irq(&uport->lock);
+	uart_port_unlock_irq(uport);
 
 	add_wait_queue(&port->delta_msr_wait, &wait);
 	for (;;) {
-		spin_lock_irq(&uport->lock);
+		uart_port_lock_irq(uport);
 		memcpy(&cnow, &uport->icount, sizeof(struct uart_icount));
-		spin_unlock_irq(&uport->lock);
+		uart_port_unlock_irq(uport);
 
 		set_current_state(TASK_INTERRUPTIBLE);
 
@@ -1300,9 +1300,9 @@ static int uart_get_icount(struct tty_struct *tty,
 	uport = uart_port_ref(state);
 	if (!uport)
 		return -EIO;
-	spin_lock_irq(&uport->lock);
+	uart_port_lock_irq(uport);
 	memcpy(&cnow, &uport->icount, sizeof(struct uart_icount));
-	spin_unlock_irq(&uport->lock);
+	uart_port_unlock_irq(uport);
 	uart_port_deref(uport);
 
 	icount->cts = cnow.cts;
@@ -1455,9 +1455,9 @@ static int uart_rs485_config(struct uart_port *port)
 	uart_set_rs485_termination(port, rs485);
 	uart_set_rs485_rx_during_tx(port, rs485);
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 	ret = port->rs485_config(port, NULL, rs485);
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 	if (ret) {
 		memset(rs485, 0, sizeof(*rs485));
 		/* unset GPIOs */
@@ -1474,9 +1474,9 @@ static int uart_get_rs485_config(struct uart_port *port,
 	unsigned long flags;
 	struct serial_rs485 aux;
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 	aux = port->rs485;
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 
 	if (copy_to_user(rs485, &aux, sizeof(aux)))
 		return -EFAULT;
@@ -1504,7 +1504,7 @@ static int uart_set_rs485_config(struct tty_struct *tty, struct uart_port *port,
 	uart_set_rs485_termination(port, &rs485);
 	uart_set_rs485_rx_during_tx(port, &rs485);
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 	ret = port->rs485_config(port, &tty->termios, &rs485);
 	if (!ret) {
 		port->rs485 = rs485;
@@ -1513,7 +1513,7 @@ static int uart_set_rs485_config(struct tty_struct *tty, struct uart_port *port,
 		if (!(rs485.flags & SER_RS485_ENABLED))
 			port->ops->set_mctrl(port, port->mctrl);
 	}
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 	if (ret) {
 		/* restore old GPIO settings */
 		gpiod_set_value_cansleep(port->rs485_term_gpio,
@@ -1538,9 +1538,9 @@ static int uart_get_iso7816_config(struct uart_port *port,
 	if (!port->iso7816_config)
 		return -ENOTTY;
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 	aux = port->iso7816;
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 
 	if (copy_to_user(iso7816, &aux, sizeof(aux)))
 		return -EFAULT;
@@ -1569,9 +1569,9 @@ static int uart_set_iso7816_config(struct uart_port *port,
 		if (iso7816.reserved[i])
 			return -EINVAL;
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 	ret = port->iso7816_config(port, &iso7816);
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 	if (ret)
 		return ret;
 
@@ -1788,9 +1788,9 @@ static void uart_tty_port_shutdown(struct tty_port *port)
 	if (WARN(!uport, "detached port still initialized!\n"))
 		return;
 
-	spin_lock_irq(&uport->lock);
+	uart_port_lock_irq(uport);
 	uport->ops->stop_rx(uport);
-	spin_unlock_irq(&uport->lock);
+	uart_port_unlock_irq(uport);
 
 	serial_base_port_shutdown(uport);
 	uart_port_shutdown(port);
@@ -1805,11 +1805,11 @@ static void uart_tty_port_shutdown(struct tty_port *port)
 	/*
 	 * Free the transmit buffer.
 	 */
-	spin_lock_irq(&uport->lock);
+	uart_port_lock_irq(uport);
 	uart_circ_clear(&state->xmit);
 	buf = state->xmit.buf;
 	state->xmit.buf = NULL;
-	spin_unlock_irq(&uport->lock);
+	uart_port_unlock_irq(uport);
 
 	free_page((unsigned long)buf);
 
@@ -1952,10 +1952,10 @@ static bool uart_carrier_raised(struct tty_port *port)
 	 */
 	if (WARN_ON(!uport))
 		return true;
-	spin_lock_irq(&uport->lock);
+	uart_port_lock_irq(uport);
 	uart_enable_ms(uport);
 	mctrl = uport->ops->get_mctrl(uport);
-	spin_unlock_irq(&uport->lock);
+	uart_port_unlock_irq(uport);
 	uart_port_deref(uport);
 
 	return mctrl & TIOCM_CAR;
@@ -2072,9 +2072,9 @@ static void uart_line_info(struct seq_file *m, struct uart_driver *drv, int i)
 		pm_state = state->pm_state;
 		if (pm_state != UART_PM_STATE_ON)
 			uart_change_pm(state, UART_PM_STATE_ON);
-		spin_lock_irq(&uport->lock);
+		uart_port_lock_irq(uport);
 		status = uport->ops->get_mctrl(uport);
-		spin_unlock_irq(&uport->lock);
+		uart_port_unlock_irq(uport);
 		if (pm_state != UART_PM_STATE_ON)
 			uart_change_pm(state, pm_state);
 
@@ -2413,9 +2413,9 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport)
 	 */
 	if (!console_suspend_enabled && uart_console(uport)) {
 		if (uport->ops->start_rx) {
-			spin_lock_irq(&uport->lock);
+			uart_port_lock_irq(uport);
 			uport->ops->stop_rx(uport);
-			spin_unlock_irq(&uport->lock);
+			uart_port_unlock_irq(uport);
 		}
 		goto unlock;
 	}
@@ -2430,7 +2430,7 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport)
 	tty_port_set_suspended(port, true);
 	tty_port_set_initialized(port, false);
 
-	spin_lock_irq(&uport->lock);
+	uart_port_lock_irq(uport);
 	ops->stop_tx(uport);
 	if (!(uport->rs485.flags & SER_RS485_ENABLED))
 		ops->set_mctrl(uport, 0);
@@ -2438,7 +2438,7 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport)
 	mctrl = uport->mctrl;
 	uport->mctrl = 0;
 	ops->stop_rx(uport);
-	spin_unlock_irq(&uport->lock);
+	uart_port_unlock_irq(uport);
 
 	/*
	 * Wait for the transmitter to empty.
@@ -2510,9 +2510,9 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport)
 			uart_change_pm(state, UART_PM_STATE_ON);
 		uport->ops->set_termios(uport, &termios, NULL);
 		if (!console_suspend_enabled && uport->ops->start_rx) {
-			spin_lock_irq(&uport->lock);
+			uart_port_lock_irq(uport);
 			uport->ops->start_rx(uport);
-			spin_unlock_irq(&uport->lock);
+			uart_port_unlock_irq(uport);
 		}
 		if (console_suspend_enabled)
 			console_start(uport->cons);
@@ -2523,10 +2523,10 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport)
 		int ret;
 
 		uart_change_pm(state, UART_PM_STATE_ON);
-		spin_lock_irq(&uport->lock);
+		uart_port_lock_irq(uport);
 		if (!(uport->rs485.flags & SER_RS485_ENABLED))
 			ops->set_mctrl(uport, 0);
-		spin_unlock_irq(&uport->lock);
+		uart_port_unlock_irq(uport);
 		if (console_suspend_enabled || !uart_console(uport)) {
 			/* Protected by port mutex for now */
 			struct tty_struct *tty = port->tty;
@@ -2536,11 +2536,11 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport)
 			if (tty)
 				uart_change_line_settings(tty, state, NULL);
 			uart_rs485_config(uport);
-			spin_lock_irq(&uport->lock);
+			uart_port_lock_irq(uport);
 			if (!(uport->rs485.flags & SER_RS485_ENABLED))
 				ops->set_mctrl(uport, uport->mctrl);
 			ops->start_tx(uport);
-			spin_unlock_irq(&uport->lock);
+			uart_port_unlock_irq(uport);
 			tty_port_set_initialized(port, true);
 		} else {
 			/*
@@ -2652,11 +2652,11 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state,
 		 * keep the DTR setting that is set in uart_set_options()
 		 * We probably don't need a spinlock around this, but
 		 */
-		spin_lock_irqsave(&port->lock, flags);
+		uart_port_lock_irqsave(port, &flags);
 		port->mctrl &= TIOCM_DTR;
 		if (!(port->rs485.flags & SER_RS485_ENABLED))
 			port->ops->set_mctrl(port, port->mctrl);
-		spin_unlock_irqrestore(&port->lock, flags);
+		uart_port_unlock_irqrestore(port, flags);
 
 		uart_rs485_config(port);
 
diff --git a/drivers/tty/serial/serial_mctrl_gpio.c b/drivers/tty/serial/serial_mctrl_gpio.c
index 7d5aaa8d4..e51ca593a 100644
--- a/drivers/tty/serial/serial_mctrl_gpio.c
+++ b/drivers/tty/serial/serial_mctrl_gpio.c
@@ -184,7 +184,7 @@ static irqreturn_t mctrl_gpio_irq_handle(int irq, void *context)
 
 	mctrl_gpio_get(gpios, &mctrl);
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 
 	mctrl_diff = mctrl ^ gpios->mctrl_prev;
 	gpios->mctrl_prev = mctrl;
@@ -205,7 +205,7 @@ static irqreturn_t mctrl_gpio_irq_handle(int irq, void *context)
 		wake_up_interruptible(&port->state->port.delta_msr_wait);
 	}
 
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 
 	return IRQ_HANDLED;
 }
diff --git a/drivers/tty/serial/serial_port.c b/drivers/tty/serial/serial_port.c
index ed3953bd0..92a57b243 100644
--- a/drivers/tty/serial/serial_port.c
+++ b/drivers/tty/serial/serial_port.c
@@ -35,14 +35,14 @@ static int serial_port_runtime_resume(struct device *dev)
 		goto out;
 
 	/* Flush any pending TX for the port */
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 	if (!port_dev->tx_enabled)
 		goto unlock;
 	if (__serial_port_busy(port))
 		port->ops->start_tx(port);
 
 unlock:
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 
 out:
 	pm_runtime_mark_last_busy(dev);
diff --git a/drivers/tty/serial/serial_txx9.c b/drivers/tty/serial/serial_txx9.c
index be08fb6f7..eaa980722 100644
--- a/drivers/tty/serial/serial_txx9.c
+++ b/drivers/tty/serial/serial_txx9.c
@@ -335,13 +335,13 @@ static irqreturn_t serial_txx9_interrupt(int irq, void *dev_id)
 	unsigned int status;
 
 	while (1) {
-		spin_lock(&up->lock);
+		uart_port_lock(up);
 		status = sio_in(up, TXX9_SIDISR);
 		if (!(sio_in(up, TXX9_SIDICR) & TXX9_SIDICR_TIE))
 			status &= ~TXX9_SIDISR_TDIS;
 		if (!(status & (TXX9_SIDISR_TDIS | TXX9_SIDISR_RDIS |
 				TXX9_SIDISR_TOUT))) {
-			spin_unlock(&up->lock);
+			uart_port_unlock(up);
 			break;
 		}
 
@@ -353,7 +353,7 @@ static irqreturn_t serial_txx9_interrupt(int irq, void *dev_id)
 		sio_mask(up, TXX9_SIDISR,
 			 TXX9_SIDISR_TDIS | TXX9_SIDISR_RDIS |
 			 TXX9_SIDISR_TOUT);
-		spin_unlock(&up->lock);
+		uart_port_unlock(up);
 
 		if (pass_counter++ > PASS_LIMIT)
 			break;
@@ -367,9 +367,9 @@ static unsigned int serial_txx9_tx_empty(struct uart_port *up)
 	unsigned long flags;
 	unsigned int ret;
 
-	spin_lock_irqsave(&up->lock, flags);
+	uart_port_lock_irqsave(up, &flags);
 	ret = (sio_in(up, TXX9_SICISR) & TXX9_SICISR_TXALS) ? TIOCSER_TEMT : 0;
-	spin_unlock_irqrestore(&up->lock, flags);
+	uart_port_unlock_irqrestore(up, flags);
 
 	return ret;
 }
@@ -399,12 +399,12 @@ static void serial_txx9_break_ctl(struct uart_port *up, int break_state)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&up->lock, flags);
+	uart_port_lock_irqsave(up, &flags);
 	if (break_state == -1)
 		sio_set(up, TXX9_SIFLCR, TXX9_SIFLCR_TBRK);
 	else
 		sio_mask(up, TXX9_SIFLCR, TXX9_SIFLCR_TBRK);
-	spin_unlock_irqrestore(&up->lock, flags);
+	uart_port_unlock_irqrestore(up, flags);
 }
 
 #if defined(CONFIG_SERIAL_TXX9_CONSOLE) || defined(CONFIG_CONSOLE_POLL)
@@ -517,9 +517,9 @@ static int serial_txx9_startup(struct uart_port *up)
 	/*
 	 * Now, initialize the UART
 	 */
-	spin_lock_irqsave(&up->lock, flags);
+	uart_port_lock_irqsave(up, &flags);
 	serial_txx9_set_mctrl(up, up->mctrl);
-	spin_unlock_irqrestore(&up->lock, flags);
+	uart_port_unlock_irqrestore(up, flags);
 
 	/* Enable RX/TX */
 	sio_mask(up, TXX9_SIFLCR, TXX9_SIFLCR_RSDE | TXX9_SIFLCR_TSDE);
@@ -541,9 +541,9 @@ static void serial_txx9_shutdown(struct uart_port *up)
 	 */
 	sio_out(up, TXX9_SIDICR, 0);	/* disable all intrs */
 
-	spin_lock_irqsave(&up->lock, flags);
+	uart_port_lock_irqsave(up, &flags);
 	serial_txx9_set_mctrl(up, up->mctrl);
-	spin_unlock_irqrestore(&up->lock, flags);
+	uart_port_unlock_irqrestore(up, flags);
 
 	/*
	 * Disable break condition
@@ -625,7 +625,7 @@ serial_txx9_set_termios(struct uart_port *up, struct ktermios *termios,
 	 * Ok, we're now changing the port state.  Do it with
 	 * interrupts disabled.
 	 */
-	spin_lock_irqsave(&up->lock, flags);
+	uart_port_lock_irqsave(up, &flags);
 
 	/*
	 * Update the per-port timeout.
@@ -676,7 +676,7 @@ serial_txx9_set_termios(struct uart_port *up, struct ktermios *termios,
 	sio_out(up, TXX9_SIFCR, fcr);
 
 	serial_txx9_set_mctrl(up, up->mctrl);
-	spin_unlock_irqrestore(&up->lock, flags);
+	uart_port_unlock_irqrestore(up, flags);
 }
 
 static void
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index f793624fd..2559c9781 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1205,7 +1205,7 @@ static void sci_dma_tx_complete(void *arg)
 
 	dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 
 	uart_xmit_advance(port, s->tx_dma_len);
 
@@ -1229,7 +1229,7 @@ static void sci_dma_tx_complete(void *arg)
 		}
 	}
 
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }
 
 /* Locking: called with port lock held */
@@ -1325,7 +1325,7 @@ static void sci_dma_rx_complete(void *arg)
 	dev_dbg(port->dev, "%s(%d) active cookie %d\n", __func__, port->line,
 		s->active_rx);
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 
 	active = sci_dma_rx_find_active(s);
 	if (active >= 0)
@@ -1352,20 +1352,20 @@ static void sci_dma_rx_complete(void *arg)
 
 	dma_async_issue_pending(chan);
 
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 	dev_dbg(port->dev, "%s: cookie %d #%d, new active cookie %d\n",
 		__func__, s->cookie_rx[active], active, s->active_rx);
 	return;
 
 fail:
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 	dev_warn(port->dev, "Failed submitting Rx DMA descriptor\n");
 	/* Switch to PIO */
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 	dmaengine_terminate_async(chan);
 	sci_dma_rx_chan_invalidate(s);
 	sci_dma_rx_reenable_irq(s);
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }
 
 static void sci_dma_tx_release(struct sci_port *s)
@@ -1414,13 +1414,13 @@ static int sci_dma_rx_submit(struct sci_port *s, bool port_lock_held)
 fail:
 	/* Switch to PIO */
 	if (!port_lock_held)
-		spin_lock_irqsave(&port->lock, flags);
+		uart_port_lock_irqsave(port, &flags);
 	if (i)
 		dmaengine_terminate_async(chan);
 	sci_dma_rx_chan_invalidate(s);
 	sci_start_rx(port);
 	if (!port_lock_held)
-		spin_unlock_irqrestore(&port->lock, flags);
+		uart_port_unlock_irqrestore(port, flags);
 	return -EAGAIN;
 }
 
@@ -1442,14 +1442,14 @@ static void sci_dma_tx_work_fn(struct work_struct *work)
 	 * transmit till the end, and then the rest. Take the port lock to get a
 	 * consistent xmit buffer state.
 	 */
-	spin_lock_irq(&port->lock);
+	uart_port_lock_irq(port);
 	head = xmit->head;
 	tail = xmit->tail;
 	buf = s->tx_dma_addr + tail;
 	s->tx_dma_len = CIRC_CNT_TO_END(head, tail, UART_XMIT_SIZE);
 	if (!s->tx_dma_len) {
 		/* Transmit buffer has been flushed */
-		spin_unlock_irq(&port->lock);
+		uart_port_unlock_irq(port);
 		return;
 	}
 
@@ -1457,7 +1457,7 @@ static void sci_dma_tx_work_fn(struct work_struct *work)
 					   DMA_MEM_TO_DEV,
 					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 	if (!desc) {
-		spin_unlock_irq(&port->lock);
+		uart_port_unlock_irq(port);
 		dev_warn(port->dev, "Failed preparing Tx DMA descriptor\n");
 		goto switch_to_pio;
 	}
@@ -1469,12 +1469,12 @@ static void sci_dma_tx_work_fn(struct work_struct *work)
 	desc->callback_param = s;
 	s->cookie_tx = dmaengine_submit(desc);
 	if (dma_submit_error(s->cookie_tx)) {
-		spin_unlock_irq(&port->lock);
+		uart_port_unlock_irq(port);
 		dev_warn(port->dev, "Failed submitting Tx DMA descriptor\n");
 		goto switch_to_pio;
 	}
 
-	spin_unlock_irq(&port->lock);
+	uart_port_unlock_irq(port);
 	dev_dbg(port->dev, "%s: %p: %d...%d, cookie %d\n",
 		__func__, xmit->buf, tail, head, s->cookie_tx);
 
@@ -1482,10 +1482,10 @@ static void sci_dma_tx_work_fn(struct work_struct *work)
 	return;
 
 switch_to_pio:
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 	s->chan_tx = NULL;
 	sci_start_tx(port);
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 	return;
 }
 
@@ -1502,17 +1502,17 @@ static enum hrtimer_restart sci_dma_rx_timer_fn(struct hrtimer *t)
 
 	dev_dbg(port->dev, "DMA Rx timed out\n");
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 
 	active = sci_dma_rx_find_active(s);
 	if (active < 0) {
-		spin_unlock_irqrestore(&port->lock, flags);
+		uart_port_unlock_irqrestore(port, flags);
 		return HRTIMER_NORESTART;
 	}
 
 	status = dmaengine_tx_status(s->chan_rx, s->active_rx, &state);
 	if (status == DMA_COMPLETE) {
-		spin_unlock_irqrestore(&port->lock, flags);
|
||
+ uart_port_unlock_irqrestore(port, flags);
|
||
dev_dbg(port->dev, "Cookie %d #%d has already completed\n",
|
||
s->active_rx, active);
|
||
|
||
@@ -1530,7 +1530,7 @@ static enum hrtimer_restart sci_dma_rx_timer_fn(struct hrtimer *t)
|
||
*/
|
||
status = dmaengine_tx_status(s->chan_rx, s->active_rx, &state);
|
||
if (status == DMA_COMPLETE) {
|
||
- spin_unlock_irqrestore(&port->lock, flags);
|
||
+ uart_port_unlock_irqrestore(port, flags);
|
||
dev_dbg(port->dev, "Transaction complete after DMA engine was stopped");
|
||
return HRTIMER_NORESTART;
|
||
}
|
||
@@ -1551,7 +1551,7 @@ static enum hrtimer_restart sci_dma_rx_timer_fn(struct hrtimer *t)
|
||
|
||
sci_dma_rx_reenable_irq(s);
|
||
|
||
- spin_unlock_irqrestore(&port->lock, flags);
|
||
+ uart_port_unlock_irqrestore(port, flags);
|
||
|
||
return HRTIMER_NORESTART;
|
||
}
|
||
@@ -1775,9 +1775,9 @@ static irqreturn_t sci_tx_interrupt(int irq, void *ptr)
|
||
struct uart_port *port = ptr;
|
||
unsigned long flags;
|
||
|
||
- spin_lock_irqsave(&port->lock, flags);
|
||
+ uart_port_lock_irqsave(port, &flags);
|
||
sci_transmit_chars(port);
|
||
- spin_unlock_irqrestore(&port->lock, flags);
|
||
+ uart_port_unlock_irqrestore(port, flags);
|
||
|
||
return IRQ_HANDLED;
|
||
}
|
||
@@ -1791,11 +1791,11 @@ static irqreturn_t sci_tx_end_interrupt(int irq, void *ptr)
|
||
if (port->type != PORT_SCI)
|
||
return sci_tx_interrupt(irq, ptr);
|
||
|
||
- spin_lock_irqsave(&port->lock, flags);
|
||
+ uart_port_lock_irqsave(port, &flags);
|
||
ctrl = serial_port_in(port, SCSCR);
|
||
ctrl &= ~(SCSCR_TE | SCSCR_TEIE);
|
||
serial_port_out(port, SCSCR, ctrl);
|
||
- spin_unlock_irqrestore(&port->lock, flags);
|
||
+ uart_port_unlock_irqrestore(port, flags);
|
||
|
||
return IRQ_HANDLED;
|
||
}
|
||
@@ -2192,7 +2192,7 @@ static void sci_break_ctl(struct uart_port *port, int break_state)
|
||
return;
|
||
}
|
||
|
||
- spin_lock_irqsave(&port->lock, flags);
|
||
+ uart_port_lock_irqsave(port, &flags);
|
||
scsptr = serial_port_in(port, SCSPTR);
|
||
scscr = serial_port_in(port, SCSCR);
|
||
|
||
@@ -2206,7 +2206,7 @@ static void sci_break_ctl(struct uart_port *port, int break_state)
|
||
|
||
serial_port_out(port, SCSPTR, scsptr);
|
||
serial_port_out(port, SCSCR, scscr);
|
||
- spin_unlock_irqrestore(&port->lock, flags);
|
||
+ uart_port_unlock_irqrestore(port, flags);
|
||
}
|
||
|
||
static int sci_startup(struct uart_port *port)
|
||
@@ -2238,7 +2238,7 @@ static void sci_shutdown(struct uart_port *port)
|
||
s->autorts = false;
|
||
mctrl_gpio_disable_ms(to_sci_port(port)->gpios);
|
||
|
||
- spin_lock_irqsave(&port->lock, flags);
|
||
+ uart_port_lock_irqsave(port, &flags);
|
||
sci_stop_rx(port);
|
||
sci_stop_tx(port);
|
||
/*
|
||
@@ -2248,7 +2248,7 @@ static void sci_shutdown(struct uart_port *port)
|
||
scr = serial_port_in(port, SCSCR);
|
||
serial_port_out(port, SCSCR, scr &
|
||
(SCSCR_CKE1 | SCSCR_CKE0 | s->hscif_tot));
|
||
- spin_unlock_irqrestore(&port->lock, flags);
|
||
+ uart_port_unlock_irqrestore(port, flags);
|
||
|
||
#ifdef CONFIG_SERIAL_SH_SCI_DMA
|
||
if (s->chan_rx_saved) {
|
||
@@ -2550,7 +2550,7 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
|
||
serial_port_out(port, SCCKS, sccks);
|
||
}
|
||
|
||
- spin_lock_irqsave(&port->lock, flags);
|
||
+ uart_port_lock_irqsave(port, &flags);
|
||
|
||
sci_reset(port);
|
||
|
||
@@ -2672,7 +2672,7 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
|
||
if ((termios->c_cflag & CREAD) != 0)
|
||
sci_start_rx(port);
|
||
|
||
- spin_unlock_irqrestore(&port->lock, flags);
|
||
+ uart_port_unlock_irqrestore(port, flags);
|
||
|
||
sci_port_disable(s);
|
||
|
||
@@ -3057,9 +3057,9 @@ static void serial_console_write(struct console *co, const char *s,
|
||
if (port->sysrq)
|
||
locked = 0;
|
||
else if (oops_in_progress)
|
||
- locked = spin_trylock_irqsave(&port->lock, flags);
|
||
+ locked = uart_port_trylock_irqsave(port, &flags);
|
||
else
|
||
- spin_lock_irqsave(&port->lock, flags);
|
||
+ uart_port_lock_irqsave(port, &flags);
|
||
|
||
/* first save SCSCR then disable interrupts, keep clock source */
|
||
ctrl = serial_port_in(port, SCSCR);
|
||
@@ -3079,7 +3079,7 @@ static void serial_console_write(struct console *co, const char *s,
|
||
serial_port_out(port, SCSCR, ctrl);
|
||
|
||
if (locked)
|
||
- spin_unlock_irqrestore(&port->lock, flags);
|
||
+ uart_port_unlock_irqrestore(port, flags);
|
||
}
|
||
|
||
static int serial_console_setup(struct console *co, char *options)
|
||
diff --git a/drivers/tty/serial/sifive.c b/drivers/tty/serial/sifive.c
index d195c5de5..b296e57a9 100644
--- a/drivers/tty/serial/sifive.c
+++ b/drivers/tty/serial/sifive.c
@@ -521,11 +521,11 @@ static irqreturn_t sifive_serial_irq(int irq, void *dev_id)
 	struct sifive_serial_port *ssp = dev_id;
 	u32 ip;
 
-	spin_lock(&ssp->port.lock);
+	uart_port_lock(&ssp->port);
 
 	ip = __ssp_readl(ssp, SIFIVE_SERIAL_IP_OFFS);
 	if (!ip) {
-		spin_unlock(&ssp->port.lock);
+		uart_port_unlock(&ssp->port);
 		return IRQ_NONE;
 	}
 
@@ -534,7 +534,7 @@ static irqreturn_t sifive_serial_irq(int irq, void *dev_id)
 	if (ip & SIFIVE_SERIAL_IP_TXWM_MASK)
 		__ssp_transmit_chars(ssp);
 
-	spin_unlock(&ssp->port.lock);
+	uart_port_unlock(&ssp->port);
 
 	return IRQ_HANDLED;
 }
@@ -653,7 +653,7 @@ static void sifive_serial_set_termios(struct uart_port *port,
 			ssp->port.uartclk / 16);
 	__ssp_update_baud_rate(ssp, rate);
 
-	spin_lock_irqsave(&ssp->port.lock, flags);
+	uart_port_lock_irqsave(&ssp->port, &flags);
 
 	/* Update the per-port timeout */
 	uart_update_timeout(port, termios->c_cflag, rate);
@@ -670,7 +670,7 @@ static void sifive_serial_set_termios(struct uart_port *port,
 	if (v != old_v)
 		__ssp_writel(v, SIFIVE_SERIAL_RXCTRL_OFFS, ssp);
 
-	spin_unlock_irqrestore(&ssp->port.lock, flags);
+	uart_port_unlock_irqrestore(&ssp->port, flags);
 }
 
 static void sifive_serial_release_port(struct uart_port *port)
@@ -795,9 +795,9 @@ static void sifive_serial_console_write(struct console *co, const char *s,
 	if (ssp->port.sysrq)
 		locked = 0;
 	else if (oops_in_progress)
-		locked = spin_trylock(&ssp->port.lock);
+		locked = uart_port_trylock(&ssp->port);
 	else
-		spin_lock(&ssp->port.lock);
+		uart_port_lock(&ssp->port);
 
 	ier = __ssp_readl(ssp, SIFIVE_SERIAL_IE_OFFS);
 	__ssp_writel(0, SIFIVE_SERIAL_IE_OFFS, ssp);
@@ -807,7 +807,7 @@ static void sifive_serial_console_write(struct console *co, const char *s,
 	__ssp_writel(ier, SIFIVE_SERIAL_IE_OFFS, ssp);
 
 	if (locked)
-		spin_unlock(&ssp->port.lock);
+		uart_port_unlock(&ssp->port);
 	local_irq_restore(flags);
 }
 
diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c
index f328fa572..f257525f9 100644
--- a/drivers/tty/serial/sprd_serial.c
+++ b/drivers/tty/serial/sprd_serial.c
@@ -247,7 +247,7 @@ static void sprd_complete_tx_dma(void *data)
 	struct circ_buf *xmit = &port->state->xmit;
 	unsigned long flags;
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 	dma_unmap_single(port->dev, sp->tx_dma.phys_addr,
 			 sp->tx_dma.trans_len, DMA_TO_DEVICE);
 
@@ -260,7 +260,7 @@ static void sprd_complete_tx_dma(void *data)
 	    sprd_tx_dma_config(port))
 		sp->tx_dma.trans_len = 0;
 
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }
 
 static int sprd_uart_dma_submit(struct uart_port *port,
@@ -429,13 +429,13 @@ static void sprd_complete_rx_dma(void *data)
 	enum dma_status status;
 	unsigned long flags;
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 
 	status = dmaengine_tx_status(sp->rx_dma.chn,
 				     sp->rx_dma.cookie, &state);
 	if (status != DMA_COMPLETE) {
 		sprd_stop_rx(port);
-		spin_unlock_irqrestore(&port->lock, flags);
+		uart_port_unlock_irqrestore(port, flags);
 		return;
 	}
 
@@ -449,7 +449,7 @@ static void sprd_complete_rx_dma(void *data)
 	if (sprd_start_dma_rx(port))
 		sprd_stop_rx(port);
 
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }
 
 static int sprd_start_dma_rx(struct uart_port *port)
@@ -638,12 +638,12 @@ static irqreturn_t sprd_handle_irq(int irq, void *dev_id)
 	struct uart_port *port = dev_id;
 	unsigned int ims;
 
-	spin_lock(&port->lock);
+	uart_port_lock(port);
 
 	ims = serial_in(port, SPRD_IMSR);
 
 	if (!ims) {
-		spin_unlock(&port->lock);
+		uart_port_unlock(port);
 		return IRQ_NONE;
 	}
 
@@ -660,7 +660,7 @@ static irqreturn_t sprd_handle_irq(int irq, void *dev_id)
 	if (ims & SPRD_IMSR_TX_FIFO_EMPTY)
 		sprd_tx(port);
 
-	spin_unlock(&port->lock);
+	uart_port_unlock(port);
 
 	return IRQ_HANDLED;
 }
@@ -727,13 +727,13 @@ static int sprd_startup(struct uart_port *port)
 	serial_out(port, SPRD_CTL1, fc);
 
 	/* enable interrupt */
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 	ien = serial_in(port, SPRD_IEN);
 	ien |= SPRD_IEN_BREAK_DETECT | SPRD_IEN_TIMEOUT;
 	if (!sp->rx_dma.enable)
 		ien |= SPRD_IEN_RX_FULL;
 	serial_out(port, SPRD_IEN, ien);
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 
 	return 0;
 }
@@ -793,7 +793,7 @@ static void sprd_set_termios(struct uart_port *port, struct ktermios *termios,
 			lcr |= SPRD_LCR_EVEN_PAR;
 	}
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 
 	/* update the per-port timeout */
 	uart_update_timeout(port, termios->c_cflag, baud);
@@ -837,7 +837,7 @@ static void sprd_set_termios(struct uart_port *port, struct ktermios *termios,
 	fc |= RX_TOUT_THLD_DEF | RX_HFC_THLD_DEF;
 	serial_out(port, SPRD_CTL1, fc);
 
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 
 	/* Don't rewrite B0 */
 	if (tty_termios_baud_rate(termios))
@@ -974,9 +974,9 @@ static void sprd_console_write(struct console *co, const char *s,
 	if (port->sysrq)
 		locked = 0;
 	else if (oops_in_progress)
-		locked = spin_trylock_irqsave(&port->lock, flags);
+		locked = uart_port_trylock_irqsave(port, &flags);
 	else
-		spin_lock_irqsave(&port->lock, flags);
+		uart_port_lock_irqsave(port, &flags);
 
 	uart_console_write(port, s, count, sprd_console_putchar);
 
@@ -984,7 +984,7 @@ static void sprd_console_write(struct console *co, const char *s,
 	wait_for_xmitr(port);
 
 	if (locked)
-		spin_unlock_irqrestore(&port->lock, flags);
+		uart_port_unlock_irqrestore(port, flags);
 }
 
 static int sprd_console_setup(struct console *co, char *options)
diff --git a/drivers/tty/serial/st-asc.c b/drivers/tty/serial/st-asc.c
index 92b9f6894..a821f5d76 100644
--- a/drivers/tty/serial/st-asc.c
+++ b/drivers/tty/serial/st-asc.c
@@ -319,7 +319,7 @@ static irqreturn_t asc_interrupt(int irq, void *ptr)
 	struct uart_port *port = ptr;
 	u32 status;
 
-	spin_lock(&port->lock);
+	uart_port_lock(port);
 
 	status = asc_in(port, ASC_STA);
 
@@ -334,7 +334,7 @@ static irqreturn_t asc_interrupt(int irq, void *ptr)
 		asc_transmit_chars(port);
 	}
 
-	spin_unlock(&port->lock);
+	uart_port_unlock(port);
 
 	return IRQ_HANDLED;
 }
@@ -452,10 +452,10 @@ static void asc_pm(struct uart_port *port, unsigned int state,
 		 * we can come to turning it off. Note this is not called with
 		 * the port spinlock held.
		 */
-		spin_lock_irqsave(&port->lock, flags);
+		uart_port_lock_irqsave(port, &flags);
 		ctl = asc_in(port, ASC_CTL) & ~ASC_CTL_RUN;
 		asc_out(port, ASC_CTL, ctl);
-		spin_unlock_irqrestore(&port->lock, flags);
+		uart_port_unlock_irqrestore(port, flags);
 		clk_disable_unprepare(ascport->clk);
 		break;
 	}
@@ -480,7 +480,7 @@ static void asc_set_termios(struct uart_port *port, struct ktermios *termios,
 	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
 	cflag = termios->c_cflag;
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 
 	/* read control register */
 	ctrl_val = asc_in(port, ASC_CTL);
@@ -594,7 +594,7 @@ static void asc_set_termios(struct uart_port *port, struct ktermios *termios,
 	/* write final value and enable port */
 	asc_out(port, ASC_CTL, (ctrl_val | ASC_CTL_RUN));
 
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }
 
 static const char *asc_type(struct uart_port *port)
@@ -849,9 +849,9 @@ static void asc_console_write(struct console *co, const char *s, unsigned count)
 	if (port->sysrq)
 		locked = 0; /* asc_interrupt has already claimed the lock */
 	else if (oops_in_progress)
-		locked = spin_trylock_irqsave(&port->lock, flags);
+		locked = uart_port_trylock_irqsave(port, &flags);
 	else
-		spin_lock_irqsave(&port->lock, flags);
+		uart_port_lock_irqsave(port, &flags);
 
 	/*
 	 * Disable interrupts so we don't get the IRQ line bouncing
@@ -869,7 +869,7 @@ static void asc_console_write(struct console *co, const char *s, unsigned count)
 	asc_out(port, ASC_INTEN, intenable);
 
 	if (locked)
-		spin_unlock_irqrestore(&port->lock, flags);
+		uart_port_unlock_irqrestore(port, flags);
 }
 
 static int asc_console_setup(struct console *co, char *options)
diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
index 9ef90bb30..b963f9ccb 100644
--- a/drivers/tty/serial/stm32-usart.c
+++ b/drivers/tty/serial/stm32-usart.c
@@ -535,7 +535,7 @@ static void stm32_usart_rx_dma_complete(void *arg)
 	unsigned int size;
 	unsigned long flags;
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 	size = stm32_usart_receive_chars(port, false);
 	uart_unlock_and_check_sysrq_irqrestore(port, flags);
 	if (size)
@@ -641,9 +641,9 @@ static void stm32_usart_tx_dma_complete(void *arg)
 	stm32_usart_tx_dma_terminate(stm32port);
 
 	/* Let's see if we have pending data to send */
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 	stm32_usart_transmit_chars(port);
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }
 
 static void stm32_usart_tx_interrupt_enable(struct uart_port *port)
@@ -892,7 +892,7 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
 	if (!stm32_port->throttled) {
 		if (((sr & USART_SR_RXNE) && !stm32_usart_rx_dma_started(stm32_port)) ||
 		    ((sr & USART_SR_ERR_MASK) && stm32_usart_rx_dma_started(stm32_port))) {
-			spin_lock(&port->lock);
+			uart_port_lock(port);
 			size = stm32_usart_receive_chars(port, false);
 			uart_unlock_and_check_sysrq(port);
 			if (size)
@@ -902,15 +902,15 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
 	}
 
 	if ((sr & USART_SR_TXE) && !(stm32_port->tx_ch)) {
-		spin_lock(&port->lock);
+		uart_port_lock(port);
 		stm32_usart_transmit_chars(port);
-		spin_unlock(&port->lock);
+		uart_port_unlock(port);
 		ret = IRQ_HANDLED;
 	}
 
 	/* Receiver timeout irq for DMA RX */
 	if (stm32_usart_rx_dma_started(stm32_port) && !stm32_port->throttled) {
-		spin_lock(&port->lock);
+		uart_port_lock(port);
 		size = stm32_usart_receive_chars(port, false);
 		uart_unlock_and_check_sysrq(port);
 		if (size)
@@ -999,7 +999,7 @@ static void stm32_usart_throttle(struct uart_port *port)
 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
 	unsigned long flags;
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 
 	/*
 	 * Pause DMA transfer, so the RX data gets queued into the FIFO.
@@ -1012,7 +1012,7 @@ static void stm32_usart_throttle(struct uart_port *port)
 		stm32_usart_clr_bits(port, ofs->cr3, stm32_port->cr3_irq);
 
 	stm32_port->throttled = true;
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }
 
 /* Unthrottle the remote, the input buffer can now accept data. */
@@ -1022,7 +1022,7 @@ static void stm32_usart_unthrottle(struct uart_port *port)
 	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
 	unsigned long flags;
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 	stm32_usart_set_bits(port, ofs->cr1, stm32_port->cr1_irq);
 	if (stm32_port->cr3_irq)
 		stm32_usart_set_bits(port, ofs->cr3, stm32_port->cr3_irq);
@@ -1036,7 +1036,7 @@ static void stm32_usart_unthrottle(struct uart_port *port)
 	if (stm32_port->rx_ch)
 		stm32_usart_rx_dma_start_or_resume(port);
 
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }
 
 /* Receive stop */
@@ -1165,7 +1165,7 @@ static void stm32_usart_set_termios(struct uart_port *port,
 
 	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 8);
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 
 	ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr,
 						isr,
@@ -1356,7 +1356,7 @@ static void stm32_usart_set_termios(struct uart_port *port,
 	writel_relaxed(cr1, port->membase + ofs->cr1);
 
 	stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 
 	/* Handle modem control interrupts */
 	if (UART_ENABLE_MS(port, termios->c_cflag))
@@ -1406,9 +1406,9 @@ static void stm32_usart_pm(struct uart_port *port, unsigned int state,
 		pm_runtime_get_sync(port->dev);
 		break;
 	case UART_PM_STATE_OFF:
-		spin_lock_irqsave(&port->lock, flags);
+		uart_port_lock_irqsave(port, &flags);
 		stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
-		spin_unlock_irqrestore(&port->lock, flags);
+		uart_port_unlock_irqrestore(port, flags);
 		pm_runtime_put_sync(port->dev);
 		break;
 	}
@@ -1891,9 +1891,9 @@ static void stm32_usart_console_write(struct console *co, const char *s,
 	int locked = 1;
 
 	if (oops_in_progress)
-		locked = spin_trylock_irqsave(&port->lock, flags);
+		locked = uart_port_trylock_irqsave(port, &flags);
 	else
-		spin_lock_irqsave(&port->lock, flags);
+		uart_port_lock_irqsave(port, &flags);
 
 	/* Save and disable interrupts, enable the transmitter */
 	old_cr1 = readl_relaxed(port->membase + ofs->cr1);
@@ -1907,7 +1907,7 @@ static void stm32_usart_console_write(struct console *co, const char *s,
 	writel_relaxed(old_cr1, port->membase + ofs->cr1);
 
 	if (locked)
-		spin_unlock_irqrestore(&port->lock, flags);
+		uart_port_unlock_irqrestore(port, flags);
 }
 
 static int stm32_usart_console_setup(struct console *co, char *options)
@@ -2042,7 +2042,7 @@ static int __maybe_unused stm32_usart_serial_en_wakeup(struct uart_port *port,
 	 * low-power mode.
 	 */
 	if (stm32_port->rx_ch) {
-		spin_lock_irqsave(&port->lock, flags);
+		uart_port_lock_irqsave(port, &flags);
 		/* Poll data from DMA RX buffer if any */
 		if (!stm32_usart_rx_dma_pause(stm32_port))
 			size += stm32_usart_receive_chars(port, true);
diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c
index c671d674b..5bfc0040f 100644
--- a/drivers/tty/serial/sunhv.c
+++ b/drivers/tty/serial/sunhv.c
@@ -217,10 +217,10 @@ static irqreturn_t sunhv_interrupt(int irq, void *dev_id)
 	struct tty_port *tport;
 	unsigned long flags;
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 	tport = receive_chars(port);
 	transmit_chars(port);
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 
 	if (tport)
 		tty_flip_buffer_push(tport);
@@ -271,7 +271,7 @@ static void sunhv_send_xchar(struct uart_port *port, char ch)
 	if (ch == __DISABLED_CHAR)
 		return;
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 
 	while (limit-- > 0) {
 		long status = sun4v_con_putchar(ch);
@@ -280,7 +280,7 @@ static void sunhv_send_xchar(struct uart_port *port, char ch)
 		udelay(1);
 	}
 
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }
 
 /* port->lock held by caller. */
@@ -295,7 +295,7 @@ static void sunhv_break_ctl(struct uart_port *port, int break_state)
 		unsigned long flags;
 		int limit = 10000;
 
-		spin_lock_irqsave(&port->lock, flags);
+		uart_port_lock_irqsave(port, &flags);
 
 		while (limit-- > 0) {
 			long status = sun4v_con_putchar(CON_BREAK);
@@ -304,7 +304,7 @@ static void sunhv_break_ctl(struct uart_port *port, int break_state)
 			udelay(1);
 		}
 
-		spin_unlock_irqrestore(&port->lock, flags);
+		uart_port_unlock_irqrestore(port, flags);
 	}
 }
 
@@ -328,7 +328,7 @@ static void sunhv_set_termios(struct uart_port *port, struct ktermios *termios,
 	unsigned int iflag, cflag;
 	unsigned long flags;
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 
 	iflag = termios->c_iflag;
 	cflag = termios->c_cflag;
@@ -343,7 +343,7 @@ static void sunhv_set_termios(struct uart_port *port, struct ktermios *termios,
 	uart_update_timeout(port, cflag,
 			    (port->uartclk / (16 * quot)));
 
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }
 
 static const char *sunhv_type(struct uart_port *port)
@@ -437,9 +437,9 @@ static void sunhv_console_write_paged(struct console *con, const char *s, unsign
 	int locked = 1;
 
 	if (port->sysrq || oops_in_progress)
-		locked = spin_trylock_irqsave(&port->lock, flags);
+		locked = uart_port_trylock_irqsave(port, &flags);
 	else
-		spin_lock_irqsave(&port->lock, flags);
+		uart_port_lock_irqsave(port, &flags);
 
 	while (n > 0) {
 		unsigned long ra = __pa(con_write_page);
@@ -470,7 +470,7 @@ static void sunhv_console_write_paged(struct console *con, const char *s, unsign
 	}
 
 	if (locked)
-		spin_unlock_irqrestore(&port->lock, flags);
+		uart_port_unlock_irqrestore(port, flags);
 }
 
 static inline void sunhv_console_putchar(struct uart_port *port, char c)
@@ -492,9 +492,9 @@ static void sunhv_console_write_bychar(struct console *con, const char *s, unsig
 	int i, locked = 1;
 
 	if (port->sysrq || oops_in_progress)
-		locked = spin_trylock_irqsave(&port->lock, flags);
+		locked = uart_port_trylock_irqsave(port, &flags);
 	else
-		spin_lock_irqsave(&port->lock, flags);
+		uart_port_lock_irqsave(port, &flags);
 
 	for (i = 0; i < n; i++) {
 		if (*s == '\n')
@@ -503,7 +503,7 @@ static void sunhv_console_write_bychar(struct console *con, const char *s, unsig
 	}
 
 	if (locked)
-		spin_unlock_irqrestore(&port->lock, flags);
+		uart_port_unlock_irqrestore(port, flags);
 }
 
 static struct console sunhv_console = {
diff --git a/drivers/tty/serial/sunplus-uart.c b/drivers/tty/serial/sunplus-uart.c
index 3aacd5eb4..4251f4e1b 100644
--- a/drivers/tty/serial/sunplus-uart.c
+++ b/drivers/tty/serial/sunplus-uart.c
@@ -184,7 +184,7 @@ static void sunplus_break_ctl(struct uart_port *port, int ctl)
 	unsigned long flags;
 	unsigned int lcr;
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 
 	lcr = readl(port->membase + SUP_UART_LCR);
 
@@ -195,7 +195,7 @@ static void sunplus_break_ctl(struct uart_port *port, int ctl)
 
 	writel(lcr, port->membase + SUP_UART_LCR);
 
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }
 
 static void transmit_chars(struct uart_port *port)
@@ -277,7 +277,7 @@ static irqreturn_t sunplus_uart_irq(int irq, void *args)
 	struct uart_port *port = args;
 	unsigned int isc;
 
-	spin_lock(&port->lock);
+	uart_port_lock(port);
 
 	isc = readl(port->membase + SUP_UART_ISC);
 
@@ -287,7 +287,7 @@ static irqreturn_t sunplus_uart_irq(int irq, void *args)
 	if (isc & SUP_UART_ISC_TX)
 		transmit_chars(port);
 
-	spin_unlock(&port->lock);
+	uart_port_unlock(port);
 
 	return IRQ_HANDLED;
 }
@@ -302,14 +302,14 @@ static int sunplus_startup(struct uart_port *port)
 	if (ret)
 		return ret;
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 	/* isc define Bit[7:4] int setting, Bit[3:0] int status
 	 * isc register will clean Bit[3:0] int status after read
 	 * only do a write to Bit[7:4] int setting
 	 */
 	isc |= SUP_UART_ISC_RXM;
 	writel(isc, port->membase + SUP_UART_ISC);
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 
 	return 0;
 }
@@ -318,13 +318,13 @@ static void sunplus_shutdown(struct uart_port *port)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 	/* isc define Bit[7:4] int setting, Bit[3:0] int status
 	 * isc register will clean Bit[3:0] int status after read
 	 * only do a write to Bit[7:4] int setting
 	 */
 	writel(0, port->membase + SUP_UART_ISC); /* disable all interrupt */
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 
 	free_irq(port->irq, port);
 }
@@ -372,7 +372,7 @@ static void sunplus_set_termios(struct uart_port *port,
 			lcr |= UART_LCR_EPAR;
 	}
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 
 	uart_update_timeout(port, termios->c_cflag, baud);
 
@@ -407,7 +407,7 @@ static void sunplus_set_termios(struct uart_port *port,
 	writel(div_l, port->membase + SUP_UART_DIV_L);
 	writel(lcr, port->membase + SUP_UART_LCR);
 
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }
 
 static void sunplus_set_ldisc(struct uart_port *port, struct ktermios *termios)
@@ -517,15 +517,15 @@ static void sunplus_console_write(struct console *co,
 	if (sunplus_console_ports[co->index]->port.sysrq)
 		locked = 0;
 	else if (oops_in_progress)
-		locked = spin_trylock(&sunplus_console_ports[co->index]->port.lock);
+		locked = uart_port_trylock(&sunplus_console_ports[co->index]->port);
 	else
-		spin_lock(&sunplus_console_ports[co->index]->port.lock);
+		uart_port_lock(&sunplus_console_ports[co->index]->port);
 
 	uart_console_write(&sunplus_console_ports[co->index]->port, s, count,
 			   sunplus_uart_console_putchar);
 
 	if (locked)
-		spin_unlock(&sunplus_console_ports[co->index]->port.lock);
+		uart_port_unlock(&sunplus_console_ports[co->index]->port);
 
 	local_irq_restore(flags);
 }
diff --git a/drivers/tty/serial/sunsab.c b/drivers/tty/serial/sunsab.c
index 40eeaf835..6aa51a6f8 100644
--- a/drivers/tty/serial/sunsab.c
+++ b/drivers/tty/serial/sunsab.c
@@ -310,7 +310,7 @@ static irqreturn_t sunsab_interrupt(int irq, void *dev_id)
 	unsigned long flags;
 	unsigned char gis;
 
-	spin_lock_irqsave(&up->port.lock, flags);
+	uart_port_lock_irqsave(&up->port, &flags);
 
 	status.stat = 0;
 	gis = readb(&up->regs->r.gis) >> up->gis_shift;
@@ -331,7 +331,7 @@ static irqreturn_t sunsab_interrupt(int irq, void *dev_id)
 			transmit_chars(up, &status);
 	}
 
-	spin_unlock_irqrestore(&up->port.lock, flags);
+	uart_port_unlock_irqrestore(&up->port, flags);
 
 	if (port)
 		tty_flip_buffer_push(port);
@@ -473,12 +473,12 @@ static void sunsab_send_xchar(struct uart_port *port, char ch)
 	if (ch == __DISABLED_CHAR)
 		return;
 
-	spin_lock_irqsave(&up->port.lock, flags);
+	uart_port_lock_irqsave(&up->port, &flags);
 
 	sunsab_tec_wait(up);
 	writeb(ch, &up->regs->w.tic);
 
-	spin_unlock_irqrestore(&up->port.lock, flags);
+	uart_port_unlock_irqrestore(&up->port, flags);
 }
 
 /* port->lock held by caller. */
@@ -499,7 +499,7 @@ static void sunsab_break_ctl(struct uart_port *port, int break_state)
 	unsigned long flags;
 	unsigned char val;
 
-	spin_lock_irqsave(&up->port.lock, flags);
+	uart_port_lock_irqsave(&up->port, &flags);
 
 	val = up->cached_dafo;
 	if (break_state)
@@ -512,7 +512,7 @@ static void sunsab_break_ctl(struct uart_port *port, int break_state)
 	if (test_bit(SAB82532_XPR, &up->irqflags))
 		sunsab_tx_idle(up);
 
-	spin_unlock_irqrestore(&up->port.lock, flags);
+	uart_port_unlock_irqrestore(&up->port, flags);
 }
 
 /* port->lock is not held. */
@@ -527,7 +527,7 @@ static int sunsab_startup(struct uart_port *port)
 	if (err)
 		return err;
 
-	spin_lock_irqsave(&up->port.lock, flags);
+	uart_port_lock_irqsave(&up->port, &flags);
 
 	/*
 	 * Wait for any commands or immediate characters
@@ -582,7 +582,7 @@ static int sunsab_startup(struct uart_port *port)
 	set_bit(SAB82532_ALLS, &up->irqflags);
 	set_bit(SAB82532_XPR, &up->irqflags);
 
-	spin_unlock_irqrestore(&up->port.lock, flags);
+	uart_port_unlock_irqrestore(&up->port, flags);
 
 	return 0;
 }
@@ -594,7 +594,7 @@ static void sunsab_shutdown(struct uart_port *port)
 		container_of(port, struct uart_sunsab_port, port);
 	unsigned long flags;
 
-	spin_lock_irqsave(&up->port.lock, flags);
+	uart_port_lock_irqsave(&up->port, &flags);
 
 	/* Disable Interrupts */
 	up->interrupt_mask0 = 0xff;
@@ -628,7 +628,7 @@ static void sunsab_shutdown(struct uart_port *port)
 	writeb(tmp, &up->regs->rw.ccr0);
 #endif
 
-	spin_unlock_irqrestore(&up->port.lock, flags);
+	uart_port_unlock_irqrestore(&up->port, flags);
 	free_irq(up->port.irq, up);
 }
 
@@ -779,9 +779,9 @@ static void sunsab_set_termios(struct uart_port *port, struct ktermios *termios,
 	unsigned int baud = uart_get_baud_rate(port, termios, old, 0, 4000000);
 	unsigned int quot = uart_get_divisor(port, baud);
 
-	spin_lock_irqsave(&up->port.lock, flags);
+	uart_port_lock_irqsave(&up->port, &flags);
 	sunsab_convert_to_sab(up, termios->c_cflag, termios->c_iflag, baud, quot);
-	spin_unlock_irqrestore(&up->port.lock, flags);
+	uart_port_unlock_irqrestore(&up->port, flags);
 }
 
 static const char *sunsab_type(struct uart_port *port)
@@ -857,15 +857,15 @@ static void sunsab_console_write(struct console *con, const char *s, unsigned n)
 	int locked = 1;
 
 	if (up->port.sysrq || oops_in_progress)
-		locked = spin_trylock_irqsave(&up->port.lock, flags);
+		locked = uart_port_trylock_irqsave(&up->port, &flags);
 	else
-		spin_lock_irqsave(&up->port.lock, flags);
+		uart_port_lock_irqsave(&up->port, &flags);
 
 	uart_console_write(&up->port, s, n, sunsab_console_putchar);
 	sunsab_tec_wait(up);
 
 	if (locked)
-		spin_unlock_irqrestore(&up->port.lock, flags);
+		uart_port_unlock_irqrestore(&up->port, flags);
 }
 
 static int sunsab_console_setup(struct console *con, char *options)
@@ -914,7 +914,7 @@ static int sunsab_console_setup(struct console *con, char *options)
 	 */
 	sunsab_startup(&up->port);
 
-	spin_lock_irqsave(&up->port.lock, flags);
+	uart_port_lock_irqsave(&up->port, &flags);
 
 	/*
 	 * Finally, enable interrupts
@@ -932,7 +932,7 @@ static int sunsab_console_setup(struct console *con, char *options)
 	sunsab_convert_to_sab(up, con->cflag, 0, baud, quot);
 	sunsab_set_mctrl(&up->port, TIOCM_DTR | TIOCM_RTS);
 
-	spin_unlock_irqrestore(&up->port.lock, flags);
+	uart_port_unlock_irqrestore(&up->port, flags);
 
 	return 0;
 }
diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c
index 58a4342ad..1e051cc25 100644
--- a/drivers/tty/serial/sunsu.c
+++ b/drivers/tty/serial/sunsu.c
@@ -212,9 +212,9 @@ static void enable_rsa(struct uart_sunsu_port *up)
 {
 	if (up->port.type == PORT_RSA) {
 		if (up->port.uartclk != SERIAL_RSA_BAUD_BASE * 16) {
-			spin_lock_irq(&up->port.lock);
+			uart_port_lock_irq(&up->port);
 			__enable_rsa(up);
-			spin_unlock_irq(&up->port.lock);
+			uart_port_unlock_irq(&up->port);
 		}
 		if (up->port.uartclk == SERIAL_RSA_BAUD_BASE * 16)
 			serial_outp(up, UART_RSA_FRR, 0);
@@ -234,7 +234,7 @@ static void disable_rsa(struct uart_sunsu_port *up)
 
 	if (up->port.type == PORT_RSA &&
 	    up->port.uartclk == SERIAL_RSA_BAUD_BASE * 16) {
-		spin_lock_irq(&up->port.lock);
+		uart_port_lock_irq(&up->port);
 
 		mode = serial_inp(up, UART_RSA_MSR);
 		result = !(mode & UART_RSA_MSR_FIFO);
@@ -247,7 +247,7 @@ static void disable_rsa(struct uart_sunsu_port *up)
 
 		if (result)
 			up->port.uartclk = SERIAL_RSA_BAUD_BASE_LO * 16;
-		spin_unlock_irq(&up->port.lock);
+		uart_port_unlock_irq(&up->port);
 	}
 }
 #endif /* CONFIG_SERIAL_8250_RSA */
@@ -311,10 +311,10 @@ static void sunsu_enable_ms(struct uart_port *port)
 		container_of(port, struct uart_sunsu_port, port);
 	unsigned long flags;
 
-	spin_lock_irqsave(&up->port.lock, flags);
+	uart_port_lock_irqsave(&up->port, &flags);
 	up->ier |= UART_IER_MSI;
 	serial_out(up, UART_IER, up->ier);
-	spin_unlock_irqrestore(&up->port.lock, flags);
+	uart_port_unlock_irqrestore(&up->port, flags);
 }
 
 static void
@@ -456,7 +456,7 @@ static irqreturn_t sunsu_serial_interrupt(int irq, void *dev_id)
 	unsigned long flags;
 	unsigned char status;
 
-	spin_lock_irqsave(&up->port.lock, flags);
+	uart_port_lock_irqsave(&up->port, &flags);
 
 	do {
 		status = serial_inp(up, UART_LSR);
@@ -470,7 +470,7 @@ static irqreturn_t sunsu_serial_interrupt(int irq, void *dev_id)
 
 	} while (!(serial_in(up, UART_IIR) & UART_IIR_NO_INT));
 
-	spin_unlock_irqrestore(&up->port.lock, flags);
+	uart_port_unlock_irqrestore(&up->port, flags);
 
 	return IRQ_HANDLED;
 }
@@ -545,9 +545,9 @@ static unsigned int sunsu_tx_empty(struct uart_port *port)
 	unsigned long flags;
 	unsigned int ret;
 
-	spin_lock_irqsave(&up->port.lock, flags);
+	uart_port_lock_irqsave(&up->port, &flags);
 	ret = serial_in(up, UART_LSR) & UART_LSR_TEMT ? TIOCSER_TEMT : 0;
-	spin_unlock_irqrestore(&up->port.lock, flags);
+	uart_port_unlock_irqrestore(&up->port, flags);
 
 	return ret;
 }
@@ -599,13 +599,13 @@ static void sunsu_break_ctl(struct uart_port *port, int break_state)
 		container_of(port, struct uart_sunsu_port, port);
 	unsigned long flags;
 
-	spin_lock_irqsave(&up->port.lock, flags);
+	uart_port_lock_irqsave(&up->port, &flags);
 	if (break_state == -1)
 		up->lcr |= UART_LCR_SBC;
 	else
 		up->lcr &= ~UART_LCR_SBC;
 	serial_out(up, UART_LCR, up->lcr);
-	spin_unlock_irqrestore(&up->port.lock, flags);
+	uart_port_unlock_irqrestore(&up->port, flags);
 }
 
 static int sunsu_startup(struct uart_port *port)
@@ -683,12 +683,12 @@ static int sunsu_startup(struct uart_port *port)
 	 */
 	serial_outp(up, UART_LCR, UART_LCR_WLEN8);
 
-	spin_lock_irqsave(&up->port.lock, flags);
+	uart_port_lock_irqsave(&up->port, &flags);
 
 	up->port.mctrl |= TIOCM_OUT2;
 
 	sunsu_set_mctrl(&up->port, up->port.mctrl);
-	spin_unlock_irqrestore(&up->port.lock, flags);
+	uart_port_unlock_irqrestore(&up->port, flags);
 
 	/*
 	 * Finally, enable interrupts. Note: Modem status interrupts
@@ -731,7 +731,7 @@ static void sunsu_shutdown(struct uart_port *port)
 	up->ier = 0;
 	serial_outp(up, UART_IER, 0);
 
-	spin_lock_irqsave(&up->port.lock, flags);
+	uart_port_lock_irqsave(&up->port, &flags);
 	if (up->port.flags & UPF_FOURPORT) {
 		/* reset interrupts on the AST Fourport board */
 		inb((up->port.iobase & 0xfe0) | 0x1f);
@@ -740,7 +740,7 @@ static void sunsu_shutdown(struct uart_port *port)
 		up->port.mctrl &= ~TIOCM_OUT2;
 
 	sunsu_set_mctrl(&up->port, up->port.mctrl);
-	spin_unlock_irqrestore(&up->port.lock, flags);
+	uart_port_unlock_irqrestore(&up->port, flags);
 
 	/*
 	 * Disable break condition and FIFOs
@@ -826,7 +826,7 @@ sunsu_change_speed(struct uart_port *port, unsigned int cflag,
 	 * Ok, we're now changing the port state. Do it with
 	 * interrupts disabled.
 	 */
-	spin_lock_irqsave(&up->port.lock, flags);
+	uart_port_lock_irqsave(&up->port, &flags);
 
 	/*
 	 * Update the per-port timeout.
@@ -891,7 +891,7 @@ sunsu_change_speed(struct uart_port *port, unsigned int cflag,
 
 	up->cflag = cflag;
 
-	spin_unlock_irqrestore(&up->port.lock, flags);
+	uart_port_unlock_irqrestore(&up->port, flags);
 }
 
 static void
@@ -1038,7 +1038,7 @@ static void sunsu_autoconfig(struct uart_sunsu_port *up)
 	up->type_probed = PORT_UNKNOWN;
 	up->port.iotype = UPIO_MEM;
 
-	spin_lock_irqsave(&up->port.lock, flags);
+	uart_port_lock_irqsave(&up->port, &flags);
 
 	if (!(up->port.flags & UPF_BUGGY_UART)) {
 		/*
@@ -1173,7 +1173,7 @@ static void sunsu_autoconfig(struct uart_sunsu_port *up)
 	serial_outp(up, UART_IER, 0);
 
 out:
-	spin_unlock_irqrestore(&up->port.lock, flags);
+	uart_port_unlock_irqrestore(&up->port, flags);
 }
 
 static struct uart_driver sunsu_reg = {
@@ -1298,9 +1298,9 @@ static void sunsu_console_write(struct console *co, const char *s,
 	int locked = 1;
 
 	if (up->port.sysrq || oops_in_progress)
-		locked = spin_trylock_irqsave(&up->port.lock, flags);
+		locked = uart_port_trylock_irqsave(&up->port, &flags);
 	else
-		spin_lock_irqsave(&up->port.lock, flags);
+		uart_port_lock_irqsave(&up->port, &flags);
 
 	/*
	 * First save the UER then disable the interrupts
@@ -1318,7 +1318,7 @@ static void sunsu_console_write(struct console *co, const char *s,
 	serial_out(up, UART_IER, ier);
 
 	if (locked)
-		spin_unlock_irqrestore(&up->port.lock, flags);
+		uart_port_unlock_irqrestore(&up->port, flags);
 }
 
 /*
diff --git a/drivers/tty/serial/sunzilog.c b/drivers/tty/serial/sunzilog.c
index c8c71c562..d3b5e864b 100644
--- a/drivers/tty/serial/sunzilog.c
+++ b/drivers/tty/serial/sunzilog.c
@@ -531,7 +531,7 @@ static irqreturn_t sunzilog_interrupt(int irq, void *dev_id)
 		struct tty_port *port;
 		unsigned char r3;
 
-		spin_lock(&up->port.lock);
+		uart_port_lock(&up->port);
 		r3 = read_zsreg(channel, R3);
 
 		/* Channel A */
@@ -548,7 +548,7 @@ static irqreturn_t sunzilog_interrupt(int irq, void *dev_id)
 			if (r3 & CHATxIP)
 				sunzilog_transmit_chars(up, channel);
 		}
-		spin_unlock(&up->port.lock);
+		uart_port_unlock(&up->port);
 
 		if (port)
 			tty_flip_buffer_push(port);
@@ -557,7 +557,7 @@ static irqreturn_t sunzilog_interrupt(int irq, void *dev_id)
 		up = up->next;
 		channel = ZILOG_CHANNEL_FROM_PORT(&up->port);
 
-		spin_lock(&up->port.lock);
+		uart_port_lock(&up->port);
 		port = NULL;
 		if (r3 & (CHBEXT | CHBTxIP | CHBRxIP)) {
 			writeb(RES_H_IUS, &channel->control);
@@ -571,7 +571,7 @@ static irqreturn_t sunzilog_interrupt(int irq, void *dev_id)
 			if (r3 & CHBTxIP)
 				sunzilog_transmit_chars(up, channel);
 		}
-		spin_unlock(&up->port.lock);
+		uart_port_unlock(&up->port);
 
 		if (port)
 			tty_flip_buffer_push(port);
@@ -604,11 +604,11 @@ static unsigned int sunzilog_tx_empty(struct uart_port *port)
 	unsigned char status;
 	unsigned int ret;
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 
 	status = sunzilog_read_channel_status(port);
 
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 
 	if (status & Tx_BUF_EMP)
 		ret = TIOCSER_TEMT;
@@ -764,7 +764,7 @@ static void sunzilog_break_ctl(struct uart_port *port, int break_state)
 	else
 		clear_bits |= SND_BRK;
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 
 	new_reg = (up->curregs[R5] | set_bits) & ~clear_bits;
 	if (new_reg != up->curregs[R5]) {
@@ -774,7 +774,7 @@ static void sunzilog_break_ctl(struct uart_port *port, int break_state)
 		write_zsreg(channel, R5, up->curregs[R5]);
 	}
 
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }
 
 static void __sunzilog_startup(struct uart_sunzilog_port *up)
@@ -800,9 +800,9 @@ static int sunzilog_startup(struct uart_port *port)
 	if (ZS_IS_CONS(up))
 		return 0;
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 	__sunzilog_startup(up);
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 	return 0;
 }
 
@@ -840,7 +840,7 @@ static void sunzilog_shutdown(struct uart_port *port)
 	if (ZS_IS_CONS(up))
 		return;
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 
 	channel = ZILOG_CHANNEL_FROM_PORT(port);
 
@@ -853,7 +853,7 @@ static void sunzilog_shutdown(struct uart_port *port)
 	up->curregs[R5] &= ~SND_BRK;
 	sunzilog_maybe_update_regs(up, channel);
 
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }
 
 /* Shared by TTY driver and serial console setup.  The port lock is held
@@ -945,7 +945,7 @@ sunzilog_set_termios(struct uart_port *port, struct ktermios *termios,
 
 	baud = uart_get_baud_rate(port, termios, old, 1200, 76800);
 
-	spin_lock_irqsave(&up->port.lock, flags);
+	uart_port_lock_irqsave(&up->port, &flags);
 
 	brg = BPS_TO_BRG(baud, ZS_CLOCK / ZS_CLOCK_DIVISOR);
 
@@ -962,7 +962,7 @@ sunzilog_set_termios(struct uart_port *port, struct ktermios *termios,
 
 	uart_update_timeout(port, termios->c_cflag, baud);
 
-	spin_unlock_irqrestore(&up->port.lock, flags);
+	uart_port_unlock_irqrestore(&up->port, flags);
 }
 
 static const char *sunzilog_type(struct uart_port *port)
@@ -1201,15 +1201,15 @@ sunzilog_console_write(struct console *con, const char *s, unsigned int count)
 	int locked = 1;
 
 	if (up->port.sysrq || oops_in_progress)
-		locked = spin_trylock_irqsave(&up->port.lock, flags);
+		locked = uart_port_trylock_irqsave(&up->port, &flags);
 	else
-		spin_lock_irqsave(&up->port.lock, flags);
+		uart_port_lock_irqsave(&up->port, &flags);
 
 	uart_console_write(&up->port, s, count, sunzilog_putchar);
 	udelay(2);
 
 	if (locked)
-		spin_unlock_irqrestore(&up->port.lock, flags);
+		uart_port_unlock_irqrestore(&up->port, flags);
 }
 
 static int __init sunzilog_console_setup(struct console *con, char *options)
@@ -1244,7 +1244,7 @@ static int __init sunzilog_console_setup(struct console *con, char *options)
 
 	brg = BPS_TO_BRG(baud, ZS_CLOCK / ZS_CLOCK_DIVISOR);
 
-	spin_lock_irqsave(&up->port.lock, flags);
+	uart_port_lock_irqsave(&up->port, &flags);
 
 	up->curregs[R15] |= BRKIE;
 	sunzilog_convert_to_zs(up, con->cflag, 0, brg);
@@ -1252,7 +1252,7 @@ static int __init sunzilog_console_setup(struct console *con, char *options)
 	sunzilog_set_mctrl(&up->port, TIOCM_DTR | TIOCM_RTS);
 	__sunzilog_startup(up);
 
-	spin_unlock_irqrestore(&up->port.lock, flags);
+	uart_port_unlock_irqrestore(&up->port, flags);
 
 	return 0;
 }
@@ -1333,7 +1333,7 @@ static void sunzilog_init_hw(struct uart_sunzilog_port *up)
 
 	channel = ZILOG_CHANNEL_FROM_PORT(&up->port);
 
-	spin_lock_irqsave(&up->port.lock, flags);
+	uart_port_lock_irqsave(&up->port, &flags);
 	if (ZS_IS_CHANNEL_A(up)) {
 		write_zsreg(channel, R9, FHWRES);
 		ZSDELAY_LONG();
@@ -1383,7 +1383,7 @@ static void sunzilog_init_hw(struct uart_sunzilog_port *up)
 		write_zsreg(channel, R9, up->curregs[R9]);
 	}
 
-	spin_unlock_irqrestore(&up->port.lock, flags);
+	uart_port_unlock_irqrestore(&up->port, flags);
 
 #ifdef CONFIG_SERIO
 	if (up->flags & (SUNZILOG_FLAG_CONS_KEYB |
diff --git a/drivers/tty/serial/timbuart.c b/drivers/tty/serial/timbuart.c
index 0859394a7..0cc6524f5 100644
--- a/drivers/tty/serial/timbuart.c
+++ b/drivers/tty/serial/timbuart.c
@@ -174,7 +174,7 @@ static void timbuart_tasklet(struct tasklet_struct *t)
 	struct timbuart_port *uart = from_tasklet(uart, t, tasklet);
 	u32 isr, ier = 0;
 
-	spin_lock(&uart->port.lock);
+	uart_port_lock(&uart->port);
 
 	isr = ioread32(uart->port.membase + TIMBUART_ISR);
 	dev_dbg(uart->port.dev, "%s ISR: %x\n", __func__, isr);
@@ -189,7 +189,7 @@ static void timbuart_tasklet(struct tasklet_struct *t)
 
 	iowrite32(ier, uart->port.membase + TIMBUART_IER);
 
-	spin_unlock(&uart->port.lock);
+	uart_port_unlock(&uart->port);
 	dev_dbg(uart->port.dev, "%s leaving\n", __func__);
 }
 
@@ -295,10 +295,10 @@ static void timbuart_set_termios(struct uart_port *port,
 	tty_termios_copy_hw(termios, old);
 	tty_termios_encode_baud_rate(termios, baud, baud);
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 	iowrite8((u8)bindex, port->membase + TIMBUART_BAUDRATE);
 	uart_update_timeout(port, termios->c_cflag, baud);
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }
 
 static const char *timbuart_type(struct uart_port *port)
diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c
index b225a78f6..404c14aca 100644
--- a/drivers/tty/serial/uartlite.c
+++ b/drivers/tty/serial/uartlite.c
@@ -216,11 +216,11 @@ static irqreturn_t ulite_isr(int irq, void *dev_id)
 	unsigned long flags;
 
 	do {
-		spin_lock_irqsave(&port->lock, flags);
+		uart_port_lock_irqsave(port, &flags);
 		stat = uart_in32(ULITE_STATUS, port);
 		busy = ulite_receive(port, stat);
 		busy |= ulite_transmit(port, stat);
-		spin_unlock_irqrestore(&port->lock, flags);
+		uart_port_unlock_irqrestore(port, flags);
 		n++;
 	} while (busy);
 
@@ -238,9 +238,9 @@ static unsigned int ulite_tx_empty(struct uart_port *port)
 	unsigned long flags;
 	unsigned int ret;
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 	ret = uart_in32(ULITE_STATUS, port);
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 
 	return ret & ULITE_STATUS_TXEMPTY ? TIOCSER_TEMT : 0;
 }
@@ -323,7 +323,7 @@ static void ulite_set_termios(struct uart_port *port,
 	termios->c_cflag |= pdata->cflags & (PARENB | PARODD | CSIZE);
 	tty_termios_encode_baud_rate(termios, pdata->baud, pdata->baud);
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 
 	port->read_status_mask = ULITE_STATUS_RXVALID | ULITE_STATUS_OVERRUN
 		| ULITE_STATUS_TXFULL;
@@ -346,7 +346,7 @@ static void ulite_set_termios(struct uart_port *port,
 	/* update timeout */
 	uart_update_timeout(port, termios->c_cflag, pdata->baud);
 
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }
 
 static const char *ulite_type(struct uart_port *port)
@@ -495,9 +495,9 @@ static void ulite_console_write(struct console *co, const char *s,
 	int locked = 1;
 
 	if (oops_in_progress) {
-		locked = spin_trylock_irqsave(&port->lock, flags);
+		locked = uart_port_trylock_irqsave(port, &flags);
 	} else
-		spin_lock_irqsave(&port->lock, flags);
+		uart_port_lock_irqsave(port, &flags);
 
 	/* save and disable interrupt */
 	ier = uart_in32(ULITE_STATUS, port) & ULITE_STATUS_IE;
@@ -512,7 +512,7 @@ static void ulite_console_write(struct console *co, const char *s,
 		uart_out32(ULITE_CONTROL_IE, ULITE_CONTROL, port);
 
 	if (locked)
-		spin_unlock_irqrestore(&port->lock, flags);
+		uart_port_unlock_irqrestore(port, flags);
 }
 
 static int ulite_console_setup(struct console *co, char *options)
diff --git a/drivers/tty/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c
index b06661b80..ed7a6bb55 100644
--- a/drivers/tty/serial/ucc_uart.c
+++ b/drivers/tty/serial/ucc_uart.c
@@ -931,7 +931,7 @@ static void qe_uart_set_termios(struct uart_port *port,
 	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
 
 	/* Do we really need a spinlock here? */
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 
 	/* Update the per-port timeout. */
 	uart_update_timeout(port, termios->c_cflag, baud);
@@ -949,7 +949,7 @@ static void qe_uart_set_termios(struct uart_port *port,
 		qe_setbrg(qe_port->us_info.tx_clock, baud, 16);
 	}
 
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }
 
 /*
diff --git a/drivers/tty/serial/vt8500_serial.c b/drivers/tty/serial/vt8500_serial.c
index c5d5c2765..78a1c1eea 100644
--- a/drivers/tty/serial/vt8500_serial.c
+++ b/drivers/tty/serial/vt8500_serial.c
@@ -227,7 +227,7 @@ static irqreturn_t vt8500_irq(int irq, void *dev_id)
 	struct uart_port *port = dev_id;
 	unsigned long isr;
 
-	spin_lock(&port->lock);
+	uart_port_lock(port);
 	isr = vt8500_read(port, VT8500_URISR);
 
 	/* Acknowledge active status bits */
@@ -240,7 +240,7 @@ static irqreturn_t vt8500_irq(int irq, void *dev_id)
 	if (isr & TCTS)
 		handle_delta_cts(port);
 
-	spin_unlock(&port->lock);
+	uart_port_unlock(port);
 
 	return IRQ_HANDLED;
 }
@@ -342,7 +342,7 @@ static void vt8500_set_termios(struct uart_port *port,
 	unsigned int baud, lcr;
 	unsigned int loops = 1000;
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 
 	/* calculate and set baud rate */
 	baud = uart_get_baud_rate(port, termios, old, 900, 921600);
@@ -410,7 +410,7 @@ static void vt8500_set_termios(struct uart_port *port,
 	vt8500_write(&vt8500_port->uart, 0x881, VT8500_URFCR);
 	vt8500_write(&vt8500_port->uart, vt8500_port->ier, VT8500_URIER);
 
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }
 
 static const char *vt8500_type(struct uart_port *port)
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index 2e5e86a00..9c13dac1d 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -346,7 +346,7 @@ static irqreturn_t cdns_uart_isr(int irq, void *dev_id)
 	struct uart_port *port = (struct uart_port *)dev_id;
 	unsigned int isrstatus;
 
-	spin_lock(&port->lock);
+	uart_port_lock(port);
 
 	/* Read the interrupt status register to determine which
 	 * interrupt(s) is/are active and clear them.
@@ -369,7 +369,7 @@ static irqreturn_t cdns_uart_isr(int irq, void *dev_id)
 	    !(readl(port->membase + CDNS_UART_CR) & CDNS_UART_CR_RX_DIS))
 		cdns_uart_handle_rx(dev_id, isrstatus);
 
-	spin_unlock(&port->lock);
+	uart_port_unlock(port);
 	return IRQ_HANDLED;
 }
 
@@ -506,14 +506,14 @@ static int cdns_uart_clk_notifier_cb(struct notifier_block *nb,
 			return NOTIFY_BAD;
 		}
 
-		spin_lock_irqsave(&cdns_uart->port->lock, flags);
+		uart_port_lock_irqsave(cdns_uart->port, &flags);
 
 		/* Disable the TX and RX to set baud rate */
 		ctrl_reg = readl(port->membase + CDNS_UART_CR);
 		ctrl_reg |= CDNS_UART_CR_TX_DIS | CDNS_UART_CR_RX_DIS;
 		writel(ctrl_reg, port->membase + CDNS_UART_CR);
 
-		spin_unlock_irqrestore(&cdns_uart->port->lock, flags);
+		uart_port_unlock_irqrestore(cdns_uart->port, flags);
 
 		return NOTIFY_OK;
 	}
@@ -523,7 +523,7 @@ static int cdns_uart_clk_notifier_cb(struct notifier_block *nb,
 		 * frequency.
 		 */
 
-		spin_lock_irqsave(&cdns_uart->port->lock, flags);
+		uart_port_lock_irqsave(cdns_uart->port, &flags);
 
 		locked = 1;
 		port->uartclk = ndata->new_rate;
@@ -533,7 +533,7 @@ static int cdns_uart_clk_notifier_cb(struct notifier_block *nb,
 		fallthrough;
 	case ABORT_RATE_CHANGE:
 		if (!locked)
-			spin_lock_irqsave(&cdns_uart->port->lock, flags);
+			uart_port_lock_irqsave(cdns_uart->port, &flags);
 
 		/* Set TX/RX Reset */
 		ctrl_reg = readl(port->membase + CDNS_UART_CR);
@@ -555,7 +555,7 @@ static int cdns_uart_clk_notifier_cb(struct notifier_block *nb,
 		ctrl_reg |= CDNS_UART_CR_TX_EN | CDNS_UART_CR_RX_EN;
 		writel(ctrl_reg, port->membase + CDNS_UART_CR);
 
-		spin_unlock_irqrestore(&cdns_uart->port->lock, flags);
+		uart_port_unlock_irqrestore(cdns_uart->port, flags);
 
 		return NOTIFY_OK;
 	default:
@@ -652,7 +652,7 @@ static void cdns_uart_break_ctl(struct uart_port *port, int ctl)
 	unsigned int status;
 	unsigned long flags;
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 
 	status = readl(port->membase + CDNS_UART_CR);
 
@@ -664,7 +664,7 @@ static void cdns_uart_break_ctl(struct uart_port *port, int ctl)
 		writel(CDNS_UART_CR_STOPBRK | status,
 				port->membase + CDNS_UART_CR);
 	}
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }
 
 /**
@@ -683,7 +683,7 @@ static void cdns_uart_set_termios(struct uart_port *port,
 	unsigned long flags;
 	unsigned int ctrl_reg, mode_reg;
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 
 	/* Disable the TX and RX to set baud rate */
 	ctrl_reg = readl(port->membase + CDNS_UART_CR);
@@ -794,7 +794,7 @@ static void cdns_uart_set_termios(struct uart_port *port,
 	cval &= ~CDNS_UART_MODEMCR_FCM;
 	writel(cval, port->membase + CDNS_UART_MODEMCR);
 
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 }
 
 /**
@@ -813,7 +813,7 @@ static int cdns_uart_startup(struct uart_port *port)
 
 	is_brk_support = cdns_uart->quirks & CDNS_UART_RXBS_SUPPORT;
 
-	spin_lock_irqsave(&port->lock, flags);
+	uart_port_lock_irqsave(port, &flags);
 
 	/* Disable the TX and RX */
 	writel(CDNS_UART_CR_TX_DIS | CDNS_UART_CR_RX_DIS,
			port->membase + CDNS_UART_CR);
@@ -861,7 +861,7 @@ static int cdns_uart_startup(struct uart_port *port)
 	writel(readl(port->membase + CDNS_UART_ISR),
			port->membase + CDNS_UART_ISR);
 
-	spin_unlock_irqrestore(&port->lock, flags);
+	uart_port_unlock_irqrestore(port, flags);
 
 	ret = request_irq(port->irq, cdns_uart_isr, 0, CDNS_UART_NAME, port);
|
||
@@ -889,7 +889,7 @@ static void cdns_uart_shutdown(struct uart_port *port)
|
||
int status;
|
||
unsigned long flags;
|
||
|
||
- spin_lock_irqsave(&port->lock, flags);
|
||
+ uart_port_lock_irqsave(port, &flags);
|
||
|
||
/* Disable interrupts */
|
||
status = readl(port->membase + CDNS_UART_IMR);
|
||
@@ -900,7 +900,7 @@ static void cdns_uart_shutdown(struct uart_port *port)
|
||
writel(CDNS_UART_CR_TX_DIS | CDNS_UART_CR_RX_DIS,
|
||
port->membase + CDNS_UART_CR);
|
||
|
||
- spin_unlock_irqrestore(&port->lock, flags);
|
||
+ uart_port_unlock_irqrestore(port, flags);
|
||
|
||
free_irq(port->irq, port);
|
||
}
|
||
@@ -1050,7 +1050,7 @@ static int cdns_uart_poll_get_char(struct uart_port *port)
|
||
int c;
|
||
unsigned long flags;
|
||
|
||
- spin_lock_irqsave(&port->lock, flags);
|
||
+ uart_port_lock_irqsave(port, &flags);
|
||
|
||
/* Check if FIFO is empty */
|
||
if (readl(port->membase + CDNS_UART_SR) & CDNS_UART_SR_RXEMPTY)
|
||
@@ -1058,7 +1058,7 @@ static int cdns_uart_poll_get_char(struct uart_port *port)
|
||
else /* Read a character */
|
||
c = (unsigned char) readl(port->membase + CDNS_UART_FIFO);
|
||
|
||
- spin_unlock_irqrestore(&port->lock, flags);
|
||
+ uart_port_unlock_irqrestore(port, flags);
|
||
|
||
return c;
|
||
}
|
||
@@ -1067,7 +1067,7 @@ static void cdns_uart_poll_put_char(struct uart_port *port, unsigned char c)
|
||
{
|
||
unsigned long flags;
|
||
|
||
- spin_lock_irqsave(&port->lock, flags);
|
||
+ uart_port_lock_irqsave(port, &flags);
|
||
|
||
/* Wait until FIFO is empty */
|
||
while (!(readl(port->membase + CDNS_UART_SR) & CDNS_UART_SR_TXEMPTY))
|
||
@@ -1080,7 +1080,7 @@ static void cdns_uart_poll_put_char(struct uart_port *port, unsigned char c)
|
||
while (!(readl(port->membase + CDNS_UART_SR) & CDNS_UART_SR_TXEMPTY))
|
||
cpu_relax();
|
||
|
||
- spin_unlock_irqrestore(&port->lock, flags);
|
||
+ uart_port_unlock_irqrestore(port, flags);
|
||
}
|
||
#endif
|
||
|
||
@@ -1232,9 +1232,9 @@ static void cdns_uart_console_write(struct console *co, const char *s,
|
||
if (port->sysrq)
|
||
locked = 0;
|
||
else if (oops_in_progress)
|
||
- locked = spin_trylock_irqsave(&port->lock, flags);
|
||
+ locked = uart_port_trylock_irqsave(port, &flags);
|
||
else
|
||
- spin_lock_irqsave(&port->lock, flags);
|
||
+ uart_port_lock_irqsave(port, &flags);
|
||
|
||
/* save and disable interrupt */
|
||
imr = readl(port->membase + CDNS_UART_IMR);
|
||
@@ -1257,7 +1257,7 @@ static void cdns_uart_console_write(struct console *co, const char *s,
|
||
writel(imr, port->membase + CDNS_UART_IER);
|
||
|
||
if (locked)
|
||
- spin_unlock_irqrestore(&port->lock, flags);
|
||
+ uart_port_unlock_irqrestore(port, flags);
|
||
}
|
||
|
||
/**
|
||
@@ -1325,7 +1325,7 @@ static int cdns_uart_suspend(struct device *device)
|
||
if (console_suspend_enabled && uart_console(port) && may_wake) {
|
||
unsigned long flags;
|
||
|
||
- spin_lock_irqsave(&port->lock, flags);
|
||
+ uart_port_lock_irqsave(port, &flags);
|
||
/* Empty the receive FIFO 1st before making changes */
|
||
while (!(readl(port->membase + CDNS_UART_SR) &
|
||
CDNS_UART_SR_RXEMPTY))
|
||
@@ -1334,7 +1334,7 @@ static int cdns_uart_suspend(struct device *device)
|
||
writel(1, port->membase + CDNS_UART_RXWM);
|
||
/* disable RX timeout interrups */
|
||
writel(CDNS_UART_IXR_TOUT, port->membase + CDNS_UART_IDR);
|
||
- spin_unlock_irqrestore(&port->lock, flags);
|
||
+ uart_port_unlock_irqrestore(port, flags);
|
||
}
|
||
|
||
/*
|
||
@@ -1372,7 +1372,7 @@ static int cdns_uart_resume(struct device *device)
|
||
return ret;
|
||
}
|
||
|
||
- spin_lock_irqsave(&port->lock, flags);
|
||
+ uart_port_lock_irqsave(port, &flags);
|
||
|
||
/* Set TX/RX Reset */
|
||
ctrl_reg = readl(port->membase + CDNS_UART_CR);
|
||
@@ -1392,14 +1392,14 @@ static int cdns_uart_resume(struct device *device)
|
||
|
||
clk_disable(cdns_uart->uartclk);
|
||
clk_disable(cdns_uart->pclk);
|
||
- spin_unlock_irqrestore(&port->lock, flags);
|
||
+ uart_port_unlock_irqrestore(port, flags);
|
||
} else {
|
||
- spin_lock_irqsave(&port->lock, flags);
|
||
+ uart_port_lock_irqsave(port, &flags);
|
||
/* restore original rx trigger level */
|
||
writel(rx_trigger_level, port->membase + CDNS_UART_RXWM);
|
||
/* enable RX timeout interrupt */
|
||
writel(CDNS_UART_IXR_TOUT, port->membase + CDNS_UART_IER);
|
||
- spin_unlock_irqrestore(&port->lock, flags);
|
||
+ uart_port_unlock_irqrestore(port, flags);
|
||
}
|
||
|
||
return uart_resume_port(cdns_uart->cdns_uart_driver, port);
|
||
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
|
||
index 117abcf36..03e2eaf24 100644
|
||
--- a/drivers/tty/tty_io.c
|
||
+++ b/drivers/tty/tty_io.c
|
||
@@ -3543,8 +3543,15 @@ static ssize_t show_cons_active(struct device *dev,
|
||
for_each_console(c) {
|
||
if (!c->device)
|
||
continue;
|
||
- if (!c->write)
|
||
- continue;
|
||
+ if (c->flags & CON_NBCON) {
|
||
+ if (!c->write_atomic &&
|
||
+ !(c->write_thread && c->kthread)) {
|
||
+ continue;
|
||
+ }
|
||
+ } else {
|
||
+ if (!c->write)
|
||
+ continue;
|
||
+ }
|
||
if ((c->flags & CON_ENABLED) == 0)
|
||
continue;
|
||
cs[i++] = c;
|
||
diff --git a/fs/proc/consoles.c b/fs/proc/consoles.c
|
||
index e0758fe79..270367654 100644
|
||
--- a/fs/proc/consoles.c
|
||
+++ b/fs/proc/consoles.c
|
||
@@ -21,12 +21,14 @@ static int show_console_dev(struct seq_file *m, void *v)
|
||
{ CON_ENABLED, 'E' },
|
||
{ CON_CONSDEV, 'C' },
|
||
{ CON_BOOT, 'B' },
|
||
+ { CON_NBCON, 'N' },
|
||
{ CON_PRINTBUFFER, 'p' },
|
||
{ CON_BRL, 'b' },
|
||
{ CON_ANYTIME, 'a' },
|
||
};
|
||
char flags[ARRAY_SIZE(con_flags) + 1];
|
||
struct console *con = v;
|
||
+ char con_write = '-';
|
||
unsigned int a;
|
||
dev_t dev = 0;
|
||
|
||
@@ -57,9 +59,15 @@ static int show_console_dev(struct seq_file *m, void *v)
|
||
seq_setwidth(m, 21 - 1);
|
||
seq_printf(m, "%s%d", con->name, con->index);
|
||
seq_pad(m, ' ');
|
||
- seq_printf(m, "%c%c%c (%s)", con->read ? 'R' : '-',
|
||
- con->write ? 'W' : '-', con->unblank ? 'U' : '-',
|
||
- flags);
|
||
+ if (con->flags & CON_NBCON) {
|
||
+ if (con->write_atomic || con->write_thread)
|
||
+ con_write = 'W';
|
||
+ } else {
|
||
+ if (con->write)
|
||
+ con_write = 'W';
|
||
+ }
|
||
+ seq_printf(m, "%c%c%c (%s)", con->read ? 'R' : '-', con_write,
|
||
+ con->unblank ? 'U' : '-', flags);
|
||
if (dev)
|
||
seq_printf(m, " %4d:%d", MAJOR(dev), MINOR(dev));
|
||
|
||
diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h
|
||
index fc53e0ad5..448bbef47 100644
|
||
--- a/include/linux/bottom_half.h
|
||
+++ b/include/linux/bottom_half.h
|
||
@@ -35,8 +35,10 @@ static inline void local_bh_enable(void)
|
||
|
||
#ifdef CONFIG_PREEMPT_RT
|
||
extern bool local_bh_blocked(void);
|
||
+extern void softirq_preempt(void);
|
||
#else
|
||
static inline bool local_bh_blocked(void) { return false; }
|
||
+static inline void softirq_preempt(void) { }
|
||
#endif
|
||
|
||
#endif /* _LINUX_BH_H */
|
||
diff --git a/include/linux/console.h b/include/linux/console.h
|
||
index 7de11c763..1eb9580e9 100644
|
||
--- a/include/linux/console.h
|
||
+++ b/include/linux/console.h
|
||
@@ -16,7 +16,9 @@
|
||
|
||
#include <linux/atomic.h>
|
||
#include <linux/bits.h>
|
||
+#include <linux/irq_work.h>
|
||
#include <linux/rculist.h>
|
||
+#include <linux/rcuwait.h>
|
||
#include <linux/types.h>
|
||
|
||
struct vc_data;
|
||
@@ -156,6 +158,8 @@ static inline int con_debug_leave(void)
|
||
* /dev/kmesg which requires a larger output buffer.
|
||
* @CON_SUSPENDED: Indicates if a console is suspended. If true, the
|
||
* printing callbacks must not be called.
|
||
+ * @CON_NBCON: Console can operate outside of the legacy style console_lock
|
||
+ * constraints.
|
||
*/
|
||
enum cons_flags {
|
||
CON_PRINTBUFFER = BIT(0),
|
||
@@ -166,6 +170,111 @@ enum cons_flags {
|
||
CON_BRL = BIT(5),
|
||
CON_EXTENDED = BIT(6),
|
||
CON_SUSPENDED = BIT(7),
|
||
+ CON_NBCON = BIT(8),
|
||
+};
|
||
+
|
||
+/**
|
||
+ * struct nbcon_state - console state for nbcon consoles
|
||
+ * @atom: Compound of the state fields for atomic operations
|
||
+ *
|
||
+ * @req_prio: The priority of a handover request
|
||
+ * @prio: The priority of the current owner
|
||
+ * @unsafe: Console is busy in a non takeover region
|
||
+ * @unsafe_takeover: A hostile takeover in an unsafe state happened in the
|
||
+ * past. The console cannot be safe until re-initialized.
|
||
+ * @cpu: The CPU on which the owner runs
|
||
+ *
|
||
+ * To be used for reading and preparing of the value stored in the nbcon
|
||
+ * state variable @console::nbcon_state.
|
||
+ *
|
||
+ * The @prio and @req_prio fields are particularly important to allow
|
||
+ * spin-waiting to timeout and give up without the risk of a waiter being
|
||
+ * assigned the lock after giving up.
|
||
+ */
|
||
+struct nbcon_state {
|
||
+ union {
|
||
+ unsigned int atom;
|
||
+ struct {
|
||
+ unsigned int prio : 2;
|
||
+ unsigned int req_prio : 2;
|
||
+ unsigned int unsafe : 1;
|
||
+ unsigned int unsafe_takeover : 1;
|
||
+ unsigned int cpu : 24;
|
||
+ };
|
||
+ };
|
||
+};
|
||
+
|
||
+/*
|
||
+ * The nbcon_state struct is used to easily create and interpret values that
|
||
+ * are stored in the @console::nbcon_state variable. Ensure this struct stays
|
||
+ * within the size boundaries of the atomic variable's underlying type in
|
||
+ * order to avoid any accidental truncation.
|
||
+ */
|
||
+static_assert(sizeof(struct nbcon_state) <= sizeof(int));
|
||
+
|
||
+/**
|
||
+ * nbcon_prio - console owner priority for nbcon consoles
|
||
+ * @NBCON_PRIO_NONE: Unused
|
||
+ * @NBCON_PRIO_NORMAL: Normal (non-emergency) usage
|
||
+ * @NBCON_PRIO_EMERGENCY: Emergency output (WARN/OOPS...)
|
||
+ * @NBCON_PRIO_PANIC: Panic output
|
||
+ * @NBCON_PRIO_MAX: The number of priority levels
|
||
+ *
|
||
+ * A higher priority context can takeover the console when it is
|
||
+ * in the safe state. The final attempt to flush consoles in panic()
|
||
+ * can be allowed to do so even in an unsafe state (Hope and pray).
|
||
+ */
|
||
+enum nbcon_prio {
|
||
+ NBCON_PRIO_NONE = 0,
|
||
+ NBCON_PRIO_NORMAL,
|
||
+ NBCON_PRIO_EMERGENCY,
|
||
+ NBCON_PRIO_PANIC,
|
||
+ NBCON_PRIO_MAX,
|
||
+};
|
||
+
|
||
+struct console;
|
||
+struct printk_buffers;
|
||
+
|
||
+/**
|
||
+ * struct nbcon_context - Context for console acquire/release
|
||
+ * @console: The associated console
|
||
+ * @spinwait_max_us: Limit for spin-wait acquire
|
||
+ * @prio: Priority of the context
|
||
+ * @allow_unsafe_takeover: Allow performing takeover even if unsafe. Can
|
||
+ * be used only with NBCON_PRIO_PANIC @prio. It
|
||
+ * might cause a system freeze when the console
|
||
+ * is used later.
|
||
+ * @backlog: Ringbuffer has pending records
|
||
+ * @pbufs: Pointer to the text buffer for this context
|
||
+ * @seq: The sequence number to print for this context
|
||
+ */
|
||
+struct nbcon_context {
|
||
+ /* members set by caller */
|
||
+ struct console *console;
|
||
+ unsigned int spinwait_max_us;
|
||
+ enum nbcon_prio prio;
|
||
+ unsigned int allow_unsafe_takeover : 1;
|
||
+
|
||
+ /* members set by emit */
|
||
+ unsigned int backlog : 1;
|
||
+
|
||
+ /* members set by acquire */
|
||
+ struct printk_buffers *pbufs;
|
||
+ u64 seq;
|
||
+};
|
||
+
|
||
+/**
|
||
+ * struct nbcon_write_context - Context handed to the nbcon write callbacks
|
||
+ * @ctxt: The core console context
|
||
+ * @outbuf: Pointer to the text buffer for output
|
||
+ * @len: Length to write
|
||
+ * @unsafe_takeover: If a hostile takeover in an unsafe state has occurred
|
||
+ */
|
||
+struct nbcon_write_context {
|
||
+ struct nbcon_context __private ctxt;
|
||
+ char *outbuf;
|
||
+ unsigned int len;
|
||
+ bool unsafe_takeover;
|
||
};
|
||
|
||
/**
|
||
@@ -187,6 +296,17 @@ enum cons_flags {
|
||
* @dropped: Number of unreported dropped ringbuffer records
|
||
* @data: Driver private data
|
||
* @node: hlist node for the console list
|
||
+ *
|
||
+ * @write_atomic: Write callback for atomic context
|
||
+ * @write_thread: Write callback for non-atomic context
|
||
+ * @driver_enter: Callback to begin synchronization with driver code
|
||
+ * @driver_exit: Callback to finish synchronization with driver code
|
||
+ * @nbcon_state: State for nbcon consoles
|
||
+ * @nbcon_seq: Sequence number of the next record for nbcon to print
|
||
+ * @pbufs: Pointer to nbcon private buffer
|
||
+ * @kthread: Printer kthread for this console
|
||
+ * @rcuwait: RCU-safe wait object for @kthread waking
|
||
+ * @irq_work: Defer @kthread waking to IRQ work context
|
||
*/
|
||
struct console {
|
||
char name[16];
|
||
@@ -206,6 +326,20 @@ struct console {
|
||
unsigned long dropped;
|
||
void *data;
|
||
struct hlist_node node;
|
||
+
|
||
+ /* nbcon console specific members */
|
||
+ bool (*write_atomic)(struct console *con,
|
||
+ struct nbcon_write_context *wctxt);
|
||
+ bool (*write_thread)(struct console *con,
|
||
+ struct nbcon_write_context *wctxt);
|
||
+ void (*driver_enter)(struct console *con, unsigned long *flags);
|
||
+ void (*driver_exit)(struct console *con, unsigned long flags);
|
||
+ atomic_t __private nbcon_state;
|
||
+ atomic_long_t __private nbcon_seq;
|
||
+ struct printk_buffers *pbufs;
|
||
+ struct task_struct *kthread;
|
||
+ struct rcuwait rcuwait;
|
||
+ struct irq_work irq_work;
|
||
};
|
||
|
||
#ifdef CONFIG_LOCKDEP
|
||
@@ -332,6 +466,22 @@ static inline bool console_is_registered(const struct console *con)
|
||
lockdep_assert_console_list_lock_held(); \
|
||
hlist_for_each_entry(con, &console_list, node)
|
||
|
||
+#ifdef CONFIG_PRINTK
|
||
+extern void nbcon_cpu_emergency_enter(void);
|
||
+extern void nbcon_cpu_emergency_exit(void);
|
||
+extern bool nbcon_can_proceed(struct nbcon_write_context *wctxt);
|
||
+extern bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt);
|
||
+extern bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt);
|
||
+extern void nbcon_reacquire(struct nbcon_write_context *wctxt);
|
||
+#else
|
||
+static inline void nbcon_cpu_emergency_enter(void) { }
|
||
+static inline void nbcon_cpu_emergency_exit(void) { }
|
||
+static inline bool nbcon_can_proceed(struct nbcon_write_context *wctxt) { return false; }
|
||
+static inline bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt) { return false; }
|
||
+static inline bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt) { return false; }
|
||
+static inline void nbcon_reacquire(struct nbcon_write_context *wctxt) { }
|
||
+#endif
|
||
+
|
||
extern int console_set_on_cmdline;
|
||
extern struct console *early_console;
|
||
|
||
diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h
|
||
index b0fb775a6..f5bb19369 100644
|
||
--- a/include/linux/entry-common.h
|
||
+++ b/include/linux/entry-common.h
|
||
@@ -65,7 +65,7 @@
|
||
#define EXIT_TO_USER_MODE_WORK \
|
||
(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
|
||
_TIF_NEED_RESCHED | _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL | \
|
||
- ARCH_EXIT_TO_USER_MODE_WORK)
|
||
+ _TIF_NEED_RESCHED_LAZY | ARCH_EXIT_TO_USER_MODE_WORK)
|
||
|
||
/**
|
||
* arch_enter_from_user_mode - Architecture specific sanity check for user mode regs
|
||
diff --git a/include/linux/entry-kvm.h b/include/linux/entry-kvm.h
|
||
index 6813171af..674a622c9 100644
|
||
--- a/include/linux/entry-kvm.h
|
||
+++ b/include/linux/entry-kvm.h
|
||
@@ -18,7 +18,7 @@
|
||
|
||
#define XFER_TO_GUEST_MODE_WORK \
|
||
(_TIF_NEED_RESCHED | _TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL | \
|
||
- _TIF_NOTIFY_RESUME | ARCH_XFER_TO_GUEST_MODE_WORK)
|
||
+ _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED_LAZY | ARCH_XFER_TO_GUEST_MODE_WORK)
|
||
|
||
struct kvm_vcpu;
|
||
|
||
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
|
||
index 9aac2ab15..6b9c8a16e 100644
|
||
--- a/include/linux/interrupt.h
|
||
+++ b/include/linux/interrupt.h
|
||
@@ -613,6 +613,35 @@ extern void __raise_softirq_irqoff(unsigned int nr);
|
||
extern void raise_softirq_irqoff(unsigned int nr);
|
||
extern void raise_softirq(unsigned int nr);
|
||
|
||
+#ifdef CONFIG_PREEMPT_RT
|
||
+DECLARE_PER_CPU(struct task_struct *, timersd);
|
||
+DECLARE_PER_CPU(unsigned long, pending_timer_softirq);
|
||
+
|
||
+extern void raise_timer_softirq(void);
|
||
+extern void raise_hrtimer_softirq(void);
|
||
+
|
||
+static inline unsigned int local_pending_timers(void)
|
||
+{
|
||
+ return __this_cpu_read(pending_timer_softirq);
|
||
+}
|
||
+
|
||
+#else
|
||
+static inline void raise_timer_softirq(void)
|
||
+{
|
||
+ raise_softirq(TIMER_SOFTIRQ);
|
||
+}
|
||
+
|
||
+static inline void raise_hrtimer_softirq(void)
|
||
+{
|
||
+ raise_softirq_irqoff(HRTIMER_SOFTIRQ);
|
||
+}
|
||
+
|
||
+static inline unsigned int local_pending_timers(void)
|
||
+{
|
||
+ return local_softirq_pending();
|
||
+}
|
||
+#endif
|
||
+
|
||
DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
|
||
|
||
static inline struct task_struct *this_cpu_ksoftirqd(void)
|
||
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
|
||
index 70eec8a48..6b4f36bd3 100644
|
||
--- a/include/linux/netdevice.h
|
||
+++ b/include/linux/netdevice.h
|
||
@@ -3300,7 +3300,11 @@ struct softnet_data {
|
||
int defer_count;
|
||
int defer_ipi_scheduled;
|
||
struct sk_buff *defer_list;
|
||
+#ifndef CONFIG_PREEMPT_RT
|
||
call_single_data_t defer_csd;
|
||
+#else
|
||
+ struct work_struct defer_work;
|
||
+#endif
|
||
};
|
||
|
||
static inline void input_queue_head_incr(struct softnet_data *sd)
|
||
diff --git a/include/linux/printk.h b/include/linux/printk.h
|
||
index e4878bb58..ebebc32e7 100644
|
||
--- a/include/linux/printk.h
|
||
+++ b/include/linux/printk.h
|
||
@@ -9,6 +9,8 @@
|
||
#include <linux/ratelimit_types.h>
|
||
#include <linux/once_lite.h>
|
||
|
||
+struct uart_port;
|
||
+
|
||
extern const char linux_banner[];
|
||
extern const char linux_proc_banner[];
|
||
|
||
@@ -159,13 +161,16 @@ __printf(1, 2) __cold int _printk_deferred(const char *fmt, ...);
|
||
|
||
extern void __printk_safe_enter(void);
|
||
extern void __printk_safe_exit(void);
|
||
+extern void __printk_deferred_enter(void);
|
||
+extern void __printk_deferred_exit(void);
|
||
+
|
||
/*
|
||
* The printk_deferred_enter/exit macros are available only as a hack for
|
||
* some code paths that need to defer all printk console printing. Interrupts
|
||
* must be disabled for the deferred duration.
|
||
*/
|
||
-#define printk_deferred_enter __printk_safe_enter
|
||
-#define printk_deferred_exit __printk_safe_exit
|
||
+#define printk_deferred_enter() __printk_deferred_enter()
|
||
+#define printk_deferred_exit() __printk_deferred_exit()
|
||
|
||
/*
|
||
* Please don't use printk_ratelimit(), because it shares ratelimiting state
|
||
@@ -192,6 +197,10 @@ void show_regs_print_info(const char *log_lvl);
|
||
extern asmlinkage void dump_stack_lvl(const char *log_lvl) __cold;
|
||
extern asmlinkage void dump_stack(void) __cold;
|
||
void printk_trigger_flush(void);
|
||
+void printk_legacy_allow_panic_sync(void);
|
||
+extern void nbcon_acquire(struct uart_port *up);
|
||
+extern void nbcon_release(struct uart_port *up);
|
||
+void nbcon_atomic_flush_unsafe(void);
|
||
#else
|
||
static inline __printf(1, 0)
|
||
int vprintk(const char *s, va_list args)
|
||
@@ -271,6 +280,23 @@ static inline void dump_stack(void)
|
||
static inline void printk_trigger_flush(void)
|
||
{
|
||
}
|
||
+
|
||
+static inline void printk_legacy_allow_panic_sync(void)
|
||
+{
|
||
+}
|
||
+
|
||
+static inline void nbcon_acquire(struct uart_port *up)
|
||
+{
|
||
+}
|
||
+
|
||
+static inline void nbcon_release(struct uart_port *up)
|
||
+{
|
||
+}
|
||
+
|
||
+static inline void nbcon_atomic_flush_unsafe(void)
|
||
+{
|
||
+}
|
||
+
|
||
#endif
|
||
|
||
#ifdef CONFIG_SMP
|
||
diff --git a/include/linux/sched.h b/include/linux/sched.h
|
||
index a694cc11d..1d6148e22 100644
|
||
--- a/include/linux/sched.h
|
||
+++ b/include/linux/sched.h
|
||
@@ -946,6 +946,9 @@ struct task_struct {
|
||
* ->sched_remote_wakeup gets used, so it can be in this word.
|
||
*/
|
||
unsigned sched_remote_wakeup:1;
|
||
+#ifdef CONFIG_RT_MUTEXES
|
||
+ unsigned sched_rt_mutex:1;
|
||
+#endif
|
||
|
||
/* Bit to tell LSMs we're in execve(): */
|
||
unsigned in_execve:1;
|
||
@@ -1992,6 +1995,7 @@ static inline int dl_task_check_affinity(struct task_struct *p, const struct cpu
|
||
}
|
||
#endif
|
||
|
||
+extern bool task_is_pi_boosted(const struct task_struct *p);
|
||
extern int yield_to(struct task_struct *p, bool preempt);
|
||
extern void set_user_nice(struct task_struct *p, long nice);
|
||
extern int task_prio(const struct task_struct *p);
|
||
@@ -2143,17 +2147,17 @@ static inline void update_tsk_thread_flag(struct task_struct *tsk, int flag,
|
||
update_ti_thread_flag(task_thread_info(tsk), flag, value);
|
||
}
|
||
|
||
-static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
|
||
+static inline bool test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
|
||
{
|
||
return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
|
||
}
|
||
|
||
-static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
|
||
+static inline bool test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
|
||
{
|
||
return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
|
||
}
|
||
|
||
-static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
|
||
+static inline bool test_tsk_thread_flag(struct task_struct *tsk, int flag)
|
||
{
|
||
return test_ti_thread_flag(task_thread_info(tsk), flag);
|
||
}
|
||
@@ -2166,9 +2170,11 @@ static inline void set_tsk_need_resched(struct task_struct *tsk)
|
||
static inline void clear_tsk_need_resched(struct task_struct *tsk)
|
||
{
|
||
clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
|
||
+ if (IS_ENABLED(CONFIG_PREEMPT_BUILD_AUTO))
|
||
+ clear_tsk_thread_flag(tsk, TIF_NEED_RESCHED_LAZY);
|
||
}
|
||
|
||
-static inline int test_tsk_need_resched(struct task_struct *tsk)
|
||
+static inline bool test_tsk_need_resched(struct task_struct *tsk)
|
||
{
|
||
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
|
||
}
|
||
@@ -2349,7 +2355,7 @@ static inline int rwlock_needbreak(rwlock_t *lock)
|
||
|
||
static __always_inline bool need_resched(void)
|
||
{
|
||
- return unlikely(tif_need_resched());
|
||
+ return unlikely(tif_need_resched_lazy() || tif_need_resched());
|
||
}
|
||
|
||
/*
|
||
diff --git a/include/linux/sched/idle.h b/include/linux/sched/idle.h
|
||
index 478084f91..719416fe8 100644
|
||
--- a/include/linux/sched/idle.h
|
||
+++ b/include/linux/sched/idle.h
|
||
@@ -63,7 +63,7 @@ static __always_inline bool __must_check current_set_polling_and_test(void)
|
||
*/
|
||
smp_mb__after_atomic();
|
||
|
||
- return unlikely(tif_need_resched());
|
||
+ return unlikely(need_resched());
|
||
}
|
||
|
||
static __always_inline bool __must_check current_clr_polling_and_test(void)
|
||
@@ -76,7 +76,7 @@ static __always_inline bool __must_check current_clr_polling_and_test(void)
|
||
*/
|
||
smp_mb__after_atomic();
|
||
|
||
- return unlikely(tif_need_resched());
|
||
+ return unlikely(need_resched());
|
||
}
|
||
|
||
#else
|
||
@@ -85,11 +85,11 @@ static inline void __current_clr_polling(void) { }
|
||
|
||
static inline bool __must_check current_set_polling_and_test(void)
|
||
{
|
||
- return unlikely(tif_need_resched());
|
||
+ return unlikely(need_resched());
|
||
}
|
||
static inline bool __must_check current_clr_polling_and_test(void)
|
||
{
|
||
- return unlikely(tif_need_resched());
|
||
+ return unlikely(need_resched());
|
||
}
|
||
#endif
|
||
|
||
diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
|
||
index 994c25640..b2b9e6eb9 100644
|
||
--- a/include/linux/sched/rt.h
|
||
+++ b/include/linux/sched/rt.h
|
||
@@ -30,6 +30,10 @@ static inline bool task_is_realtime(struct task_struct *tsk)
|
||
}
|
||
|
||
#ifdef CONFIG_RT_MUTEXES
|
||
+extern void rt_mutex_pre_schedule(void);
|
||
+extern void rt_mutex_schedule(void);
|
||
+extern void rt_mutex_post_schedule(void);
|
||
+
|
||
/*
|
||
* Must hold either p->pi_lock or task_rq(p)->lock.
|
||
*/
|
||
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
|
||
index be65de65f..ec46e3b49 100644
|
||
--- a/include/linux/serial_8250.h
|
||
+++ b/include/linux/serial_8250.h
|
||
@@ -153,6 +153,8 @@ struct uart_8250_port {
|
||
#define MSR_SAVE_FLAGS UART_MSR_ANY_DELTA
|
||
unsigned char msr_saved_flags;
|
||
|
||
+ bool console_newline_needed;
|
||
+
|
||
struct uart_8250_dma *dma;
|
||
const struct uart_8250_ops *ops;
|
||
|
||
@@ -204,6 +206,10 @@ void serial8250_init_port(struct uart_8250_port *up);
|
||
void serial8250_set_defaults(struct uart_8250_port *up);
|
||
void serial8250_console_write(struct uart_8250_port *up, const char *s,
|
||
unsigned int count);
|
||
+bool serial8250_console_write_atomic(struct uart_8250_port *up,
|
||
+ struct nbcon_write_context *wctxt);
|
||
+bool serial8250_console_write_thread(struct uart_8250_port *up,
|
||
+ struct nbcon_write_context *wctxt);
|
||
int serial8250_console_setup(struct uart_port *port, char *options, bool probe);
|
||
int serial8250_console_exit(struct uart_port *port);
|
||
|
||
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
|
||
index 5136eade9..9c7bce638 100644
|
||
--- a/include/linux/serial_core.h
|
||
+++ b/include/linux/serial_core.h
|
||
@@ -489,6 +489,7 @@ struct uart_port {
|
||
struct uart_icount icount; /* statistics */
|
||
|
||
struct console *cons; /* struct console, if any */
|
||
+ bool nbcon_locked_port; /* True, if the port is locked by nbcon */
|
||
/* flags must be updated while holding port mutex */
|
||
upf_t flags;
|
||
|
||
@@ -596,6 +597,7 @@ struct uart_port {
|
||
static inline void uart_port_lock(struct uart_port *up)
|
||
{
|
||
spin_lock(&up->lock);
|
||
+ nbcon_acquire(up);
|
||
}
|
||
|
||
/**
|
||
@@ -605,6 +607,7 @@ static inline void uart_port_lock(struct uart_port *up)
|
||
static inline void uart_port_lock_irq(struct uart_port *up)
|
||
{
|
||
spin_lock_irq(&up->lock);
|
||
+ nbcon_acquire(up);
|
||
}
|
||
|
||
/**
|
||
@@ -615,6 +618,7 @@ static inline void uart_port_lock_irq(struct uart_port *up)
|
||
static inline void uart_port_lock_irqsave(struct uart_port *up, unsigned long *flags)
|
||
{
|
||
spin_lock_irqsave(&up->lock, *flags);
|
||
+ nbcon_acquire(up);
|
||
}
|
||
|
||
/**
|
||
@@ -625,7 +629,11 @@ static inline void uart_port_lock_irqsave(struct uart_port *up, unsigned long *f
|
||
*/
|
||
static inline bool uart_port_trylock(struct uart_port *up)
|
||
{
|
||
- return spin_trylock(&up->lock);
|
||
+ if (!spin_trylock(&up->lock))
|
||
+ return false;
|
||
+
|
||
+ nbcon_acquire(up);
|
||
+ return true;
|
||
}
|
||
|
||
/**
|
||
@@ -637,7 +645,11 @@ static inline bool uart_port_trylock(struct uart_port *up)
|
||
*/
|
||
static inline bool uart_port_trylock_irqsave(struct uart_port *up, unsigned long *flags)
|
||
{
|
||
- return spin_trylock_irqsave(&up->lock, *flags);
|
||
+ if (!spin_trylock_irqsave(&up->lock, *flags))
|
||
+ return false;
|
||
+
|
||
+ nbcon_acquire(up);
|
||
+ return true;
|
||
}
|
||
|
||
/**
|
||
@@ -646,6 +658,7 @@ static inline bool uart_port_trylock_irqsave(struct uart_port *up, unsigned long
|
||
*/
|
||
static inline void uart_port_unlock(struct uart_port *up)
|
||
{
|
||
+ nbcon_release(up);
|
||
spin_unlock(&up->lock);
|
||
}
|
||
|
||
@@ -655,6 +668,7 @@ static inline void uart_port_unlock(struct uart_port *up)
|
||
*/
|
||
static inline void uart_port_unlock_irq(struct uart_port *up)
|
||
{
|
||
+ nbcon_release(up);
|
||
spin_unlock_irq(&up->lock);
|
||
}
|
||
|
||
@@ -664,6 +678,19 @@ static inline void uart_port_unlock_irq(struct uart_port *up)
|
||
* @flags: The saved interrupt flags for restore
|
||
*/
|
||
static inline void uart_port_unlock_irqrestore(struct uart_port *up, unsigned long flags)
|
||
+{
|
||
+ nbcon_release(up);
|
||
+ spin_unlock_irqrestore(&up->lock, flags);
|
||
+}
|
||
+
|
||
+/* Only for use in the console->driver_enter() callback. */
|
||
+static inline void __uart_port_lock_irqsave(struct uart_port *up, unsigned long *flags)
|
||
+{
|
||
+ spin_lock_irqsave(&up->lock, *flags);
|
||
+}
|
||
+
|
||
+/* Only for use in the console->driver_exit() callback. */
|
||
+static inline void __uart_port_unlock_irqrestore(struct uart_port *up, unsigned long flags)
|
||
{
|
||
spin_unlock_irqrestore(&up->lock, flags);
|
||
}
|
||
@@ -1076,14 +1103,14 @@ static inline void uart_unlock_and_check_sysrq(struct uart_port *port)
|
||
u8 sysrq_ch;
|
||
|
||
if (!port->has_sysrq) {
|
||
- spin_unlock(&port->lock);
|
||
+ uart_port_unlock(port);
|
||
return;
|
||
}
|
||
|
||
sysrq_ch = port->sysrq_ch;
|
||
port->sysrq_ch = 0;
|
||
|
||
- spin_unlock(&port->lock);
|
||
+ uart_port_unlock(port);
|
||
|
||
if (sysrq_ch)
|
||
handle_sysrq(sysrq_ch);
|
||
@@ -1095,14 +1122,14 @@ static inline void uart_unlock_and_check_sysrq_irqrestore(struct uart_port *port
|
||
u8 sysrq_ch;
|
||
|
||
if (!port->has_sysrq) {
|
||
- spin_unlock_irqrestore(&port->lock, flags);
|
||
+ uart_port_unlock_irqrestore(port, flags);
|
||
return;
|
||
}
|
||
|
||
sysrq_ch = port->sysrq_ch;
|
||
port->sysrq_ch = 0;
|
||
|
||
- spin_unlock_irqrestore(&port->lock, flags);
|
||
+ uart_port_unlock_irqrestore(port, flags);
|
||
|
||
if (sysrq_ch)
|
||
handle_sysrq(sysrq_ch);
|
||
@@ -1118,12 +1145,12 @@ static inline int uart_prepare_sysrq_char(struct uart_port *port, u8 ch)
|
||
}
|
||
static inline void uart_unlock_and_check_sysrq(struct uart_port *port)
|
||
{
|
||
- spin_unlock(&port->lock);
|
||
+ uart_port_unlock(port);
|
||
}
|
||
static inline void uart_unlock_and_check_sysrq_irqrestore(struct uart_port *port,
|
||
unsigned long flags)
|
||
{
|
||
- spin_unlock_irqrestore(&port->lock, flags);
|
||
+ uart_port_unlock_irqrestore(port, flags);
|
||
}
|
||
#endif /* CONFIG_MAGIC_SYSRQ_SERIAL */
|
||
|
||
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
|
||
index 74d9fe360..0be63993d 100644
|
||
--- a/include/linux/thread_info.h
|
||
+++ b/include/linux/thread_info.h
|
||
@@ -39,6 +39,16 @@ enum syscall_work_bit {
|
||
|
||
#include <asm/thread_info.h>
|
||
|
||
+#ifdef CONFIG_PREEMPT_BUILD_AUTO
|
||
+# define TIF_NEED_RESCHED_LAZY TIF_ARCH_RESCHED_LAZY
|
||
+# define _TIF_NEED_RESCHED_LAZY _TIF_ARCH_RESCHED_LAZY
|
||
+# define TIF_NEED_RESCHED_LAZY_OFFSET (TIF_NEED_RESCHED_LAZY - TIF_NEED_RESCHED)
|
||
+#else
|
||
+# define TIF_NEED_RESCHED_LAZY TIF_NEED_RESCHED
|
||
+# define _TIF_NEED_RESCHED_LAZY _TIF_NEED_RESCHED
|
||
+# define TIF_NEED_RESCHED_LAZY_OFFSET 0
|
||
+#endif
|
||
+
|
||
#ifdef __KERNEL__
|
||
|
||
#ifndef arch_set_restart_data
|
||
@@ -114,6 +124,13 @@ static __always_inline bool tif_need_resched(void)
			     (unsigned long *)(&current_thread_info()->flags));
}

+static __always_inline bool tif_need_resched_lazy(void)
+{
+	return IS_ENABLED(CONFIG_PREEMPT_BUILD_AUTO) &&
+		arch_test_bit(TIF_NEED_RESCHED_LAZY,
+			      (unsigned long *)(&current_thread_info()->flags));
+}
+
#else

static __always_inline bool tif_need_resched(void)
@@ -122,6 +139,13 @@ static __always_inline bool tif_need_resched(void)
			(unsigned long *)(&current_thread_info()->flags));
}

+static __always_inline bool tif_need_resched_lazy(void)
+{
+	return IS_ENABLED(CONFIG_PREEMPT_BUILD_AUTO) &&
+		test_bit(TIF_NEED_RESCHED_LAZY,
+			 (unsigned long *)(&current_thread_info()->flags));
+}
+
#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H */

#ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
|
||
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
|
||
index 3f10e29df..37bb8e247 100644
|
||
--- a/include/linux/trace_events.h
|
||
+++ b/include/linux/trace_events.h
|
||
@@ -179,8 +179,8 @@ unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status);
|
||
|
||
enum trace_flag_type {
|
||
TRACE_FLAG_IRQS_OFF = 0x01,
|
||
- TRACE_FLAG_IRQS_NOSUPPORT = 0x02,
|
||
- TRACE_FLAG_NEED_RESCHED = 0x04,
|
||
+ TRACE_FLAG_NEED_RESCHED = 0x02,
|
||
+ TRACE_FLAG_NEED_RESCHED_LAZY = 0x04,
|
||
TRACE_FLAG_HARDIRQ = 0x08,
|
||
TRACE_FLAG_SOFTIRQ = 0x10,
|
||
TRACE_FLAG_PREEMPT_RESCHED = 0x20,
|
||
@@ -206,11 +206,11 @@ static inline unsigned int tracing_gen_ctx(void)
|
||
|
||
static inline unsigned int tracing_gen_ctx_flags(unsigned long irqflags)
|
||
{
|
||
- return tracing_gen_ctx_irq_test(TRACE_FLAG_IRQS_NOSUPPORT);
|
||
+ return tracing_gen_ctx_irq_test(0);
|
||
}
|
||
static inline unsigned int tracing_gen_ctx(void)
|
||
{
|
||
- return tracing_gen_ctx_irq_test(TRACE_FLAG_IRQS_NOSUPPORT);
|
||
+ return tracing_gen_ctx_irq_test(0);
|
||
}
|
||
#endif
|
||
|
||
diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
|
||
index dc2a630f2..f6a3e3b53 100644
|
||
--- a/kernel/Kconfig.preempt
|
||
+++ b/kernel/Kconfig.preempt
|
||
@@ -11,6 +11,13 @@ config PREEMPT_BUILD
|
||
select PREEMPTION
|
||
select UNINLINE_SPIN_UNLOCK if !ARCH_INLINE_SPIN_UNLOCK
|
||
|
||
+config PREEMPT_BUILD_AUTO
|
||
+ bool
|
||
+ select PREEMPT_BUILD
|
||
+
|
||
+config HAVE_PREEMPT_AUTO
|
||
+ bool
|
||
+
|
||
choice
|
||
prompt "Preemption Model"
|
||
default PREEMPT_NONE
|
||
@@ -67,9 +74,17 @@ config PREEMPT
|
||
embedded system with latency requirements in the milliseconds
|
||
range.
|
||
|
||
+config PREEMPT_AUTO
|
||
+ bool "Automagic preemption mode with runtime tweaking support"
|
||
+ depends on HAVE_PREEMPT_AUTO
|
||
+ select PREEMPT_BUILD_AUTO
|
||
+ help
|
||
+ Add some sensible blurb here
|
||
+
|
||
config PREEMPT_RT
|
||
bool "Fully Preemptible Kernel (Real-Time)"
|
||
depends on EXPERT && ARCH_SUPPORTS_RT
|
||
+ select PREEMPT_BUILD_AUTO if HAVE_PREEMPT_AUTO
|
||
select PREEMPTION
|
||
help
|
||
This option turns the kernel into a real-time kernel by replacing
|
||
@@ -95,7 +110,7 @@ config PREEMPTION
|
||
|
||
config PREEMPT_DYNAMIC
|
||
bool "Preemption behaviour defined on boot"
|
||
- depends on HAVE_PREEMPT_DYNAMIC && !PREEMPT_RT
|
||
+ depends on HAVE_PREEMPT_DYNAMIC && !PREEMPT_RT && !PREEMPT_AUTO
|
||
select JUMP_LABEL if HAVE_PREEMPT_DYNAMIC_KEY
|
||
select PREEMPT_BUILD
|
||
default y if HAVE_PREEMPT_DYNAMIC_CALL
|
||
diff --git a/kernel/entry/common.c b/kernel/entry/common.c
|
||
index 90843cc38..3f31e6b42 100644
|
||
--- a/kernel/entry/common.c
|
||
+++ b/kernel/entry/common.c
|
||
@@ -98,7 +98,7 @@ __always_inline unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
|
||
|
||
local_irq_enable_exit_to_user(ti_work);
|
||
|
||
- if (ti_work & _TIF_NEED_RESCHED)
|
||
+ if (ti_work & (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY))
|
||
schedule();
|
||
|
||
if (ti_work & _TIF_UPROBE)
|
||
@@ -307,7 +307,7 @@ void raw_irqentry_exit_cond_resched(void)
|
||
rcu_irq_exit_check_preempt();
|
||
if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
|
||
WARN_ON_ONCE(!on_thread_stack());
|
||
- if (need_resched())
|
||
+ if (test_tsk_need_resched(current))
|
||
preempt_schedule_irq();
|
||
}
|
||
}
|
||
diff --git a/kernel/entry/kvm.c b/kernel/entry/kvm.c
|
||
index 2e0f75bcb..d952fa5ee 100644
|
||
--- a/kernel/entry/kvm.c
|
||
+++ b/kernel/entry/kvm.c
|
||
@@ -13,7 +13,7 @@ static int xfer_to_guest_mode_work(struct kvm_vcpu *vcpu, unsigned long ti_work)
|
||
return -EINTR;
|
||
}
|
||
|
||
- if (ti_work & _TIF_NEED_RESCHED)
|
||
+ if (ti_work & (_TIF_NEED_RESCHED | TIF_NEED_RESCHED_LAZY))
|
||
schedule();
|
||
|
||
if (ti_work & _TIF_NOTIFY_RESUME)
|
||
diff --git a/kernel/futex/pi.c b/kernel/futex/pi.c
|
||
index ce2889f12..d636a1bbd 100644
|
||
--- a/kernel/futex/pi.c
|
||
+++ b/kernel/futex/pi.c
|
||
@@ -1,6 +1,7 @@
|
||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||
|
||
#include <linux/slab.h>
|
||
+#include <linux/sched/rt.h>
|
||
#include <linux/sched/task.h>
|
||
|
||
#include "futex.h"
|
||
@@ -610,29 +611,16 @@ int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
|
||
/*
|
||
* Caller must hold a reference on @pi_state.
|
||
*/
|
||
-static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_state)
|
||
+static int wake_futex_pi(u32 __user *uaddr, u32 uval,
|
||
+ struct futex_pi_state *pi_state,
|
||
+ struct rt_mutex_waiter *top_waiter)
|
||
{
|
||
- struct rt_mutex_waiter *top_waiter;
|
||
struct task_struct *new_owner;
|
||
bool postunlock = false;
|
||
DEFINE_RT_WAKE_Q(wqh);
|
||
u32 curval, newval;
|
||
int ret = 0;
|
||
|
||
- top_waiter = rt_mutex_top_waiter(&pi_state->pi_mutex);
|
||
- if (WARN_ON_ONCE(!top_waiter)) {
|
||
- /*
|
||
- * As per the comment in futex_unlock_pi() this should not happen.
|
||
- *
|
||
- * When this happens, give up our locks and try again, giving
|
||
- * the futex_lock_pi() instance time to complete, either by
|
||
- * waiting on the rtmutex or removing itself from the futex
|
||
- * queue.
|
||
- */
|
||
- ret = -EAGAIN;
|
||
- goto out_unlock;
|
||
- }
|
||
-
|
||
new_owner = top_waiter->task;
|
||
|
||
/*
|
||
@@ -1002,6 +990,12 @@ int futex_lock_pi(u32 __user *uaddr, unsigned int flags, ktime_t *time, int tryl
|
||
goto no_block;
|
||
}
|
||
|
||
+ /*
|
||
+ * Must be done before we enqueue the waiter, here is unfortunately
|
||
+ * under the hb lock, but that *should* work because it does nothing.
|
||
+ */
|
||
+ rt_mutex_pre_schedule();
|
||
+
|
||
rt_mutex_init_waiter(&rt_waiter);
|
||
|
||
/*
|
||
@@ -1039,19 +1033,37 @@ int futex_lock_pi(u32 __user *uaddr, unsigned int flags, ktime_t *time, int tryl
|
||
ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter);
|
||
|
||
cleanup:
|
||
- spin_lock(q.lock_ptr);
|
||
/*
|
||
* If we failed to acquire the lock (deadlock/signal/timeout), we must
|
||
- * first acquire the hb->lock before removing the lock from the
|
||
- * rt_mutex waitqueue, such that we can keep the hb and rt_mutex wait
|
||
- * lists consistent.
|
||
+ * must unwind the above, however we canont lock hb->lock because
|
||
+ * rt_mutex already has a waiter enqueued and hb->lock can itself try
|
||
+ * and enqueue an rt_waiter through rtlock.
|
||
+ *
|
||
+ * Doing the cleanup without holding hb->lock can cause inconsistent
|
||
+ * state between hb and pi_state, but only in the direction of not
|
||
+ * seeing a waiter that is leaving.
|
||
+ *
|
||
+ * See futex_unlock_pi(), it deals with this inconsistency.
|
||
+ *
|
||
+ * There be dragons here, since we must deal with the inconsistency on
|
||
+ * the way out (here), it is impossible to detect/warn about the race
|
||
+ * the other way around (missing an incoming waiter).
|
||
*
|
||
- * In particular; it is important that futex_unlock_pi() can not
|
||
- * observe this inconsistency.
|
||
+ * What could possibly go wrong...
|
||
*/
|
||
if (ret && !rt_mutex_cleanup_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter))
|
||
ret = 0;
|
||
|
||
+ /*
|
||
+ * Now that the rt_waiter has been dequeued, it is safe to use
|
||
+ * spinlock/rtlock (which might enqueue its own rt_waiter) and fix up
|
||
+ * the
|
||
+ */
|
||
+ spin_lock(q.lock_ptr);
|
||
+ /*
|
||
+ * Waiter is unqueued.
|
||
+ */
|
||
+ rt_mutex_post_schedule();
|
||
no_block:
|
||
/*
|
||
* Fixup the pi_state owner and possibly acquire the lock if we
|
||
@@ -1132,6 +1144,7 @@ int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
|
||
top_waiter = futex_top_waiter(hb, &key);
|
||
if (top_waiter) {
|
||
struct futex_pi_state *pi_state = top_waiter->pi_state;
|
||
+ struct rt_mutex_waiter *rt_waiter;
|
||
|
||
ret = -EINVAL;
|
||
if (!pi_state)
|
||
@@ -1144,22 +1157,39 @@ int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
|
||
if (pi_state->owner != current)
|
||
goto out_unlock;
|
||
|
||
- get_pi_state(pi_state);
|
||
/*
|
||
* By taking wait_lock while still holding hb->lock, we ensure
|
||
- * there is no point where we hold neither; and therefore
|
||
- * wake_futex_p() must observe a state consistent with what we
|
||
- * observed.
|
||
+ * there is no point where we hold neither; and thereby
|
||
+ * wake_futex_pi() must observe any new waiters.
|
||
+ *
|
||
+ * Since the cleanup: case in futex_lock_pi() removes the
|
||
+ * rt_waiter without holding hb->lock, it is possible for
|
||
+ * wake_futex_pi() to not find a waiter while the above does,
|
||
+ * in this case the waiter is on the way out and it can be
|
||
+ * ignored.
|
||
*
|
||
* In particular; this forces __rt_mutex_start_proxy() to
|
||
* complete such that we're guaranteed to observe the
|
||
- * rt_waiter. Also see the WARN in wake_futex_pi().
|
||
+ * rt_waiter.
|
||
*/
|
||
raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
|
||
+
|
||
+ /*
|
||
+ * Futex vs rt_mutex waiter state -- if there are no rt_mutex
|
||
+ * waiters even though futex thinks there are, then the waiter
|
||
+ * is leaving and the uncontended path is safe to take.
|
||
+ */
|
||
+ rt_waiter = rt_mutex_top_waiter(&pi_state->pi_mutex);
|
||
+ if (!rt_waiter) {
|
||
+ raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
|
||
+ goto do_uncontended;
|
||
+ }
|
||
+
|
||
+ get_pi_state(pi_state);
|
||
spin_unlock(&hb->lock);
|
||
|
||
/* drops pi_state->pi_mutex.wait_lock */
|
||
- ret = wake_futex_pi(uaddr, uval, pi_state);
|
||
+ ret = wake_futex_pi(uaddr, uval, pi_state, rt_waiter);
|
||
|
||
put_pi_state(pi_state);
|
||
|
||
@@ -1187,6 +1217,7 @@ int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
|
||
return ret;
|
||
}
|
||
|
||
+do_uncontended:
|
||
/*
|
||
* We have no kernel internal state, i.e. no waiters in the
|
||
* kernel. Waiters which are about to queue themselves are stuck
|
||
diff --git a/kernel/futex/requeue.c b/kernel/futex/requeue.c
|
||
index cba8b1a6a..4c73e0b81 100644
|
||
--- a/kernel/futex/requeue.c
|
||
+++ b/kernel/futex/requeue.c
|
||
@@ -850,11 +850,13 @@ int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
|
||
pi_mutex = &q.pi_state->pi_mutex;
|
||
ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter);
|
||
|
||
- /* Current is not longer pi_blocked_on */
|
||
- spin_lock(q.lock_ptr);
|
||
+ /*
|
||
+ * See futex_unlock_pi()'s cleanup: comment.
|
||
+ */
|
||
if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter))
|
||
ret = 0;
|
||
|
||
+ spin_lock(q.lock_ptr);
|
||
debug_rt_mutex_free_waiter(&rt_waiter);
|
||
/*
|
||
* Fixup the pi_state owner and possibly acquire the lock if we
|
||
diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
|
||
index 1d4bc493b..486c68c11 100644
|
||
--- a/kernel/ksysfs.c
|
||
+++ b/kernel/ksysfs.c
|
||
@@ -179,6 +179,15 @@ KERNEL_ATTR_RO(crash_elfcorehdr_size);
|
||
|
||
#endif /* CONFIG_CRASH_CORE */
|
||
|
||
+#if defined(CONFIG_PREEMPT_RT)
|
||
+static ssize_t realtime_show(struct kobject *kobj,
|
||
+ struct kobj_attribute *attr, char *buf)
|
||
+{
|
||
+ return sprintf(buf, "%d\n", 1);
|
||
+}
|
||
+KERNEL_ATTR_RO(realtime);
|
||
+#endif
|
||
+
|
||
/* whether file capabilities are enabled */
|
||
static ssize_t fscaps_show(struct kobject *kobj,
|
||
struct kobj_attribute *attr, char *buf)
|
||
@@ -274,6 +283,9 @@ static struct attribute * kernel_attrs[] = {
|
||
#ifndef CONFIG_TINY_RCU
|
||
&rcu_expedited_attr.attr,
|
||
&rcu_normal_attr.attr,
|
||
+#endif
|
||
+#ifdef CONFIG_PREEMPT_RT
|
||
+ &realtime_attr.attr,
|
||
#endif
|
||
NULL
|
||
};
|
||
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
|
||
index 3468d8230..6eef8527e 100644
|
||
--- a/kernel/locking/lockdep.c
|
||
+++ b/kernel/locking/lockdep.c
|
||
@@ -56,6 +56,7 @@
|
||
#include <linux/kprobes.h>
|
||
#include <linux/lockdep.h>
|
||
#include <linux/context_tracking.h>
|
||
+#include <linux/console.h>
|
||
|
||
#include <asm/sections.h>
|
||
|
||
@@ -3971,6 +3972,8 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this,
|
||
if (!debug_locks_off() || debug_locks_silent)
|
||
return;
|
||
|
||
+ nbcon_cpu_emergency_enter();
|
||
+
|
||
pr_warn("\n");
|
||
pr_warn("================================\n");
|
||
pr_warn("WARNING: inconsistent lock state\n");
|
||
@@ -3999,6 +4002,8 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this,
|
||
|
||
pr_warn("\nstack backtrace:\n");
|
||
dump_stack();
|
||
+
|
||
+ nbcon_cpu_emergency_exit();
|
||
}
|
||
|
||
/*
|
||
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
|
||
index bf3a28ee7..99129c891 100644
|
||
--- a/kernel/locking/rtmutex.c
|
||
+++ b/kernel/locking/rtmutex.c
|
||
@@ -218,6 +218,11 @@ static __always_inline bool rt_mutex_cmpxchg_acquire(struct rt_mutex_base *lock,
|
||
return try_cmpxchg_acquire(&lock->owner, &old, new);
|
||
}
|
||
|
||
+static __always_inline bool rt_mutex_try_acquire(struct rt_mutex_base *lock)
|
||
+{
|
||
+ return rt_mutex_cmpxchg_acquire(lock, NULL, current);
|
||
+}
|
||
+
|
||
static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex_base *lock,
|
||
struct task_struct *old,
|
||
struct task_struct *new)
|
||
@@ -297,6 +302,20 @@ static __always_inline bool rt_mutex_cmpxchg_acquire(struct rt_mutex_base *lock,
|
||
|
||
}
|
||
|
||
+static int __sched rt_mutex_slowtrylock(struct rt_mutex_base *lock);
|
||
+
|
||
+static __always_inline bool rt_mutex_try_acquire(struct rt_mutex_base *lock)
|
||
+{
|
||
+ /*
|
||
+ * With debug enabled rt_mutex_cmpxchg trylock() will always fail.
|
||
+ *
|
||
+ * Avoid unconditionally taking the slow path by using
|
||
+ * rt_mutex_slow_trylock() which is covered by the debug code and can
|
||
+ * acquire a non-contended rtmutex.
|
||
+ */
|
||
+ return rt_mutex_slowtrylock(lock);
|
||
+}
|
||
+
|
||
static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex_base *lock,
|
||
struct task_struct *old,
|
||
struct task_struct *new)
|
||
@@ -1613,7 +1632,7 @@ static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock,
|
||
raw_spin_unlock_irq(&lock->wait_lock);
|
||
|
||
if (!owner || !rtmutex_spin_on_owner(lock, waiter, owner))
|
||
- schedule();
|
||
+ rt_mutex_schedule();
|
||
|
||
raw_spin_lock_irq(&lock->wait_lock);
|
||
set_current_state(state);
|
||
@@ -1643,7 +1662,7 @@ static void __sched rt_mutex_handle_deadlock(int res, int detect_deadlock,
|
||
|
||
while (1) {
|
||
set_current_state(TASK_INTERRUPTIBLE);
|
||
- schedule();
|
||
+ rt_mutex_schedule();
|
||
}
|
||
}
|
||
|
||
@@ -1738,6 +1757,15 @@ static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock,
|
||
unsigned long flags;
|
||
int ret;
|
||
|
||
+ /*
|
||
+ * Do all pre-schedule work here, before we queue a waiter and invoke
|
||
+ * PI -- any such work that trips on rtlock (PREEMPT_RT spinlock) would
|
||
+ * otherwise recurse back into task_blocks_on_rt_mutex() through
|
||
+ * rtlock_slowlock() and will then enqueue a second waiter for this
|
||
+ * same task and things get really confusing real fast.
|
||
+ */
|
||
+ rt_mutex_pre_schedule();
|
||
+
|
||
/*
|
||
* Technically we could use raw_spin_[un]lock_irq() here, but this can
|
||
* be called in early boot if the cmpxchg() fast path is disabled
|
||
@@ -1749,6 +1777,7 @@ static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock,
|
||
raw_spin_lock_irqsave(&lock->wait_lock, flags);
|
||
ret = __rt_mutex_slowlock_locked(lock, ww_ctx, state);
|
||
raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
|
||
+ rt_mutex_post_schedule();
|
||
|
||
return ret;
|
||
}
|
||
@@ -1756,7 +1785,9 @@ static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock,
|
||
static __always_inline int __rt_mutex_lock(struct rt_mutex_base *lock,
|
||
unsigned int state)
|
||
{
|
||
- if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
|
||
+ lockdep_assert(!current->pi_blocked_on);
|
||
+
|
||
+ if (likely(rt_mutex_try_acquire(lock)))
|
||
return 0;
|
||
|
||
return rt_mutex_slowlock(lock, NULL, state);
|
||
diff --git a/kernel/locking/rwbase_rt.c b/kernel/locking/rwbase_rt.c
|
||
index 25ec02394..34a59569d 100644
|
||
--- a/kernel/locking/rwbase_rt.c
|
||
+++ b/kernel/locking/rwbase_rt.c
|
||
@@ -71,6 +71,7 @@ static int __sched __rwbase_read_lock(struct rwbase_rt *rwb,
|
||
struct rt_mutex_base *rtm = &rwb->rtmutex;
|
||
int ret;
|
||
|
||
+ rwbase_pre_schedule();
|
||
raw_spin_lock_irq(&rtm->wait_lock);
|
||
|
||
/*
|
||
@@ -125,12 +126,15 @@ static int __sched __rwbase_read_lock(struct rwbase_rt *rwb,
|
||
rwbase_rtmutex_unlock(rtm);
|
||
|
||
trace_contention_end(rwb, ret);
|
||
+ rwbase_post_schedule();
|
||
return ret;
|
||
}
|
||
|
||
static __always_inline int rwbase_read_lock(struct rwbase_rt *rwb,
|
||
unsigned int state)
|
||
{
|
||
+ lockdep_assert(!current->pi_blocked_on);
|
||
+
|
||
if (rwbase_read_trylock(rwb))
|
||
return 0;
|
||
|
||
@@ -237,6 +241,8 @@ static int __sched rwbase_write_lock(struct rwbase_rt *rwb,
|
||
/* Force readers into slow path */
|
||
atomic_sub(READER_BIAS, &rwb->readers);
|
||
|
||
+ rwbase_pre_schedule();
|
||
+
|
||
raw_spin_lock_irqsave(&rtm->wait_lock, flags);
|
||
if (__rwbase_write_trylock(rwb))
|
||
goto out_unlock;
|
||
@@ -248,6 +254,7 @@ static int __sched rwbase_write_lock(struct rwbase_rt *rwb,
|
||
if (rwbase_signal_pending_state(state, current)) {
|
||
rwbase_restore_current_state();
|
||
__rwbase_write_unlock(rwb, 0, flags);
|
||
+ rwbase_post_schedule();
|
||
trace_contention_end(rwb, -EINTR);
|
||
return -EINTR;
|
||
}
|
||
@@ -266,6 +273,7 @@ static int __sched rwbase_write_lock(struct rwbase_rt *rwb,
|
||
|
||
out_unlock:
|
||
raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
|
||
+ rwbase_post_schedule();
|
||
return 0;
|
||
}
|
||
|
||
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
|
||
index 11ed7ce65..62eac9fd8 100644
|
||
--- a/kernel/locking/rwsem.c
|
||
+++ b/kernel/locking/rwsem.c
|
||
@@ -1427,8 +1427,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
|
||
#define rwbase_signal_pending_state(state, current) \
|
||
signal_pending_state(state, current)
|
||
|
||
+#define rwbase_pre_schedule() \
|
||
+ rt_mutex_pre_schedule()
|
||
+
|
||
#define rwbase_schedule() \
|
||
- schedule()
|
||
+ rt_mutex_schedule()
|
||
+
|
||
+#define rwbase_post_schedule() \
|
||
+ rt_mutex_post_schedule()
|
||
|
||
#include "rwbase_rt.c"
|
||
|
||
diff --git a/kernel/locking/spinlock_rt.c b/kernel/locking/spinlock_rt.c
|
||
index 48a19ed84..38e292454 100644
|
||
--- a/kernel/locking/spinlock_rt.c
|
||
+++ b/kernel/locking/spinlock_rt.c
|
||
@@ -37,6 +37,8 @@
|
||
|
||
static __always_inline void rtlock_lock(struct rt_mutex_base *rtm)
|
||
{
|
||
+ lockdep_assert(!current->pi_blocked_on);
|
||
+
|
||
if (unlikely(!rt_mutex_cmpxchg_acquire(rtm, NULL, current)))
|
||
rtlock_slowlock(rtm);
|
||
}
|
||
@@ -184,9 +186,13 @@ static __always_inline int rwbase_rtmutex_trylock(struct rt_mutex_base *rtm)
|
||
|
||
#define rwbase_signal_pending_state(state, current) (0)
|
||
|
||
+#define rwbase_pre_schedule()
|
||
+
|
||
#define rwbase_schedule() \
|
||
schedule_rtlock()
|
||
|
||
+#define rwbase_post_schedule()
|
||
+
|
||
#include "rwbase_rt.c"
|
||
/*
|
||
* The common functions which get wrapped into the rwlock API.
|
||
diff --git a/kernel/locking/ww_rt_mutex.c b/kernel/locking/ww_rt_mutex.c
|
||
index d1473c624..c7196de83 100644
|
||
--- a/kernel/locking/ww_rt_mutex.c
|
||
+++ b/kernel/locking/ww_rt_mutex.c
|
||
@@ -62,7 +62,7 @@ __ww_rt_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx,
|
||
}
|
||
mutex_acquire_nest(&rtm->dep_map, 0, 0, nest_lock, ip);
|
||
|
||
- if (likely(rt_mutex_cmpxchg_acquire(&rtm->rtmutex, NULL, current))) {
|
||
+ if (likely(rt_mutex_try_acquire(&rtm->rtmutex))) {
|
||
if (ww_ctx)
|
||
ww_mutex_set_context_fastpath(lock, ww_ctx);
|
||
return 0;
|
||
diff --git a/kernel/panic.c b/kernel/panic.c
|
||
index ef9f9a4e9..9215df21d 100644
|
||
--- a/kernel/panic.c
|
||
+++ b/kernel/panic.c
|
||
@@ -366,6 +366,8 @@ void panic(const char *fmt, ...)
|
||
*/
|
||
atomic_notifier_call_chain(&panic_notifier_list, 0, buf);
|
||
|
||
+ printk_legacy_allow_panic_sync();
|
||
+
|
||
panic_print_sys_info(false);
|
||
|
||
kmsg_dump(KMSG_DUMP_PANIC);
|
||
@@ -449,6 +451,7 @@ void panic(const char *fmt, ...)
|
||
* Explicitly flush the kernel log buffer one last time.
|
||
*/
|
||
console_flush_on_panic(CONSOLE_FLUSH_PENDING);
|
||
+ nbcon_atomic_flush_unsafe();
|
||
|
||
local_irq_enable();
|
||
for (i = 0; ; i += PANIC_TIMER_STEP) {
|
||
@@ -627,6 +630,7 @@ bool oops_may_print(void)
|
||
*/
|
||
void oops_enter(void)
|
||
{
|
||
+ nbcon_cpu_emergency_enter();
|
||
tracing_off();
|
||
/* can't trust the integrity of the kernel anymore: */
|
||
debug_locks_off();
|
||
@@ -649,6 +653,7 @@ void oops_exit(void)
|
||
{
|
||
do_oops_enter_exit();
|
||
print_oops_end_marker();
|
||
+ nbcon_cpu_emergency_exit();
|
||
kmsg_dump(KMSG_DUMP_OOPS);
|
||
}
|
||
|
||
@@ -660,6 +665,8 @@ struct warn_args {
|
||
void __warn(const char *file, int line, void *caller, unsigned taint,
|
||
struct pt_regs *regs, struct warn_args *args)
|
||
{
|
||
+ nbcon_cpu_emergency_enter();
|
||
+
|
||
disable_trace_on_warning();
|
||
|
||
if (file)
|
||
@@ -690,6 +697,8 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
|
||
|
||
/* Just a warning, don't kill lockdep. */
|
||
add_taint(taint, LOCKDEP_STILL_OK);
|
||
+
|
||
+ nbcon_cpu_emergency_exit();
|
||
}
|
||
|
||
#ifdef CONFIG_BUG
|
||
diff --git a/kernel/printk/Makefile b/kernel/printk/Makefile
index f5b388e81..39a2b61c7 100644
--- a/kernel/printk/Makefile
+++ b/kernel/printk/Makefile
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0-only
 obj-y	= printk.o
-obj-$(CONFIG_PRINTK)	+= printk_safe.o
+obj-$(CONFIG_PRINTK)	+= printk_safe.o nbcon.o
 obj-$(CONFIG_A11Y_BRAILLE_CONSOLE)	+= braille.o
 obj-$(CONFIG_PRINTK_INDEX)	+= index.o
 
diff --git a/kernel/printk/internal.h b/kernel/printk/internal.h
index 7d4979d5c..7db6992c5 100644
--- a/kernel/printk/internal.h
+++ b/kernel/printk/internal.h
@@ -3,6 +3,8 @@
  * internal.h - printk internal definitions
  */
 #include <linux/percpu.h>
+#include <linux/console.h>
+#include "printk_ringbuffer.h"
 
 #if defined(CONFIG_PRINTK) && defined(CONFIG_SYSCTL)
 void __init printk_sysctl_init(void);
@@ -12,6 +14,12 @@ int devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write,
 #define printk_sysctl_init() do { } while (0)
 #endif
 
+#define con_printk(lvl, con, fmt, ...)				\
+	printk(lvl pr_fmt("%s%sconsole [%s%d] " fmt),		\
+	       (con->flags & CON_NBCON) ? "" : "legacy ",	\
+	       (con->flags & CON_BOOT) ? "boot" : "",		\
+	       con->name, con->index, ##__VA_ARGS__)
+
 #ifdef CONFIG_PRINTK
 
 #ifdef CONFIG_PRINTK_CALLER
@@ -35,6 +43,19 @@ enum printk_info_flags {
 	LOG_CONT	= 8,	/* text is a fragment of a continuation line */
 };
 
+extern struct printk_ringbuffer *prb;
+extern bool printk_threads_enabled;
+extern bool have_legacy_console;
+extern bool have_boot_console;
+
+/*
+ * Specifies if the console lock/unlock dance is needed for console
+ * printing. If @have_boot_console is true, the nbcon consoles will
+ * be printed serially along with the legacy consoles because nbcon
+ * consoles cannot print simultaneously with boot consoles.
+ */
+#define printing_via_unlock (have_legacy_console || have_boot_console)
+
 __printf(4, 0)
 int vprintk_store(int facility, int level,
 		  const struct dev_printk_info *dev_info,
@@ -61,12 +82,90 @@ void defer_console_output(void);
 
 u16 printk_parse_prefix(const char *text, int *level,
 			enum printk_info_flags *flags);
+void console_lock_spinning_enable(void);
+int console_lock_spinning_disable_and_check(int cookie);
+
+u64 nbcon_seq_read(struct console *con);
+void nbcon_seq_force(struct console *con, u64 seq);
+bool nbcon_alloc(struct console *con);
+void nbcon_init(struct console *con);
+void nbcon_free(struct console *con);
+enum nbcon_prio nbcon_get_default_prio(void);
+void nbcon_atomic_flush_all(void);
+bool nbcon_atomic_emit_next_record(struct console *con, bool *handover, int cookie);
+void nbcon_kthread_create(struct console *con);
+void nbcon_wake_threads(void);
+void nbcon_legacy_kthread_create(void);
+
+/*
+ * Check if the given console is currently capable and allowed to print
+ * records. Note that this function does not consider the current context,
+ * which can also play a role in deciding if @con can be used to print
+ * records.
+ */
+static inline bool console_is_usable(struct console *con, short flags, bool use_atomic)
+{
+	if (!(flags & CON_ENABLED))
+		return false;
+
+	if ((flags & CON_SUSPENDED))
+		return false;
+
+	if (flags & CON_NBCON) {
+		if (use_atomic) {
+			if (!con->write_atomic)
+				return false;
+		} else {
+			if (!con->write_thread || !con->kthread)
+				return false;
+		}
+	} else {
+		if (!con->write)
+			return false;
+	}
+
+	/*
+	 * Console drivers may assume that per-cpu resources have been
+	 * allocated. So unless they're explicitly marked as being able to
+	 * cope (CON_ANYTIME) don't call them until this CPU is officially up.
+	 */
+	if (!cpu_online(raw_smp_processor_id()) && !(flags & CON_ANYTIME))
+		return false;
+
+	return true;
+}
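+
+/*
+ * Illustrative call pattern (a sketch based on the callers added later
+ * in this patch, not additional kernel code): usability is checked
+ * under the console SRCU read lock so the flags cannot change
+ * mid-check:
+ *
+ *	cookie = console_srcu_read_lock();
+ *	if (console_is_usable(con, console_srcu_read_flags(con), true))
+ *		; // print via the write_atomic() callback
+ *	console_srcu_read_unlock(cookie);
+ */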
+
+/**
+ * nbcon_kthread_wake - Wake up a printk thread
+ * @con:	Console to operate on
+ */
+static inline void nbcon_kthread_wake(struct console *con)
+{
+	/*
+	 * Guarantee any new records can be seen by tasks preparing to wait
+	 * before this context checks if the rcuwait is empty.
+	 *
+	 * The full memory barrier in rcuwait_wake_up() pairs with the full
+	 * memory barrier within set_current_state() of
+	 * ___rcuwait_wait_event(), which is called after prepare_to_rcuwait()
+	 * adds the waiter but before it has checked the wait condition.
+	 *
+	 * This pairs with nbcon_kthread_func:A.
+	 */
+	rcuwait_wake_up(&con->rcuwait); /* LMM(nbcon_kthread_wake:A) */
+}
+
 #else
 
 #define PRINTK_PREFIX_MAX	0
 #define PRINTK_MESSAGE_MAX	0
 #define PRINTKRB_RECORD_MAX	0
 
+static inline void nbcon_kthread_wake(struct console *con) { }
+static inline void nbcon_kthread_create(struct console *con) { }
+#define printk_threads_enabled (false)
+#define printing_via_unlock (false)
+
 /*
  * In !PRINTK builds we still export console_sem
  * semaphore and some of console functions (console_unlock()/etc.), so
@@ -76,8 +175,23 @@ u16 printk_parse_prefix(const char *text, int *level,
 #define printk_safe_exit_irqrestore(flags) local_irq_restore(flags)
 
 static inline bool printk_percpu_data_ready(void) { return false; }
+static inline u64 nbcon_seq_read(struct console *con) { return 0; }
+static inline void nbcon_seq_force(struct console *con, u64 seq) { }
+static inline bool nbcon_alloc(struct console *con) { return false; }
+static inline void nbcon_init(struct console *con) { }
+static inline void nbcon_free(struct console *con) { }
+static inline enum nbcon_prio nbcon_get_default_prio(void) { return NBCON_PRIO_NONE; }
+static inline void nbcon_atomic_flush_all(void) { }
+static inline bool nbcon_atomic_emit_next_record(struct console *con, bool *handover,
+						 int cookie) { return false; }
+
+static inline bool console_is_usable(struct console *con, short flags,
+				     bool use_atomic) { return false; }
+
 #endif /* CONFIG_PRINTK */
 
+extern struct printk_buffers printk_shared_pbufs;
+
 /**
  * struct printk_buffers - Buffers to read/format/output printk messages.
  * @outbuf:	After formatting, contains text to output.
@@ -105,3 +219,10 @@ struct printk_message {
 };
 
 bool other_cpu_in_panic(void);
+bool this_cpu_in_panic(void);
+bool printk_get_next_message(struct printk_message *pmsg, u64 seq,
+			     bool is_extended, bool may_supress);
+
+#ifdef CONFIG_PRINTK
+void console_prepend_dropped(struct printk_message *pmsg, unsigned long dropped);
+#endif
diff --git a/kernel/printk/nbcon.c b/kernel/printk/nbcon.c
new file mode 100644
index 000000000..b53d93585
--- /dev/null
+++ b/kernel/printk/nbcon.c
@@ -0,0 +1,1664 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2022 Linutronix GmbH, John Ogness
+// Copyright (C) 2022 Intel, Thomas Gleixner
+
+#include <linux/kernel.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <linux/serial_core.h>
+#include <linux/syscore_ops.h>
+#include "printk_ringbuffer.h"
+#include "internal.h"
+/*
+ * Printk console printing implementation for consoles that do not depend
+ * on the legacy style console_lock mechanism.
+ *
+ * The state of the console is maintained in the "nbcon_state" atomic
+ * variable.
+ *
+ * The console is locked when:
+ *
+ *   - The 'prio' field contains the priority of the context that owns the
+ *     console. Only higher priority contexts are allowed to take over the
+ *     lock. A value of 0 (NBCON_PRIO_NONE) means the console is not locked.
+ *
+ *   - The 'cpu' field denotes on which CPU the console is locked. It is used
+ *     to prevent busy waiting on the same CPU. Also it informs the lock owner
+ *     that it has lost the lock in a more complex scenario when the lock was
+ *     taken over by a higher priority context, released, and taken on another
+ *     CPU with the same priority as the interrupted owner.
+ *
+ * The acquire mechanism uses a few more fields:
+ *
+ *   - The 'req_prio' field is used by the handover approach to make the
+ *     current owner aware that there is a context with a higher priority
+ *     waiting for the friendly handover.
+ *
+ *   - The 'unsafe' field allows taking over the console in a safe way in the
+ *     middle of emitting a message. The field is set only when accessing some
+ *     shared resources or when the console device is manipulated. It can be
+ *     cleared, for example, after emitting one character when the console
+ *     device is in a consistent state.
+ *
+ *   - The 'unsafe_takeover' field is set when a hostile takeover took the
+ *     console in an unsafe state. The console will stay in the unsafe state
+ *     until re-initialized.
+ *
+ * The acquire mechanism uses three approaches:
+ *
+ *   1) Direct acquire when the console is not owned or is owned by a lower
+ *      priority context and is in a safe state.
+ *
+ *   2) Friendly handover mechanism uses a request/grant handshake. It is
+ *      used when the current owner has a lower priority and the console is
+ *      in an unsafe state.
+ *
+ *      The requesting context:
+ *
+ *	a) Sets its priority into the 'req_prio' field.
+ *
+ *	b) Waits (with a timeout) for the owning context to unlock the
+ *	   console.
+ *
+ *	c) Takes the lock and clears the 'req_prio' field.
+ *
+ *      The owning context:
+ *
+ *	a) Observes the 'req_prio' field set on exit from the unsafe
+ *	   console state.
+ *
+ *	b) Gives up console ownership by clearing the 'prio' field.
+ *
+ *   3) Unsafe hostile takeover allows taking over the lock even when the
+ *      console is in an unsafe state. It is used only in panic() by the
+ *      final attempt to flush consoles in a try and hope mode.
+ *
+ *      Note that separate record buffers are used in panic(). As a result,
+ *      the messages can be read and formatted without any risk even after
+ *      using the hostile takeover in unsafe state.
+ *
+ * The release function simply clears the 'prio' field.
+ *
+ * All operations on @console::nbcon_state are atomic cmpxchg based to
+ * handle concurrency.
+ *
+ * The acquire/release functions implement only minimal policies:
+ *
+ *   - Preference for higher priority contexts.
+ *   - Protection of the panic CPU.
+ *
+ * All other policy decisions must be made at the call sites:
+ *
+ *   - What is marked as an unsafe section.
+ *   - Whether to spin-wait if there is already an owner and the console is
+ *     in an unsafe state.
+ *   - Whether to attempt an unsafe hostile takeover.
+ *
+ * The design allows implementing the well-known pattern:
+ *
+ *	acquire()
+ *	output_one_printk_record()
+ *	release()
+ *
+ * The output of one printk record might be interrupted with a higher priority
+ * context. The new owner is supposed to reprint the entire interrupted record
+ * from scratch.
+ */
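+
+/*
+ * Worked example of the friendly handover (illustrative only, derived
+ * from the description above): CPU0 owns the console with
+ * prio=NBCON_PRIO_NORMAL inside an unsafe section while a warning on
+ * CPU1 requests the console with prio=NBCON_PRIO_EMERGENCY:
+ *
+ *	CPU0 (owner)			CPU1 (waiter)
+ *	----------------------------	----------------------------
+ *	unsafe=1, prio=NORMAL		sets req_prio=EMERGENCY
+ *	exits the unsafe section	spin-waits on 'prio'
+ *	sees req_prio, clears prio	takes lock, clears req_prio,
+ *					reprints the whole record
+ */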
+
+/**
+ * nbcon_state_set - Helper function to set the console state
+ * @con:	Console to update
+ * @new:	The new state to write
+ *
+ * Only to be used when the console is not yet or no longer visible in the
+ * system. Otherwise use nbcon_state_try_cmpxchg().
+ */
+static inline void nbcon_state_set(struct console *con, struct nbcon_state *new)
+{
+	atomic_set(&ACCESS_PRIVATE(con, nbcon_state), new->atom);
+}
+
+/**
+ * nbcon_state_read - Helper function to read the console state
+ * @con:	Console to read
+ * @state:	The state to store the result
+ */
+static inline void nbcon_state_read(struct console *con, struct nbcon_state *state)
+{
+	state->atom = atomic_read(&ACCESS_PRIVATE(con, nbcon_state));
+}
+
+/**
+ * nbcon_state_try_cmpxchg() - Helper function for atomic_try_cmpxchg() on console state
+ * @con:	Console to update
+ * @cur:	Old/expected state
+ * @new:	New state
+ *
+ * Return: True on success. False on fail and @cur is updated.
+ */
+static inline bool nbcon_state_try_cmpxchg(struct console *con, struct nbcon_state *cur,
+					   struct nbcon_state *new)
+{
+	return atomic_try_cmpxchg(&ACCESS_PRIVATE(con, nbcon_state), &cur->atom, new->atom);
+}
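+
+/*
+ * All state updates in this file follow the same read/retry shape
+ * (a sketch of the pattern used by the acquire/release functions
+ * below, not extra code):
+ *
+ *	struct nbcon_state cur, new;
+ *
+ *	nbcon_state_read(con, &cur);
+ *	do {
+ *		new.atom = cur.atom;
+ *		new.prio = ...;	// modify the copied fields
+ *	} while (!nbcon_state_try_cmpxchg(con, &cur, &new));
+ */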
+
+/**
+ * nbcon_seq_read - Read the current console sequence
+ * @con:	Console to read the sequence of
+ *
+ * Return:	Sequence number of the next record to print on @con.
+ */
+u64 nbcon_seq_read(struct console *con)
+{
+	unsigned long nbcon_seq = atomic_long_read(&ACCESS_PRIVATE(con, nbcon_seq));
+
+	return __ulseq_to_u64seq(prb, nbcon_seq);
+}
+
+/**
+ * nbcon_seq_force - Force console sequence to a specific value
+ * @con:	Console to work on
+ * @seq:	Sequence number value to set
+ *
+ * Only to be used during init (before registration) or in extreme situations
+ * (such as panic with CONSOLE_REPLAY_ALL).
+ */
+void nbcon_seq_force(struct console *con, u64 seq)
+{
+	/*
+	 * If the specified record no longer exists, the oldest available record
+	 * is chosen. This is especially important on 32bit systems because only
+	 * the lower 32 bits of the sequence number are stored. The upper 32 bits
+	 * are derived from the sequence numbers available in the ringbuffer.
+	 */
+	u64 valid_seq = max_t(u64, seq, prb_first_valid_seq(prb));
+
+	atomic_long_set(&ACCESS_PRIVATE(con, nbcon_seq), __u64seq_to_ulseq(valid_seq));
+
+	/* Clear con->seq since nbcon consoles use con->nbcon_seq instead. */
+	con->seq = 0;
+}
+
+/**
+ * nbcon_seq_try_update - Try to update the console sequence number
+ * @ctxt:	Pointer to an acquire context that contains
+ *		all information about the acquire mode
+ * @new_seq:	The new sequence number to set
+ *
+ * @ctxt->seq is updated to the new value of @con::nbcon_seq (expanded to
+ * the 64bit value). This could be a different value than @new_seq if
+ * nbcon_seq_force() was used or the current context no longer owns the
+ * console. In the latter case, it will stop printing anyway.
+ */
+static void nbcon_seq_try_update(struct nbcon_context *ctxt, u64 new_seq)
+{
+	unsigned long nbcon_seq = __u64seq_to_ulseq(ctxt->seq);
+	struct console *con = ctxt->console;
+
+	if (atomic_long_try_cmpxchg(&ACCESS_PRIVATE(con, nbcon_seq), &nbcon_seq,
+				    __u64seq_to_ulseq(new_seq))) {
+		ctxt->seq = new_seq;
+	} else {
+		ctxt->seq = nbcon_seq_read(con);
+	}
+}
+
+bool printk_threads_enabled __ro_after_init;
+
+/**
+ * nbcon_context_try_acquire_direct - Try to acquire directly
+ * @ctxt:	The context of the caller
+ * @cur:	The current console state
+ *
+ * Acquire the console when it is released. Also acquire the console when
+ * the current owner has a lower priority and the console is in a safe state.
+ *
+ * Return:	0 on success. Otherwise, an error code on failure. Also @cur
+ *		is updated to the latest state when failed to modify it.
+ *
+ * Errors:
+ *
+ *	-EPERM:		A panic is in progress and this is not the panic CPU.
+ *			Or the current owner or waiter has the same or higher
+ *			priority. No acquire method can be successful in
+ *			this case.
+ *
+ *	-EBUSY:		The current owner has a lower priority but the console
+ *			is in an unsafe state. The caller should try using
+ *			the handover acquire method.
+ */
+static int nbcon_context_try_acquire_direct(struct nbcon_context *ctxt,
+					    struct nbcon_state *cur)
+{
+	unsigned int cpu = smp_processor_id();
+	struct console *con = ctxt->console;
+	struct nbcon_state new;
+
+	do {
+		if (other_cpu_in_panic())
+			return -EPERM;
+
+		if (ctxt->prio <= cur->prio || ctxt->prio <= cur->req_prio)
+			return -EPERM;
+
+		if (cur->unsafe)
+			return -EBUSY;
+
+		/*
+		 * The console should never be safe for a direct acquire
+		 * if an unsafe hostile takeover has ever happened.
+		 */
+		WARN_ON_ONCE(cur->unsafe_takeover);
+
+		new.atom = cur->atom;
+		new.prio	= ctxt->prio;
+		new.req_prio	= NBCON_PRIO_NONE;
+		new.unsafe	= cur->unsafe_takeover;
+		new.cpu		= cpu;
+
+	} while (!nbcon_state_try_cmpxchg(con, cur, &new));
+
+	return 0;
+}
+
+static bool nbcon_waiter_matches(struct nbcon_state *cur, int expected_prio)
+{
+	/*
+	 * The request context is well defined by the @req_prio because:
+	 *
+	 * - Only a context with a higher priority can take over the request.
+	 * - There are only three priorities.
+	 * - Only one CPU is allowed to request PANIC priority.
+	 * - Lower priorities are ignored during panic() until reboot.
+	 *
+	 * As a result, the following scenario is *not* possible:
+	 *
+	 * 1. Another context with a higher priority directly takes ownership.
+	 * 2. The higher priority context releases the ownership.
+	 * 3. A lower priority context takes the ownership.
+	 * 4. Another context with the same priority as this context
+	 *    creates a request and starts waiting.
+	 */
+
+	return (cur->req_prio == expected_prio);
+}
+
+/**
+ * nbcon_context_try_acquire_requested - Try to acquire after having
+ *					 requested a handover
+ * @ctxt:	The context of the caller
+ * @cur:	The current console state
+ *
+ * This is a helper function for nbcon_context_try_acquire_handover().
+ * It is called when the console is in an unsafe state. The current
+ * owner will release the console on exit from the unsafe region.
+ *
+ * Return:	0 on success and @cur is updated to the new console state.
+ *		Otherwise an error code on failure.
+ *
+ * Errors:
+ *
+ *	-EPERM:		A panic is in progress and this is not the panic CPU
+ *			or this context is no longer the waiter.
+ *
+ *	-EBUSY:		The console is still locked. The caller should
+ *			continue waiting.
+ *
+ * Note: The caller must still remove the request when an error has occurred
+ *       except when this context is no longer the waiter.
+ */
+static int nbcon_context_try_acquire_requested(struct nbcon_context *ctxt,
+					       struct nbcon_state *cur)
+{
+	unsigned int cpu = smp_processor_id();
+	struct console *con = ctxt->console;
+	struct nbcon_state new;
+
+	/* Note that the caller must still remove the request! */
+	if (other_cpu_in_panic())
+		return -EPERM;
+
+	/*
+	 * Note that the waiter will also change if there was an unsafe
+	 * hostile takeover.
+	 */
+	if (!nbcon_waiter_matches(cur, ctxt->prio))
+		return -EPERM;
+
+	/* If still locked, caller should continue waiting. */
+	if (cur->prio != NBCON_PRIO_NONE)
+		return -EBUSY;
+
+	/*
+	 * The previous owner should have never released ownership
+	 * in an unsafe region.
+	 */
+	WARN_ON_ONCE(cur->unsafe);
+
+	new.atom = cur->atom;
+	new.prio	= ctxt->prio;
+	new.req_prio	= NBCON_PRIO_NONE;
+	new.unsafe	= cur->unsafe_takeover;
+	new.cpu		= cpu;
+
+	if (!nbcon_state_try_cmpxchg(con, cur, &new)) {
+		/*
+		 * The acquire could fail only when it has been taken
+		 * over by a higher priority context.
+		 */
+		WARN_ON_ONCE(nbcon_waiter_matches(cur, ctxt->prio));
+		return -EPERM;
+	}
+
+	/* Handover success. This context now owns the console. */
+	return 0;
+}
+
+/**
+ * nbcon_context_try_acquire_handover - Try to acquire via handover
+ * @ctxt:	The context of the caller
+ * @cur:	The current console state
+ *
+ * The function must be called only when the context has higher priority
+ * than the current owner and the console is in an unsafe state.
+ * It is the case when nbcon_context_try_acquire_direct() returns -EBUSY.
+ *
+ * The function sets the "req_prio" field to make the current owner aware of
+ * the request. Then it waits until the current owner releases the console,
+ * or an even higher priority context takes over the request, or the timeout
+ * expires.
+ *
+ * The current owner checks the "req_prio" field on exit from the unsafe
+ * region and releases the console. It does not touch the "req_prio" field
+ * so that the console stays reserved for the waiter.
+ *
+ * Return:	0 on success. Otherwise, an error code on failure. Also @cur
+ *		is updated to the latest state when failed to modify it.
+ *
+ * Errors:
+ *
+ *	-EPERM:		A panic is in progress and this is not the panic CPU.
+ *			Or a higher priority context has taken over the
+ *			console or the handover request.
+ *
+ *	-EBUSY:		The current owner is on the same CPU so that the
+ *			handshake could not work. Or the current owner is not
+ *			willing to wait (zero timeout). Or the console does
+ *			not enter the safe state before the timeout passed.
+ *			The caller might still use the unsafe hostile takeover
+ *			when allowed.
+ *
+ *	-EAGAIN:	@cur has changed when creating the handover request.
+ *			The caller should retry with direct acquire.
+ */
+static int nbcon_context_try_acquire_handover(struct nbcon_context *ctxt,
+					      struct nbcon_state *cur)
+{
+	unsigned int cpu = smp_processor_id();
+	struct console *con = ctxt->console;
+	struct nbcon_state new;
+	int timeout;
+	int request_err = -EBUSY;
+
+	/*
+	 * Check that the handover is called when the direct acquire failed
+	 * with -EBUSY.
+	 */
+	WARN_ON_ONCE(ctxt->prio <= cur->prio || ctxt->prio <= cur->req_prio);
+	WARN_ON_ONCE(!cur->unsafe);
+
+	/* Handover is not possible on the same CPU. */
+	if (cur->cpu == cpu)
+		return -EBUSY;
+
+	/*
+	 * Console stays unsafe after an unsafe takeover until re-initialized.
+	 * Waiting is not going to help in this case.
+	 */
+	if (cur->unsafe_takeover)
+		return -EBUSY;
+
+	/* Is the caller willing to wait? */
+	if (ctxt->spinwait_max_us == 0)
+		return -EBUSY;
+
+	/*
+	 * Setup a request for the handover. The caller should try to acquire
+	 * the console directly when the current state has been modified.
+	 */
+	new.atom = cur->atom;
+	new.req_prio = ctxt->prio;
+	if (!nbcon_state_try_cmpxchg(con, cur, &new))
+		return -EAGAIN;
+
+	cur->atom = new.atom;
+
+	/* Wait until there is no owner and then acquire the console. */
+	for (timeout = ctxt->spinwait_max_us; timeout >= 0; timeout--) {
+		/* On successful acquire, this request is cleared. */
+		request_err = nbcon_context_try_acquire_requested(ctxt, cur);
+		if (!request_err)
+			return 0;
+
+		/*
+		 * If the acquire should be aborted, it must be ensured
+		 * that the request is removed before returning to caller.
+		 */
+		if (request_err == -EPERM)
+			break;
+
+		udelay(1);
+
+		/* Re-read the state because some time has passed. */
+		nbcon_state_read(con, cur);
+	}
+
+	/* Timed out or aborted. Carefully remove handover request. */
+	do {
+		/*
+		 * No need to remove request if there is a new waiter. This
+		 * can only happen if a higher priority context has taken over
+		 * the console or the handover request.
+		 */
+		if (!nbcon_waiter_matches(cur, ctxt->prio))
+			return -EPERM;
+
+		/* Unset request for handover. */
+		new.atom = cur->atom;
+		new.req_prio = NBCON_PRIO_NONE;
+		if (nbcon_state_try_cmpxchg(con, cur, &new)) {
+			/*
+			 * Request successfully unset. Report failure of
+			 * acquiring via handover.
+			 */
+			cur->atom = new.atom;
+			return request_err;
+		}
+
+		/*
+		 * Unable to remove request. Try to acquire in case
+		 * the owner has released the lock.
+		 */
+	} while (nbcon_context_try_acquire_requested(ctxt, cur));
+
+	/* Lucky timing. The acquire succeeded while removing the request. */
+	return 0;
+}
+
+/**
+ * nbcon_context_try_acquire_hostile - Acquire via unsafe hostile takeover
+ * @ctxt:	The context of the caller
+ * @cur:	The current console state
+ *
+ * Acquire the console even in the unsafe state.
+ *
+ * It can be permitted by setting the 'allow_unsafe_takeover' field only
+ * by the final attempt to flush messages in panic().
+ *
+ * Return:	0 on success. -EPERM when not allowed by the context.
+ */
+static int nbcon_context_try_acquire_hostile(struct nbcon_context *ctxt,
+					     struct nbcon_state *cur)
+{
+	unsigned int cpu = smp_processor_id();
+	struct console *con = ctxt->console;
+	struct nbcon_state new;
+
+	if (!ctxt->allow_unsafe_takeover)
+		return -EPERM;
+
+	/* Ensure caller is allowed to perform unsafe hostile takeovers. */
+	if (WARN_ON_ONCE(ctxt->prio != NBCON_PRIO_PANIC))
+		return -EPERM;
+
+	/*
+	 * Check that try_acquire_direct() and try_acquire_handover() returned
+	 * -EBUSY in the right situation.
+	 */
+	WARN_ON_ONCE(ctxt->prio <= cur->prio || ctxt->prio <= cur->req_prio);
+	WARN_ON_ONCE(cur->unsafe != true);
+
+	do {
+		new.atom = cur->atom;
+		new.cpu			= cpu;
+		new.prio		= ctxt->prio;
+		new.unsafe		|= cur->unsafe_takeover;
+		new.unsafe_takeover	|= cur->unsafe;
+
+	} while (!nbcon_state_try_cmpxchg(con, cur, &new));
+
+	return 0;
+}
+
+static struct printk_buffers panic_nbcon_pbufs;
+
+/**
+ * nbcon_context_try_acquire - Try to acquire nbcon console
+ * @ctxt:	The context of the caller
+ *
+ * Context:	Any context which could not be migrated to another CPU.
+ * Return:	True if the console was acquired. False otherwise.
+ *
+ * If the caller allowed an unsafe hostile takeover, on success the
+ * caller should check the current console state to see if it is
+ * in an unsafe state. Otherwise, on success the caller may assume
+ * the console is not in an unsafe state.
+ */
+static bool nbcon_context_try_acquire(struct nbcon_context *ctxt)
+{
+	unsigned int cpu = smp_processor_id();
+	struct console *con = ctxt->console;
+	struct nbcon_state cur;
+	int err;
+
+	nbcon_state_read(con, &cur);
+try_again:
+	err = nbcon_context_try_acquire_direct(ctxt, &cur);
+	if (err != -EBUSY)
+		goto out;
+
+	err = nbcon_context_try_acquire_handover(ctxt, &cur);
+	if (err == -EAGAIN)
+		goto try_again;
+	if (err != -EBUSY)
+		goto out;
+
+	err = nbcon_context_try_acquire_hostile(ctxt, &cur);
+out:
+	if (err)
+		return false;
+
+	/* Acquire succeeded. */
+
+	/* Assign the appropriate buffer for this context. */
+	if (atomic_read(&panic_cpu) == cpu)
+		ctxt->pbufs = &panic_nbcon_pbufs;
+	else
+		ctxt->pbufs = con->pbufs;
+
+	/* Set the record sequence for this context to print. */
+	ctxt->seq = nbcon_seq_read(ctxt->console);
+
+	return true;
+}
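+
+/*
+ * Sketch of the canonical caller sequence (this is what
+ * nbcon_atomic_emit_one() further below actually does): set up the
+ * context, acquire the console, emit one record and release again.
+ * Losing ownership during the emit means there is nothing to release:
+ *
+ *	ctxt->console = con;
+ *	ctxt->prio = nbcon_get_default_prio();
+ *	if (nbcon_context_try_acquire(ctxt)) {
+ *		if (nbcon_emit_next_record(wctxt, true))
+ *			nbcon_context_release(ctxt);
+ *	}
+ */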
+
+static bool nbcon_owner_matches(struct nbcon_state *cur, int expected_cpu,
+				int expected_prio)
+{
+	/*
+	 * Since consoles can only be acquired by higher priorities,
+	 * owning contexts are uniquely identified by @prio. However,
+	 * since contexts can unexpectedly lose ownership, it is
+	 * possible that later another owner appears with the same
+	 * priority. For this reason @cpu is also needed.
+	 */
+
+	if (cur->prio != expected_prio)
+		return false;
+
+	if (cur->cpu != expected_cpu)
+		return false;
+
+	return true;
+}
+
+/**
+ * nbcon_context_release - Release the console
+ * @ctxt:	The nbcon context from nbcon_context_try_acquire()
+ */
+static void nbcon_context_release(struct nbcon_context *ctxt)
+{
+	unsigned int cpu = smp_processor_id();
+	struct console *con = ctxt->console;
+	struct nbcon_state cur;
+	struct nbcon_state new;
+
+	nbcon_state_read(con, &cur);
+
+	do {
+		if (!nbcon_owner_matches(&cur, cpu, ctxt->prio))
+			break;
+
+		new.atom = cur.atom;
+		new.prio = NBCON_PRIO_NONE;
+
+		/*
+		 * If @unsafe_takeover is set, it is kept set so that
+		 * the state remains permanently unsafe.
+		 */
+		new.unsafe |= cur.unsafe_takeover;
+
+	} while (!nbcon_state_try_cmpxchg(con, &cur, &new));
+
+	ctxt->pbufs = NULL;
+}
+
+/**
+ * nbcon_context_can_proceed - Check whether ownership can proceed
+ * @ctxt:	The nbcon context from nbcon_context_try_acquire()
+ * @cur:	The current console state
+ *
+ * Return:	True if this context still owns the console. False if
+ *		ownership was handed over or taken.
+ *
+ * Must be invoked when entering the unsafe state to make sure that it still
+ * owns the lock. Also must be invoked when exiting the unsafe context
+ * to eventually free the lock for a higher priority context which asked
+ * for the friendly handover.
+ *
+ * It can be called inside an unsafe section when the console is just
+ * temporarily in a safe state instead of exiting and entering the unsafe
+ * state.
+ *
+ * Also it can be called in the safe context before doing an expensive
+ * safe operation. It does not make sense to do the operation when
+ * a higher priority context took the lock.
+ *
+ * When this function returns false then the calling context no longer owns
+ * the console and is no longer allowed to go forward. In this case it must
+ * back out immediately and carefully. The buffer content is also no longer
+ * trusted since it no longer belongs to the calling context.
+ */
+static bool nbcon_context_can_proceed(struct nbcon_context *ctxt, struct nbcon_state *cur)
+{
+	unsigned int cpu = smp_processor_id();
+
+	/* Make sure this context still owns the console. */
+	if (!nbcon_owner_matches(cur, cpu, ctxt->prio))
+		return false;
+
+	/* The console owner can proceed if there is no waiter. */
+	if (cur->req_prio == NBCON_PRIO_NONE)
+		return true;
+
+	/*
+	 * A console owner within an unsafe region is always allowed to
+	 * proceed, even if there are waiters. It can perform a handover
+	 * when exiting the unsafe region. Otherwise the waiter will
+	 * need to perform an unsafe hostile takeover.
+	 */
+	if (cur->unsafe)
+		return true;
+
+	/* Waiters always have higher priorities than owners. */
+	WARN_ON_ONCE(cur->req_prio <= cur->prio);
+
+	/*
+	 * Having a safe point for takeover and eventually a few
+	 * duplicated characters or a full line is way better than a
+	 * hostile takeover. Post processing can take care of the garbage.
+	 * Release and hand over.
+	 */
+	nbcon_context_release(ctxt);
+
+	/*
+	 * It is not clear whether the waiter really took over ownership. The
+	 * outermost callsite must make the final decision whether console
+	 * ownership is needed for it to proceed. If yes, it must reacquire
+	 * ownership (possibly hostile) before carefully proceeding.
+	 *
+	 * The calling context no longer owns the console so go back all the
+	 * way instead of trying to implement reacquire heuristics in tons of
+	 * places.
+	 */
+	return false;
+}
+
+/**
+ * nbcon_can_proceed - Check whether ownership can proceed
+ * @wctxt:	The write context that was handed to the write function
+ *
+ * Return:	True if this context still owns the console. False if
+ *		ownership was handed over or taken.
+ *
+ * It is used in nbcon_enter_unsafe() to make sure that it still owns the
+ * lock. Also it is used in nbcon_exit_unsafe() to eventually free the lock
+ * for a higher priority context which asked for the friendly handover.
+ *
+ * It can be called inside an unsafe section when the console is just
+ * temporarily in a safe state instead of exiting and entering the unsafe state.
+ *
+ * Also it can be called in the safe context before doing an expensive safe
+ * operation. It does not make sense to do the operation when a higher
+ * priority context took the lock.
+ *
+ * When this function returns false then the calling context no longer owns
+ * the console and is no longer allowed to go forward. In this case it must
+ * back out immediately and carefully. The buffer content is also no longer
+ * trusted since it no longer belongs to the calling context.
+ */
+bool nbcon_can_proceed(struct nbcon_write_context *wctxt)
+{
+	struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
+	struct console *con = ctxt->console;
+	struct nbcon_state cur;
+
+	nbcon_state_read(con, &cur);
+
+	return nbcon_context_can_proceed(ctxt, &cur);
+}
+EXPORT_SYMBOL_GPL(nbcon_can_proceed);
+
+#define nbcon_context_enter_unsafe(c)	__nbcon_context_update_unsafe(c, true)
+#define nbcon_context_exit_unsafe(c)	__nbcon_context_update_unsafe(c, false)
+
+/**
+ * __nbcon_context_update_unsafe - Update the unsafe bit in @con->nbcon_state
+ * @ctxt:	The nbcon context from nbcon_context_try_acquire()
+ * @unsafe:	The new value for the unsafe bit
+ *
+ * Return:	True if the unsafe state was updated and this context still
+ *		owns the console. Otherwise false if ownership was handed
+ *		over or taken.
+ *
+ * This function allows console owners to modify the unsafe status of the
+ * console.
+ *
+ * When this function returns false then the calling context no longer owns
+ * the console and is no longer allowed to go forward. In this case it must
+ * back out immediately and carefully. The buffer content is also no longer
+ * trusted since it no longer belongs to the calling context.
+ *
+ * Internal helper to avoid duplicated code.
+ */
+static bool __nbcon_context_update_unsafe(struct nbcon_context *ctxt, bool unsafe)
+{
+	struct console *con = ctxt->console;
+	struct nbcon_state cur;
+	struct nbcon_state new;
+
+	nbcon_state_read(con, &cur);
+
+	do {
+		/*
+		 * The unsafe bit must not be cleared if an
+		 * unsafe hostile takeover has occurred.
+		 */
+		if (!unsafe && cur.unsafe_takeover)
+			goto out;
+
+		if (!nbcon_context_can_proceed(ctxt, &cur))
+			return false;
+
+		new.atom = cur.atom;
+		new.unsafe = unsafe;
+	} while (!nbcon_state_try_cmpxchg(con, &cur, &new));
+
+	cur.atom = new.atom;
+out:
+	return nbcon_context_can_proceed(ctxt, &cur);
+}
+
+/**
+ * nbcon_enter_unsafe - Enter an unsafe region in the driver
+ * @wctxt:	The write context that was handed to the write function
+ *
+ * Return:	True if this context still owns the console. False if
+ *		ownership was handed over or taken.
+ *
+ * When this function returns false then the calling context no longer owns
+ * the console and is no longer allowed to go forward. In this case it must
+ * back out immediately and carefully. The buffer content is also no longer
+ * trusted since it no longer belongs to the calling context.
+ */
+bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt)
+{
+	struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
+
+	return nbcon_context_enter_unsafe(ctxt);
+}
+EXPORT_SYMBOL_GPL(nbcon_enter_unsafe);
+
+/**
+ * nbcon_exit_unsafe - Exit an unsafe region in the driver
+ * @wctxt:	The write context that was handed to the write function
+ *
+ * Return:	True if this context still owns the console. False if
+ *		ownership was handed over or taken.
+ *
+ * When this function returns false then the calling context no longer owns
+ * the console and is no longer allowed to go forward. In this case it must
+ * back out immediately and carefully. The buffer content is also no longer
+ * trusted since it no longer belongs to the calling context.
+ */
+bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt)
+{
+	struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
+
+	return nbcon_context_exit_unsafe(ctxt);
+}
+EXPORT_SYMBOL_GPL(nbcon_exit_unsafe);
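+
+/*
+ * Illustrative write callback skeleton (a hypothetical driver sketch,
+ * not part of this patch) showing how the unsafe bracket is meant to
+ * be used around hardware access:
+ *
+ *	static bool foo_write_atomic(struct console *con,
+ *				     struct nbcon_write_context *wctxt)
+ *	{
+ *		if (!nbcon_enter_unsafe(wctxt))
+ *			return false;
+ *		// write wctxt->outbuf[0 .. wctxt->len) to the hardware
+ *		return nbcon_exit_unsafe(wctxt);
+ *	}
+ */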
+
+/**
+ * nbcon_reacquire - Reacquire a console after losing ownership
+ * @wctxt:	The write context that was handed to the write function
+ *
+ * Since ownership can be lost at any time due to handover or takeover, a
+ * printing context _should_ be prepared to back out immediately and
+ * carefully. However, there are many scenarios where the context _must_
+ * reacquire ownership in order to finalize or revert hardware changes.
+ *
+ * This function allows a context to reacquire ownership using the same
+ * priority as its previous ownership.
+ *
+ * Note that for printing contexts, after a successful reacquire the
+ * context will have no output buffer because that has been lost. This
+ * function cannot be used to resume printing.
+ */
+void nbcon_reacquire(struct nbcon_write_context *wctxt)
+{
+	struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
+	struct console *con = ctxt->console;
+	struct nbcon_state cur;
+
+	while (!nbcon_context_try_acquire(ctxt))
+		cpu_relax();
+
+	wctxt->outbuf = NULL;
+	wctxt->len = 0;
+	nbcon_state_read(con, &cur);
+	wctxt->unsafe_takeover = cur.unsafe_takeover;
+}
+EXPORT_SYMBOL_GPL(nbcon_reacquire);
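+
+/*
+ * Typical use (sketch derived from the description above): a driver
+ * that lost ownership mid-transmission reacquires only to bring the
+ * hardware back into a sane state, then returns failure so the new
+ * owner reprints the record:
+ *
+ *	if (!nbcon_can_proceed(wctxt)) {
+ *		nbcon_reacquire(wctxt);
+ *		// restore interrupt masks / FIFO state here
+ *		return false;
+ *	}
+ */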
+
+/**
+ * nbcon_emit_next_record - Emit a record in the acquired context
+ * @wctxt:	The write context that will be handed to the write function
+ * @use_atomic:	True if the write_atomic() callback is to be used
+ *
+ * Return:	True if this context still owns the console. False if
+ *		ownership was handed over or taken.
+ *
+ * When this function returns false then the calling context no longer owns
+ * the console and is no longer allowed to go forward. In this case it must
+ * back out immediately and carefully. The buffer content is also no longer
+ * trusted since it no longer belongs to the calling context. If the caller
+ * wants to do more it must reacquire the console first.
+ *
+ * When true is returned, @wctxt->ctxt.backlog indicates whether there are
+ * still records pending in the ringbuffer.
+ */
+static bool nbcon_emit_next_record(struct nbcon_write_context *wctxt, bool use_atomic)
+{
+	struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
+	struct console *con = ctxt->console;
+	bool is_extended = console_srcu_read_flags(con) & CON_EXTENDED;
+	struct printk_message pmsg = {
+		.pbufs = ctxt->pbufs,
+	};
+	unsigned long con_dropped;
+	struct nbcon_state cur;
+	unsigned long dropped;
+	bool done = false;
+
+	/*
+	 * The printk buffers are filled within an unsafe section. This
+	 * prevents NBCON_PRIO_NORMAL and NBCON_PRIO_EMERGENCY from
+	 * clobbering each other.
+	 */
+
+	if (!nbcon_context_enter_unsafe(ctxt))
+		return false;
+
+	ctxt->backlog = printk_get_next_message(&pmsg, ctxt->seq, is_extended, true);
+	if (!ctxt->backlog)
+		return nbcon_context_exit_unsafe(ctxt);
+
+	/*
+	 * @con->dropped is not protected in case of an unsafe hostile
+	 * takeover. In that situation the update can be racy so
+	 * annotate it accordingly.
+	 */
+	con_dropped = data_race(READ_ONCE(con->dropped));
+
+	dropped = con_dropped + pmsg.dropped;
+	if (dropped && !is_extended)
+		console_prepend_dropped(&pmsg, dropped);
+
+	if (!nbcon_context_exit_unsafe(ctxt))
+		return false;
+
+	/* For skipped records just update seq/dropped in @con. */
+	if (pmsg.outbuf_len == 0)
+		goto update_con;
+
+	/* Initialize the write context for driver callbacks. */
+	wctxt->outbuf = &pmsg.pbufs->outbuf[0];
+	wctxt->len = pmsg.outbuf_len;
+	nbcon_state_read(con, &cur);
+	wctxt->unsafe_takeover = cur.unsafe_takeover;
+
+	if (use_atomic &&
+	    con->write_atomic) {
+		done = con->write_atomic(con, wctxt);
+
+	} else if (!use_atomic &&
+		   con->write_thread &&
+		   con->kthread) {
+		WARN_ON_ONCE(con->kthread != current);
+		done = con->write_thread(con, wctxt);
+	}
+
+	if (!done) {
+		/*
+		 * The emit was aborted, probably due to a loss of ownership.
+		 * Ensure ownership was lost or released before reporting the
+		 * loss.
+		 */
+		nbcon_context_release(ctxt);
+		return false;
+	}
+
+	/*
+	 * Since any dropped message was successfully output, reset the
+	 * dropped count for the console.
+	 */
+	dropped = 0;
+update_con:
+	/*
+	 * The dropped count and the sequence number are updated within an
+	 * unsafe section. This limits update races to the panic context and
+	 * allows the panic context to win.
+	 */
+
+	if (!nbcon_context_enter_unsafe(ctxt))
+		return false;
+
+	if (dropped != con_dropped) {
+		/* Counterpart to the READ_ONCE() above. */
+		WRITE_ONCE(con->dropped, dropped);
+	}
+
+	nbcon_seq_try_update(ctxt, pmsg.seq + 1);
+
+	return nbcon_context_exit_unsafe(ctxt);
+}
+
+/**
+ * nbcon_kthread_should_wakeup - Check whether a printer thread should wakeup
+ * @con:	Console to operate on
+ * @ctxt:	The acquire context that contains the state
+ *		at console_acquire()
+ *
+ * Return:	True if the thread should shut down or if the console is
+ *		allowed to print and a record is available. False otherwise.
+ *
+ * After the thread wakes up, it must first check if it should shut down
+ * before attempting any printing.
+ */
+static bool nbcon_kthread_should_wakeup(struct console *con, struct nbcon_context *ctxt)
+{
+	bool is_usable;
+	short flags;
+	int cookie;
+
+	if (kthread_should_stop())
+		return true;
+
+	cookie = console_srcu_read_lock();
+	flags = console_srcu_read_flags(con);
+	is_usable = console_is_usable(con, flags, false);
+	console_srcu_read_unlock(cookie);
+
+	if (!is_usable)
+		return false;
+
+	/* Bring the sequence in @ctxt up to date */
+	ctxt->seq = nbcon_seq_read(con);
+
+	return prb_read_valid(prb, ctxt->seq, NULL);
+}
+
+/**
+ * nbcon_kthread_func - The printer thread function
+ * @__console:	Console to operate on
+ */
+static int nbcon_kthread_func(void *__console)
+{
+	struct console *con = __console;
+	struct nbcon_write_context wctxt = {
+		.ctxt.console	= con,
+		.ctxt.prio	= NBCON_PRIO_NORMAL,
+	};
+	struct nbcon_context *ctxt = &ACCESS_PRIVATE(&wctxt, ctxt);
+	unsigned long flags;
+	short con_flags;
+	bool backlog;
+	int cookie;
+	int ret;
+
+wait_for_event:
+	/*
+	 * Guarantee this task is visible on the rcuwait before
+	 * checking the wake condition.
+	 *
+	 * The full memory barrier within set_current_state() of
+	 * ___rcuwait_wait_event() pairs with the full memory
+	 * barrier within rcuwait_has_sleeper().
+	 *
+	 * This pairs with rcuwait_has_sleeper:A and nbcon_kthread_wake:A.
+	 */
+	ret = rcuwait_wait_event(&con->rcuwait,
+				 nbcon_kthread_should_wakeup(con, ctxt),
+				 TASK_INTERRUPTIBLE); /* LMM(nbcon_kthread_func:A) */
+
+	if (kthread_should_stop())
+		return 0;
+
+	/* Wait was interrupted by a spurious signal, go back to sleep. */
+	if (ret)
+		goto wait_for_event;
+
+	do {
+		backlog = false;
+
+		cookie = console_srcu_read_lock();
+
+		con_flags = console_srcu_read_flags(con);
+
+		if (console_is_usable(con, con_flags, false)) {
+			con->driver_enter(con, &flags);
+
+			/*
+			 * Ensure this stays on the CPU to make handover and
+			 * takeover possible.
+			 */
+			cant_migrate();
+
+			if (nbcon_context_try_acquire(ctxt)) {
+				/*
+				 * If the emit fails, this context is no
+				 * longer the owner.
+				 */
+				if (nbcon_emit_next_record(&wctxt, false)) {
+					nbcon_context_release(ctxt);
+					backlog = ctxt->backlog;
+				}
+			}
+
+			con->driver_exit(con, flags);
+		}
+
+		console_srcu_read_unlock(cookie);
+
+	} while (backlog);
+
+	goto wait_for_event;
+}
+
+/**
+ * nbcon_irq_work - irq work to wake printk thread
+ * @irq_work:	The irq work to operate on
+ */
+static void nbcon_irq_work(struct irq_work *irq_work)
+{
+	struct console *con = container_of(irq_work, struct console, irq_work);
+
+	nbcon_kthread_wake(con);
+}
+
+static inline bool rcuwait_has_sleeper(struct rcuwait *w)
+{
+	bool has_sleeper;
+
+	rcu_read_lock();
+	/*
+	 * Guarantee any new records can be seen by tasks preparing to wait
+	 * before this context checks if the rcuwait is empty.
+	 *
+	 * This full memory barrier pairs with the full memory barrier within
+	 * set_current_state() of ___rcuwait_wait_event(), which is called
+	 * after prepare_to_rcuwait() adds the waiter but before it has
+	 * checked the wait condition.
+	 *
+	 * This pairs with nbcon_kthread_func:A.
+	 */
+	smp_mb(); /* LMM(rcuwait_has_sleeper:A) */
+	has_sleeper = !!rcu_dereference(w->task);
+	rcu_read_unlock();
+
+	return has_sleeper;
+}
+
+/**
+ * nbcon_wake_threads - Wake up printing threads using irq_work
+ */
+void nbcon_wake_threads(void)
+{
+	struct console *con;
+	int cookie;
+
+	cookie = console_srcu_read_lock();
+	for_each_console_srcu(con) {
+		/*
+		 * Only schedule irq_work if the printing thread is
+		 * actively waiting. If not waiting, the thread will
+		 * notice by itself that it has work to do.
+		 */
+		if (con->kthread && rcuwait_has_sleeper(&con->rcuwait))
+			irq_work_queue(&con->irq_work);
+	}
+	console_srcu_read_unlock(cookie);
+}
+
+/* Track the nbcon emergency nesting per CPU. */
+static DEFINE_PER_CPU(unsigned int, nbcon_pcpu_emergency_nesting);
+static unsigned int early_nbcon_pcpu_emergency_nesting __initdata;
+
+/**
+ * nbcon_get_cpu_emergency_nesting - Get the per CPU emergency nesting pointer
+ *
+ * Return:	Either a pointer to the per CPU emergency nesting counter of
+ *		the current CPU or to the init data during early boot.
+ */
+static __ref unsigned int *nbcon_get_cpu_emergency_nesting(void)
+{
+	/*
+	 * The value of __printk_percpu_data_ready gets set in normal
+	 * context and before SMP initialization. As a result it could
+	 * never change while inside an nbcon emergency section.
+	 */
+	if (!printk_percpu_data_ready())
+		return &early_nbcon_pcpu_emergency_nesting;
+
+	return this_cpu_ptr(&nbcon_pcpu_emergency_nesting);
+}
+
+/**
+ * nbcon_atomic_emit_one - Print one record for an nbcon console using the
+ *			   write_atomic() callback
+ * @wctxt:	An initialized write context struct to use
+ *		for this context
+ *
+ * Return:	False if the given console could not print a record or there
+ *		are no more records to print, otherwise true.
+ *
+ * This is an internal helper to handle the locking of the console before
+ * calling nbcon_emit_next_record().
+ */
+static bool nbcon_atomic_emit_one(struct nbcon_write_context *wctxt)
+{
+	struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
+
+	if (!nbcon_context_try_acquire(ctxt))
+		return false;
+
+	/*
+	 * nbcon_emit_next_record() returns false when the console was
+	 * handed over or taken over. In both cases the context is no
+	 * longer valid.
+	 */
+	if (!nbcon_emit_next_record(wctxt, true))
+		return false;
+
+	nbcon_context_release(ctxt);
+
+	return ctxt->backlog;
+}
+
+/**
+ * nbcon_get_default_prio - The appropriate nbcon priority to use for nbcon
+ *			    printing on the current CPU
+ *
+ * Context:	Any context which could not be migrated to another CPU.
+ * Return:	The nbcon_prio to use for acquiring an nbcon console in this
+ *		context for printing.
+ */
+enum nbcon_prio nbcon_get_default_prio(void)
+{
+	unsigned int *cpu_emergency_nesting;
+
+	if (this_cpu_in_panic())
+		return NBCON_PRIO_PANIC;
+
+	cpu_emergency_nesting = nbcon_get_cpu_emergency_nesting();
+	if (*cpu_emergency_nesting)
+		return NBCON_PRIO_EMERGENCY;
+
+	return NBCON_PRIO_NORMAL;
+}
+
+/**
+ * nbcon_atomic_emit_next_record - Print one record for an nbcon console
+ *				   using the write_atomic() callback
+ * @con:	The console to print on
+ * @handover:	Will be set to true if a printk waiter has taken over the
+ *		console_lock, in which case the caller is no longer holding
+ *		both the console_lock and the SRCU read lock. Otherwise it
+ *		is set to false.
+ * @cookie:	The cookie from the SRCU read lock.
+ *
+ * Context:	Any context which could not be migrated to another CPU.
+ * Return:	True if a record could be printed, otherwise false.
+ *
+ * This function is meant to be called by console_flush_all() to print records
+ * on nbcon consoles using the write_atomic() callback. Essentially it is the
+ * nbcon version of console_emit_next_record().
+ */
+bool nbcon_atomic_emit_next_record(struct console *con, bool *handover, int cookie)
+{
+	struct nbcon_write_context wctxt = { };
+	struct nbcon_context *ctxt = &ACCESS_PRIVATE(&wctxt, ctxt);
+	unsigned long driver_flags;
+	bool progress = false;
+	unsigned long flags;
+
+	*handover = false;
+
+	/* Use the same locking order as console_emit_next_record(). */
+	if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
+		printk_safe_enter_irqsave(flags);
+		console_lock_spinning_enable();
+		stop_critical_timings();
+	}
+
+	con->driver_enter(con, &driver_flags);
+	cant_migrate();
+
+	ctxt->console	= con;
+	ctxt->prio	= nbcon_get_default_prio();
+
+	progress = nbcon_atomic_emit_one(&wctxt);
+
+	con->driver_exit(con, driver_flags);
+
+	if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
+		start_critical_timings();
+		*handover = console_lock_spinning_disable_and_check(cookie);
+		printk_safe_exit_irqrestore(flags);
+	}
+
+	return progress;
+}
+
+/**
+ * __nbcon_atomic_flush_all - Flush all nbcon consoles using their
+ *				write_atomic() callback
+ * @stop_seq:			Flush up until this record
+ * @allow_unsafe_takeover:	True, to allow unsafe hostile takeovers
+ */
+static void __nbcon_atomic_flush_all(u64 stop_seq, bool allow_unsafe_takeover)
+{
+	struct nbcon_write_context wctxt = { };
+	struct nbcon_context *ctxt = &ACCESS_PRIVATE(&wctxt, ctxt);
+	struct console *con;
+	bool any_progress;
+	int cookie;
+
+	do {
+		any_progress = false;
+
+		cookie = console_srcu_read_lock();
+		for_each_console_srcu(con) {
+			short flags = console_srcu_read_flags(con);
+			unsigned long irq_flags;
+
+			if (!(flags & CON_NBCON))
+				continue;
+
+			if (!console_is_usable(con, flags, true))
+				continue;
+
+			if (nbcon_seq_read(con) >= stop_seq)
+				continue;
+
+			memset(ctxt, 0, sizeof(*ctxt));
+			ctxt->console			= con;
+			ctxt->spinwait_max_us		= 2000;
+			ctxt->allow_unsafe_takeover	= allow_unsafe_takeover;
+
+			/*
+			 * Atomic flushing does not use console driver
+			 * synchronization (i.e. it does not hold the port
+			 * lock for uart consoles). Therefore IRQs must be
+			 * disabled to avoid being interrupted and then
+			 * calling into a driver that will deadlock trying
+			 * to acquire console ownership.
+			 *
+			 * This also disables migration in order to get the
+			 * current CPU priority.
+			 */
+			local_irq_save(irq_flags);
+
+			ctxt->prio = nbcon_get_default_prio();
+
+			any_progress |= nbcon_atomic_emit_one(&wctxt);
+
+			local_irq_restore(irq_flags);
+		}
+		console_srcu_read_unlock(cookie);
+	} while (any_progress);
+}
+
+/**
+ * nbcon_atomic_flush_all - Flush all nbcon consoles using their
+ *			    write_atomic() callback
+ *
+ * Flush the backlog up through the currently newest record. Any new
+ * records added while flushing will not be flushed. This is to avoid
+ * one CPU printing unbounded because other CPUs continue to add records.
+ */
+void nbcon_atomic_flush_all(void)
+{
+	__nbcon_atomic_flush_all(prb_next_reserve_seq(prb), false);
+}
+
+/**
+ * nbcon_atomic_flush_unsafe - Flush all nbcon consoles using their
+ *	write_atomic() callback and allowing unsafe hostile takeovers
+ *
+ * Flush the backlog up through the currently newest record. Unsafe hostile
+ * takeovers will be performed, if necessary.
+ */
+void nbcon_atomic_flush_unsafe(void)
+{
+	__nbcon_atomic_flush_all(prb_next_reserve_seq(prb), true);
+}
+
+/**
+ * nbcon_cpu_emergency_enter - Enter an emergency section where printk()
+ *			       messages for that CPU are only stored
+ *
+ * Upon exiting the emergency section, all stored messages are flushed.
+ *
+ * Context:	Any context. Disables preemption.
+ *
+ * When within an emergency section, no printing occurs on that CPU. This
+ * is to allow all emergency messages to be dumped into the ringbuffer before
+ * flushing the ringbuffer. The actual printing occurs when exiting the
+ * outermost emergency section.
+ */
+void nbcon_cpu_emergency_enter(void)
+{
+	unsigned int *cpu_emergency_nesting;
+
+	preempt_disable();
+
+	cpu_emergency_nesting = nbcon_get_cpu_emergency_nesting();
+	(*cpu_emergency_nesting)++;
+}
+
+/**
+ * nbcon_cpu_emergency_exit - Exit an emergency section and flush the
+ *			      stored messages
+ *
+ * Flushing only occurs when exiting all nesting for the CPU.
+ *
+ * Context:	Any context. Enables preemption.
+ */
+void nbcon_cpu_emergency_exit(void)
+{
+	unsigned int *cpu_emergency_nesting;
+	bool do_trigger_flush = false;
+
+	cpu_emergency_nesting = nbcon_get_cpu_emergency_nesting();
+
+	WARN_ON_ONCE(*cpu_emergency_nesting == 0);
+
+	if (*cpu_emergency_nesting == 1)
+		do_trigger_flush = true;
+
+	/* Undo the nesting count of nbcon_cpu_emergency_enter(). */
+	(*cpu_emergency_nesting)--;
+
+	preempt_enable();
+
+	if (do_trigger_flush)
+		printk_trigger_flush();
+}
+
|
||
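[Editorial sketch] How the pair above is meant to be used, as a minimal hedged example; the surrounding dump code is hypothetical, and nesting is safe because only the outermost exit triggers the flush:

	/* Illustrative caller only, not part of this patch. */
	static void emit_emergency_report(void)
	{
		nbcon_cpu_emergency_enter();	/* messages only stored from here */
		pr_emerg("emergency state follows\n");
		dump_stack();			/* still only stored, not printed */
		nbcon_cpu_emergency_exit();	/* outermost exit flushes everything */
	}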
+/**
+ * nbcon_kthread_stop - Stop a printer thread
+ * @con: Console to operate on
+ */
+static void nbcon_kthread_stop(struct console *con)
+{
+	lockdep_assert_console_list_lock_held();
+
+	if (!con->kthread)
+		return;
+
+	kthread_stop(con->kthread);
+	con->kthread = NULL;
+}
+
+/**
+ * nbcon_kthread_create - Create a printer thread
+ * @con: Console to operate on
+ *
+ * If it fails, let the console proceed. The atomic part might
+ * be usable and useful.
+ */
+void nbcon_kthread_create(struct console *con)
+{
+	struct task_struct *kt;
+
+	lockdep_assert_console_list_lock_held();
+
+	if (!(con->flags & CON_NBCON) || !con->write_thread)
+		return;
+
+	if (!printk_threads_enabled || con->kthread)
+		return;
+
+	/*
+	 * Printer threads cannot be started as long as any boot console is
+	 * registered because there is no way to synchronize the hardware
+	 * registers between boot console code and regular console code.
+	 */
+	if (have_boot_console)
+		return;
+
+	kt = kthread_run(nbcon_kthread_func, con, "pr/%s%d", con->name, con->index);
+	if (IS_ERR(kt)) {
+		con_printk(KERN_ERR, con, "failed to start printing thread\n");
+		return;
+	}
+
+	con->kthread = kt;
+
+	/*
+	 * It is important that console printing threads are scheduled
+	 * shortly after a printk call and with generous runtime budgets.
+	 */
+	sched_set_normal(con->kthread, -20);
+}
+
+static int __init printk_setup_threads(void)
+{
+	struct console *con;
+
+	console_list_lock();
+	printk_threads_enabled = true;
+	for_each_console(con)
+		nbcon_kthread_create(con);
+	if (IS_ENABLED(CONFIG_PREEMPT_RT) && printing_via_unlock)
+		nbcon_legacy_kthread_create();
+	console_list_unlock();
+	return 0;
+}
+early_initcall(printk_setup_threads);
+
+/**
+ * nbcon_alloc - Allocate buffers needed by the nbcon console
+ * @con: Console to allocate buffers for
+ *
+ * Return: True on success. False otherwise and the console cannot
+ * be used.
+ *
+ * This is not part of nbcon_init() because buffer allocation must
+ * be performed earlier in the console registration process.
+ */
+bool nbcon_alloc(struct console *con)
+{
+	if (con->flags & CON_BOOT) {
+		/*
+		 * Boot console printing is synchronized with legacy console
+		 * printing, so boot consoles can share the same global printk
+		 * buffers.
+		 */
+		con->pbufs = &printk_shared_pbufs;
+	} else {
+		con->pbufs = kmalloc(sizeof(*con->pbufs), GFP_KERNEL);
+		if (!con->pbufs) {
+			con_printk(KERN_ERR, con, "failed to allocate printing buffer\n");
+			return false;
+		}
+	}
+
+	return true;
+}
+
+/**
+ * nbcon_init - Initialize the nbcon console specific data
+ * @con: Console to initialize
+ *
+ * nbcon_alloc() *must* be called and succeed before this function
+ * is called.
+ *
+ * This function expects that the legacy @con->seq has been set.
+ */
+void nbcon_init(struct console *con)
+{
+	struct nbcon_state state = { };
+
+	/* nbcon_alloc() must have been called and successful! */
+	BUG_ON(!con->pbufs);
+
+	rcuwait_init(&con->rcuwait);
+	init_irq_work(&con->irq_work, nbcon_irq_work);
+	nbcon_seq_force(con, con->seq);
+	nbcon_state_set(con, &state);
+	nbcon_kthread_create(con);
+}
+
+/**
+ * nbcon_free - Free and cleanup the nbcon console specific data
+ * @con: Console to free/cleanup nbcon data
+ */
+void nbcon_free(struct console *con)
+{
+	struct nbcon_state state = { };
+
+	nbcon_kthread_stop(con);
+	nbcon_state_set(con, &state);
+
+	/* Boot consoles share global printk buffers. */
+	if (!(con->flags & CON_BOOT))
+		kfree(con->pbufs);
+
+	con->pbufs = NULL;
+}
+
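[Editorial sketch] The alloc/init/free trio above assumes a fixed call order during console registration; a hedged sketch of that order, mirroring the register_console()/unregister_console_locked() hunks later in this patch:

	/* Illustrative ordering only. */
	if (newcon->flags & CON_NBCON) {
		if (!nbcon_alloc(newcon))	/* buffers first; may fail */
			goto unlock;
		console_init_seq(newcon, bootcon_registered);	/* sets legacy @seq */
		nbcon_init(newcon);		/* seq, state, irq_work, kthread */
	}

	/* ... and on unregistration or a failed registration: */
	if (console->flags & CON_NBCON)
		nbcon_free(console);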
+static inline bool uart_is_nbcon(struct uart_port *up)
+{
+	int cookie;
+	bool ret;
+
+	if (!uart_console(up))
+		return false;
+
+	cookie = console_srcu_read_lock();
+	ret = (console_srcu_read_flags(up->cons) & CON_NBCON);
+	console_srcu_read_unlock(cookie);
+	return ret;
+}
+
+/**
+ * nbcon_acquire - The second half of the port locking wrapper
+ * @up: The uart port whose @lock was locked
+ *
+ * The uart_port_lock() wrappers will first lock the spin_lock @up->lock.
+ * Then this function is called to implement nbcon-specific processing.
+ *
+ * If @up is an nbcon console, this console will be acquired and marked as
+ * unsafe. Otherwise this function does nothing.
+ *
+ * nbcon consoles acquired via the port lock wrapper always use priority
+ * NBCON_PRIO_NORMAL.
+ */
+void nbcon_acquire(struct uart_port *up)
+{
+	struct console *con = up->cons;
+	struct nbcon_context ctxt;
+
+	if (!uart_is_nbcon(up))
+		return;
+
+	WARN_ON_ONCE(up->nbcon_locked_port);
+
+	do {
+		do {
+			memset(&ctxt, 0, sizeof(ctxt));
+			ctxt.console = con;
+			ctxt.prio = NBCON_PRIO_NORMAL;
+		} while (!nbcon_context_try_acquire(&ctxt));
+
+	} while (!nbcon_context_enter_unsafe(&ctxt));
+
+	up->nbcon_locked_port = true;
+}
+EXPORT_SYMBOL_GPL(nbcon_acquire);
+
+/**
+ * nbcon_release - The first half of the port unlocking wrapper
+ * @up: The uart port whose @lock is about to be unlocked
+ *
+ * The uart_port_unlock() wrappers will first call this function to implement
+ * nbcon-specific processing. Then afterwards the uart_port_unlock() wrappers
+ * will unlock the spin_lock @up->lock.
+ *
+ * If @up is an nbcon console, the console will be marked as safe and
+ * released. Otherwise this function does nothing.
+ *
+ * nbcon consoles acquired via the port lock wrapper always use priority
+ * NBCON_PRIO_NORMAL.
+ */
+void nbcon_release(struct uart_port *up)
+{
+	struct console *con = up->cons;
+	struct nbcon_context ctxt = {
+		.console = con,
+		.prio = NBCON_PRIO_NORMAL,
+	};
+
+	if (!up->nbcon_locked_port)
+		return;
+
+	if (nbcon_context_exit_unsafe(&ctxt))
+		nbcon_context_release(&ctxt);
+
+	up->nbcon_locked_port = false;
+}
+EXPORT_SYMBOL_GPL(nbcon_release);
+
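[Editorial sketch] A minimal sketch of how the serial core's uart_port_lock()/uart_port_unlock() wrappers are expected to pair these calls. The wrapper shape is an assumption here; only nbcon_acquire()/nbcon_release() are defined by this patch:

	static inline void uart_port_lock_sketch(struct uart_port *up)
	{
		spin_lock_irq(&up->lock);
		nbcon_acquire(up);		/* own the console, mark unsafe */
	}

	static inline void uart_port_unlock_sketch(struct uart_port *up)
	{
		nbcon_release(up);		/* mark safe, drop ownership */
		spin_unlock_irq(&up->lock);
	}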
+/**
+ * printk_kthread_shutdown - shutdown all threaded printers
+ *
+ * On system shutdown all threaded printers are stopped. This allows printk
+ * to transition back to atomic printing, thus providing a robust mechanism
+ * for the final shutdown/reboot messages to be output.
+ */
+static void printk_kthread_shutdown(void)
+{
+	struct console *con;
+
+	console_list_lock();
+	for_each_console(con) {
+		if (con->flags & CON_NBCON)
+			nbcon_kthread_stop(con);
+	}
+	console_list_unlock();
+}
+
+static struct syscore_ops printk_syscore_ops = {
+	.shutdown = printk_kthread_shutdown,
+};
+
+static int __init printk_init_ops(void)
+{
+	register_syscore_ops(&printk_syscore_ops);
+	return 0;
+}
+device_initcall(printk_init_ops);
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 8b04d0932..a760c46d3 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -102,12 +102,6 @@ DEFINE_STATIC_SRCU(console_srcu);
  */
 int __read_mostly suppress_printk;
 
-/*
- * During panic, heavy printk by other CPUs can delay the
- * panic and risk deadlock on console resources.
- */
-static int __read_mostly suppress_panic_printk;
-
 #ifdef CONFIG_LOCKDEP
 static struct lockdep_map console_lock_dep_map = {
 	.name = "console_lock"
@@ -288,6 +282,7 @@ EXPORT_SYMBOL(console_list_unlock);
  * Return: A cookie to pass to console_srcu_read_unlock().
  */
 int console_srcu_read_lock(void)
+	__acquires(&console_srcu)
 {
 	return srcu_read_lock_nmisafe(&console_srcu);
 }
@@ -301,6 +296,7 @@ EXPORT_SYMBOL(console_srcu_read_lock);
  * Counterpart to console_srcu_read_lock()
  */
 void console_srcu_read_unlock(int cookie)
+	__releases(&console_srcu)
 {
 	srcu_read_unlock_nmisafe(&console_srcu, cookie);
 }
@@ -353,6 +349,29 @@ static bool panic_in_progress(void)
 	return unlikely(atomic_read(&panic_cpu) != PANIC_CPU_INVALID);
 }
 
+/* Return true if a panic is in progress on the current CPU. */
+bool this_cpu_in_panic(void)
+{
+	/*
+	 * We can use raw_smp_processor_id() here because it is impossible for
+	 * the task to be migrated to the panic_cpu, or away from it. If
+	 * panic_cpu has already been set, and we're not currently executing on
+	 * that CPU, then we never will be.
+	 */
+	return unlikely(atomic_read(&panic_cpu) == raw_smp_processor_id());
+}
+
+/*
+ * Return true if a panic is in progress on a remote CPU.
+ *
+ * On true, the local CPU should immediately release any printing resources
+ * that may be needed by the panic CPU.
+ */
+bool other_cpu_in_panic(void)
+{
+	return (panic_in_progress() && !this_cpu_in_panic());
+}
+
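[Editorial sketch] A quick illustration of how the two predicates divide the three possible states; the helper name is hypothetical:

	static bool may_keep_printing(void)
	{
		if (this_cpu_in_panic())
			return true;	/* the panic CPU keeps all resources */
		if (other_cpu_in_panic())
			return false;	/* yield consoles to the panic CPU */
		return true;		/* no panic in progress anywhere */
	}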
 /*
  * This is used for debugging the mess that is the VT code by
  * keeping track if we have the console semaphore held. It's
@@ -444,8 +463,33 @@ static int console_msg_format = MSG_FORMAT_DEFAULT;
 /* syslog_lock protects syslog_* variables and write access to clear_seq. */
 static DEFINE_MUTEX(syslog_lock);
 
+/*
+ * Specifies if a legacy console is registered. If legacy consoles are
+ * present, it is necessary to perform the console_lock/console_unlock dance
+ * whenever console flushing should occur.
+ */
+bool have_legacy_console;
+
+/*
+ * Specifies if an nbcon console is registered. If nbcon consoles are present,
+ * synchronous printing of legacy consoles will not occur during panic until
+ * the backtrace has been stored to the ringbuffer.
+ */
+bool have_nbcon_console;
+
+/*
+ * Specifies if a boot console is registered. If boot consoles are present,
+ * nbcon consoles cannot print simultaneously and must be synchronized by
+ * the console lock. This is because boot consoles and nbcon consoles may
+ * have mapped the same hardware.
+ */
+bool have_boot_console;
+
 #ifdef CONFIG_PRINTK
 DECLARE_WAIT_QUEUE_HEAD(log_wait);
+
+static DECLARE_WAIT_QUEUE_HEAD(legacy_wait);
+
 /* All 3 protected by @syslog_lock. */
 /* the next printk record to read by syslog(READ) or /proc/kmsg */
 static u64 syslog_seq;
@@ -494,7 +538,7 @@ _DEFINE_PRINTKRB(printk_rb_static, CONFIG_LOG_BUF_SHIFT - PRB_AVGBITS,
 
 static struct printk_ringbuffer printk_rb_dynamic;
 
-static struct printk_ringbuffer *prb = &printk_rb_static;
+struct printk_ringbuffer *prb = &printk_rb_static;
 
 /*
  * We cannot access per-CPU data (e.g. per-CPU flush irq_work) before
@@ -698,9 +742,6 @@ static ssize_t msg_print_ext_body(char *buf, size_t size,
 	return len;
 }
 
-static bool printk_get_next_message(struct printk_message *pmsg, u64 seq,
-				    bool is_extended, bool may_supress);
-
 /* /dev/kmsg - userspace message inject/listen interface */
 struct devkmsg_user {
 	atomic64_t seq;
@@ -1848,7 +1889,7 @@ static bool console_waiter;
  * there may be a waiter spinning (like a spinlock). Also it must be
  * ready to hand over the lock at the end of the section.
  */
-static void console_lock_spinning_enable(void)
+void console_lock_spinning_enable(void)
 {
 	/*
 	 * Do not use spinning in panic(). The panic CPU wants to keep the lock.
@@ -1887,7 +1928,7 @@ static void console_lock_spinning_enable(void)
  *
  * Return: 1 if the lock rights were passed, 0 otherwise.
  */
-static int console_lock_spinning_disable_and_check(int cookie)
+int console_lock_spinning_disable_and_check(int cookie)
 {
 	int waiter;
 
@@ -2305,54 +2346,123 @@ int vprintk_store(int facility, int level,
 	return ret;
 }
 
+static bool legacy_allow_panic_sync;
+
+/*
+ * This acts as a one-way switch to allow legacy consoles to print from
+ * the printk() caller context on a panic CPU.
+ */
+void printk_legacy_allow_panic_sync(void)
+{
+	legacy_allow_panic_sync = true;
+}
+
 asmlinkage int vprintk_emit(int facility, int level,
 			    const struct dev_printk_info *dev_info,
 			    const char *fmt, va_list args)
 {
+	bool do_trylock_unlock = printing_via_unlock &&
+				 !IS_ENABLED(CONFIG_PREEMPT_RT);
 	int printed_len;
-	bool in_sched = false;
 
 	/* Suppress unimportant messages after panic happens */
 	if (unlikely(suppress_printk))
 		return 0;
 
-	if (unlikely(suppress_panic_printk) && other_cpu_in_panic())
+	/*
+	 * The messages on the panic CPU are the most important. If
+	 * non-panic CPUs are generating any messages, they will be
+	 * silently dropped.
+	 */
+	if (other_cpu_in_panic())
 		return 0;
 
 	if (level == LOGLEVEL_SCHED) {
 		level = LOGLEVEL_DEFAULT;
-		in_sched = true;
+		/* If called from the scheduler, we can not call up(). */
+		do_trylock_unlock = false;
 	}
 
 	printk_delay(level);
 
 	printed_len = vprintk_store(facility, level, dev_info, fmt, args);
 
-	/* If called from the scheduler, we can not call up(). */
-	if (!in_sched) {
+	if (!have_boot_console && have_nbcon_console) {
+		bool is_panic_context = this_cpu_in_panic();
+
+		/*
+		 * In panic, the legacy consoles are not allowed to print from
+		 * the printk calling context unless explicitly allowed. This
+		 * gives the safe nbcon consoles a chance to print out all the
+		 * panic messages first. This restriction only applies if
+		 * there are nbcon consoles registered.
+		 */
+		if (is_panic_context)
+			do_trylock_unlock &= legacy_allow_panic_sync;
+
+		/*
+		 * There are situations where nbcon atomic printing should
+		 * happen in the printk() caller context:
+		 *
+		 * - When this CPU is in panic.
+		 *
+		 * - When booting, before the printing threads have been
+		 *   started.
+		 *
+		 * - During shutdown, since the printing threads may not get
+		 *   a chance to print the final messages.
+		 *
+		 * Note that if boot consoles are registered, the
+		 * console_lock/console_unlock dance must be relied upon
+		 * instead because nbcon consoles cannot print simultaneously
+		 * with boot consoles.
+		 */
+		if (is_panic_context ||
+		    !printk_threads_enabled ||
+		    (system_state > SYSTEM_RUNNING)) {
+			nbcon_atomic_flush_all();
+		}
+	}
+
+	nbcon_wake_threads();
+
+	if (do_trylock_unlock) {
 		/*
 		 * The caller may be holding system-critical or
 		 * timing-sensitive locks. Disable preemption during
 		 * printing of all remaining records to all consoles so that
 		 * this context can return as soon as possible. Hopefully
 		 * another printk() caller will take over the printing.
+		 *
+		 * Also, nbcon_get_default_prio() requires migration disabled.
 		 */
 		preempt_disable();
+
 		/*
-		 * Try to acquire and then immediately release the console
-		 * semaphore. The release will print out buffers. With the
-		 * spinning variant, this context tries to take over the
-		 * printing from another printing context.
+		 * Do not emit for EMERGENCY priority. The console will be
+		 * explicitly flushed when exiting the emergency section.
 		 */
-		if (console_trylock_spinning())
-			console_unlock();
+		if (nbcon_get_default_prio() == NBCON_PRIO_EMERGENCY) {
+			do_trylock_unlock = false;
+		} else {
+			/*
+			 * Try to acquire and then immediately release the
+			 * console semaphore. The release will print out
+			 * buffers. With the spinning variant, this context
+			 * tries to take over the printing from another
+			 * printing context.
			 */
+			if (console_trylock_spinning())
+				console_unlock();
+		}
+
 		preempt_enable();
 	}
 
-	if (in_sched)
-		defer_console_output();
-	else
+	if (do_trylock_unlock)
 		wake_up_klogd();
+	else
+		defer_console_output();
 
 	return printed_len;
 }
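[Editorial sketch] Condensing the new vprintk_emit() gating into one place, as a restatement rather than a drop-in; the names mirror the patch:

	/* When may the printk() caller flush legacy consoles directly? */
	static bool caller_may_trylock_unlock(void)
	{
		bool ok = printing_via_unlock && !IS_ENABLED(CONFIG_PREEMPT_RT);

		if (this_cpu_in_panic() && have_nbcon_console && !have_boot_console)
			ok = ok && legacy_allow_panic_sync;	/* nbcon prints first */
		return ok;
	}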
@@ -2380,6 +2490,14 @@ EXPORT_SYMBOL(_printk);
 static bool pr_flush(int timeout_ms, bool reset_on_progress);
 static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress);
 
+static struct task_struct *nbcon_legacy_kthread;
+
+static inline void wake_up_legacy_kthread(void)
+{
+	if (nbcon_legacy_kthread)
+		wake_up_interruptible(&legacy_wait);
+}
+
 #else /* CONFIG_PRINTK */
 
 #define printk_time false
@@ -2390,25 +2508,11 @@ static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progre
 
 static u64 syslog_seq;
 
-static size_t record_print_text(const struct printk_record *r,
-				bool syslog, bool time)
-{
-	return 0;
-}
-static ssize_t info_print_ext_header(char *buf, size_t size,
-				     struct printk_info *info)
-{
-	return 0;
-}
-static ssize_t msg_print_ext_body(char *buf, size_t size,
-				  char *text, size_t text_len,
-				  struct dev_printk_info *dev_info) { return 0; }
-static void console_lock_spinning_enable(void) { }
-static int console_lock_spinning_disable_and_check(int cookie) { return 0; }
-static bool suppress_message_printing(int level) { return false; }
 static bool pr_flush(int timeout_ms, bool reset_on_progress) { return true; }
 static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress) { return true; }
 
+static inline void nbcon_legacy_kthread_create(void) { }
+static inline void wake_up_legacy_kthread(void) { }
 #endif /* CONFIG_PRINTK */
 
 #ifdef CONFIG_EARLY_PRINTK
@@ -2616,6 +2720,8 @@ void suspend_console(void)
 void resume_console(void)
 {
 	struct console *con;
+	short flags;
+	int cookie;
 
 	if (!console_suspend_enabled)
 		return;
@@ -2632,6 +2738,20 @@ void resume_console(void)
 	 */
 	synchronize_srcu(&console_srcu);
 
+	/*
+	 * Since this runs in task context, wake the threaded printers
+	 * directly rather than scheduling irq_work to do it.
+	 */
+	cookie = console_srcu_read_lock();
+	for_each_console_srcu(con) {
+		flags = console_srcu_read_flags(con);
+		if (flags & CON_NBCON)
+			nbcon_kthread_wake(con);
+	}
+	console_srcu_read_unlock(cookie);
+
+	wake_up_legacy_kthread();
+
 	pr_flush(1000, true);
 }
 
@@ -2646,7 +2766,8 @@ void resume_console(void)
  */
 static int console_cpu_notify(unsigned int cpu)
 {
-	if (!cpuhp_tasks_frozen) {
+	if (!cpuhp_tasks_frozen && printing_via_unlock &&
+	    !IS_ENABLED(CONFIG_PREEMPT_RT)) {
 		/* If trylock fails, someone else is doing the printing */
 		if (console_trylock())
 			console_unlock();
@@ -2654,26 +2775,6 @@ static int console_cpu_notify(unsigned int cpu)
 	return 0;
 }
 
-/*
- * Return true if a panic is in progress on a remote CPU.
- *
- * On true, the local CPU should immediately release any printing resources
- * that may be needed by the panic CPU.
- */
-bool other_cpu_in_panic(void)
-{
-	if (!panic_in_progress())
-		return false;
-
-	/*
-	 * We can use raw_smp_processor_id() here because it is impossible for
-	 * the task to be migrated to the panic_cpu, or away from it. If
-	 * panic_cpu has already been set, and we're not currently executing on
-	 * that CPU, then we never will be.
-	 */
-	return atomic_read(&panic_cpu) != raw_smp_processor_id();
-}
-
 /**
  * console_lock - block the console subsystem from printing
  *
@@ -2723,42 +2824,16 @@ int is_console_locked(void)
 }
 EXPORT_SYMBOL(is_console_locked);
 
-/*
- * Check if the given console is currently capable and allowed to print
- * records.
- *
- * Requires the console_srcu_read_lock.
- */
-static inline bool console_is_usable(struct console *con)
-{
-	short flags = console_srcu_read_flags(con);
-
-	if (!(flags & CON_ENABLED))
-		return false;
-
-	if ((flags & CON_SUSPENDED))
-		return false;
-
-	if (!con->write)
-		return false;
-
-	/*
-	 * Console drivers may assume that per-cpu resources have been
-	 * allocated. So unless they're explicitly marked as being able to
-	 * cope (CON_ANYTIME) don't call them until this CPU is officially up.
-	 */
-	if (!cpu_online(raw_smp_processor_id()) && !(flags & CON_ANYTIME))
-		return false;
-
-	return true;
-}
-
 static void __console_unlock(void)
 {
 	console_locked = 0;
 	up_console_sem();
 }
 
+static DEFINE_WAIT_OVERRIDE_MAP(printk_legacy_map, LD_WAIT_SLEEP);
+
+#ifdef CONFIG_PRINTK
+
 /*
  * Prepend the message in @pmsg->pbufs->outbuf with a "dropped message". This
  * is achieved by shifting the existing message over and inserting the dropped
@@ -2773,8 +2848,7 @@ static void __console_unlock(void)
  *
  * If @pmsg->pbufs->outbuf is modified, @pmsg->outbuf_len is updated.
  */
-#ifdef CONFIG_PRINTK
-static void console_prepend_dropped(struct printk_message *pmsg, unsigned long dropped)
+void console_prepend_dropped(struct printk_message *pmsg, unsigned long dropped)
 {
 	struct printk_buffers *pbufs = pmsg->pbufs;
 	const size_t scratchbuf_sz = sizeof(pbufs->scratchbuf);
@@ -2805,9 +2879,6 @@ static void console_prepend_dropped(struct printk_message *pmsg, unsigned long d
 	memcpy(outbuf, scratchbuf, len);
 	pmsg->outbuf_len += len;
 }
-#else
-#define console_prepend_dropped(pmsg, dropped)
-#endif /* CONFIG_PRINTK */
 
 /*
  * Read and format the specified record (or a later record if the specified
@@ -2828,11 +2899,9 @@ static void console_prepend_dropped(struct printk_message *pmsg, unsigned long d
  * of @pmsg are valid. (See the documentation of struct printk_message
  * for information about the @pmsg fields.)
  */
-static bool printk_get_next_message(struct printk_message *pmsg, u64 seq,
-				    bool is_extended, bool may_suppress)
+bool printk_get_next_message(struct printk_message *pmsg, u64 seq,
+			     bool is_extended, bool may_suppress)
 {
-	static int panic_console_dropped;
-
 	struct printk_buffers *pbufs = pmsg->pbufs;
 	const size_t scratchbuf_sz = sizeof(pbufs->scratchbuf);
 	const size_t outbuf_sz = sizeof(pbufs->outbuf);
@@ -2860,17 +2929,6 @@ static bool printk_get_next_message(struct printk_message *pmsg, u64 seq,
 	pmsg->seq = r.info->seq;
 	pmsg->dropped = r.info->seq - seq;
 
-	/*
-	 * Check for dropped messages in panic here so that printk
-	 * suppression can occur as early as possible if necessary.
-	 */
-	if (pmsg->dropped &&
-	    panic_in_progress() &&
-	    panic_console_dropped++ > 10) {
-		suppress_panic_printk = 1;
-		pr_warn_once("Too many dropped messages. Suppress messages on non-panic CPUs to prevent livelock.\n");
-	}
-
 	/* Skip record that has level above the console loglevel. */
 	if (may_suppress && suppress_message_printing(r.info->level))
 		goto out;
@@ -2887,6 +2945,13 @@ static bool printk_get_next_message(struct printk_message *pmsg, u64 seq,
 	return true;
 }
 
+/*
+ * Used as the printk buffers for non-panic, serialized console printing.
+ * This is for legacy (!CON_NBCON) as well as all boot (CON_BOOT) consoles.
+ * Its usage requires the console_lock held.
+ */
+struct printk_buffers printk_shared_pbufs;
+
 /*
  * Print one record for the given console. The record printed is whatever
  * record is the next available record for the given console.
@@ -2904,12 +2969,10 @@ static bool printk_get_next_message(struct printk_message *pmsg, u64 seq,
  */
 static bool console_emit_next_record(struct console *con, bool *handover, int cookie)
 {
-	static struct printk_buffers pbufs;
-
 	bool is_extended = console_srcu_read_flags(con) & CON_EXTENDED;
-	char *outbuf = &pbufs.outbuf[0];
+	char *outbuf = &printk_shared_pbufs.outbuf[0];
 	struct printk_message pmsg = {
-		.pbufs = &pbufs,
+		.pbufs = &printk_shared_pbufs,
 	};
 	unsigned long flags;
 
@@ -2931,35 +2994,59 @@ static bool console_emit_next_record(struct console *con, bool *handover, int co
 		con->dropped = 0;
 	}
 
-	/*
-	 * While actively printing out messages, if another printk()
-	 * were to occur on another CPU, it may wait for this one to
-	 * finish. This task can not be preempted if there is a
-	 * waiter waiting to take over.
-	 *
-	 * Interrupts are disabled because the hand over to a waiter
-	 * must not be interrupted until the hand over is completed
-	 * (@console_waiter is cleared).
-	 */
-	printk_safe_enter_irqsave(flags);
-	console_lock_spinning_enable();
+	/* Write everything out to the hardware. */
 
-	/* Do not trace print latency. */
-	stop_critical_timings();
+	if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
+		/*
+		 * On PREEMPT_RT this function is either in a thread or
+		 * panic context. So there is no need for concern about
+		 * printk reentrance, handovers, or lockdep complaints.
+		 */
 
-	/* Write everything out to the hardware. */
-	con->write(con, outbuf, pmsg.outbuf_len);
+		con->write(con, outbuf, pmsg.outbuf_len);
+		con->seq = pmsg.seq + 1;
+	} else {
+		/*
+		 * While actively printing out messages, if another printk()
+		 * were to occur on another CPU, it may wait for this one to
+		 * finish. This task can not be preempted if there is a
+		 * waiter waiting to take over.
+		 *
+		 * Interrupts are disabled because the hand over to a waiter
+		 * must not be interrupted until the hand over is completed
+		 * (@console_waiter is cleared).
+		 */
+		printk_safe_enter_irqsave(flags);
+		console_lock_spinning_enable();
 
-	start_critical_timings();
+		/* Do not trace print latency. */
+		stop_critical_timings();
 
-	con->seq = pmsg.seq + 1;
+		lock_map_acquire_try(&printk_legacy_map);
+		con->write(con, outbuf, pmsg.outbuf_len);
+		lock_map_release(&printk_legacy_map);
 
-	*handover = console_lock_spinning_disable_and_check(cookie);
-	printk_safe_exit_irqrestore(flags);
+		start_critical_timings();
+
+		con->seq = pmsg.seq + 1;
+
+		*handover = console_lock_spinning_disable_and_check(cookie);
+		printk_safe_exit_irqrestore(flags);
+	}
 skip:
 	return true;
 }
 
+#else
+
+static bool console_emit_next_record(struct console *con, bool *handover, int cookie)
+{
+	*handover = false;
+	return false;
+}
+
+#endif /* CONFIG_PRINTK */
+
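[Editorial sketch] printk_legacy_map, added above, is a lockdep wait-type override map; the bracketing it enables around a possibly-sleeping legacy write() callback looks like this (mirroring the console_emit_next_record() hunk):

	lock_map_acquire_try(&printk_legacy_map);	/* annotate the region for lockdep */
	con->write(con, outbuf, pmsg.outbuf_len);	/* driver callback */
	lock_map_release(&printk_legacy_map);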
 /*
  * Print out all remaining records to all consoles.
  *
@@ -2998,13 +3085,33 @@ static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handove
 
 	cookie = console_srcu_read_lock();
 	for_each_console_srcu(con) {
+		short flags = console_srcu_read_flags(con);
+		u64 printk_seq;
 		bool progress;
 
-		if (!console_is_usable(con))
+		/*
+		 * console_flush_all() is only for legacy consoles,
+		 * unless the nbcon console has no kthread printer.
+		 */
+		if ((flags & CON_NBCON) && con->kthread)
+			continue;
+
+		if (!console_is_usable(con, flags, true))
 			continue;
 		any_usable = true;
 
-		progress = console_emit_next_record(con, handover, cookie);
+		if (flags & CON_NBCON) {
+
+			lock_map_acquire_try(&printk_legacy_map);
+			progress = nbcon_atomic_emit_next_record(con, handover, cookie);
+			lock_map_release(&printk_legacy_map);
+
+			printk_seq = nbcon_seq_read(con);
+		} else {
+			progress = console_emit_next_record(con, handover, cookie);
+
+			printk_seq = con->seq;
+		}
 
 		/*
 		 * If a handover has occurred, the SRCU read lock
@@ -3014,8 +3121,8 @@ static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handove
 			return false;
 
 		/* Track the next of the highest seq flushed. */
-		if (con->seq > *next_seq)
-			*next_seq = con->seq;
+		if (printk_seq > *next_seq)
+			*next_seq = printk_seq;
 
 		if (!progress)
 			continue;
@@ -3038,19 +3145,7 @@ static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handove
 	return false;
 }
 
-/**
- * console_unlock - unblock the console subsystem from printing
- *
- * Releases the console_lock which the caller holds to block printing of
- * the console subsystem.
- *
- * While the console_lock was held, console output may have been buffered
- * by printk(). If this is the case, console_unlock(); emits
- * the output prior to releasing the lock.
- *
- * console_unlock(); may be called from any context.
- */
-void console_unlock(void)
+static void console_flush_and_unlock(void)
 {
 	bool do_cond_resched;
 	bool handover;
@@ -3094,6 +3189,32 @@
 	 */
 	} while (prb_read_valid(prb, next_seq, NULL) && console_trylock());
 }
+
+/**
+ * console_unlock - unblock the console subsystem from printing
+ *
+ * Releases the console_lock which the caller holds to block printing of
+ * the console subsystem.
+ *
+ * While the console_lock was held, console output may have been buffered
+ * by printk(). If this is the case, console_unlock(); emits
+ * the output prior to releasing the lock.
+ *
+ * console_unlock(); may be called from any context.
+ */
+void console_unlock(void)
+{
+	/*
+	 * PREEMPT_RT relies on kthread and atomic consoles for printing.
+	 * It never attempts to print from console_unlock().
+	 */
+	if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
+		__console_unlock();
+		return;
+	}
+
+	console_flush_and_unlock();
+}
 EXPORT_SYMBOL(console_unlock);
 
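[Editorial sketch] The SRCU console-iteration pattern that the hunks above and below rely on, extracted as a standalone sketch; flags must always be read through console_srcu_read_flags() while the cookie is held:

	int cookie;
	struct console *con;

	cookie = console_srcu_read_lock();
	for_each_console_srcu(con) {
		short flags = console_srcu_read_flags(con);

		if (flags & CON_NBCON) {
			/* nbcon consoles: sequence lives in atomic state */
		}
	}
	console_srcu_read_unlock(cookie);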
 /**
@@ -3204,6 +3325,7 @@ void console_flush_on_panic(enum con_flush_mode mode)
 
 	if (mode == CONSOLE_REPLAY_ALL) {
 		struct console *c;
+		short flags;
 		int cookie;
 		u64 seq;
 
@@ -3211,16 +3333,25 @@ void console_flush_on_panic(enum con_flush_mode mode)
 
 		cookie = console_srcu_read_lock();
 		for_each_console_srcu(c) {
-			/*
-			 * This is an unsynchronized assignment, but the
-			 * kernel is in "hope and pray" mode anyway.
-			 */
-			c->seq = seq;
+			flags = console_srcu_read_flags(c);
+
+			if (flags & CON_NBCON) {
+				nbcon_seq_force(c, seq);
+			} else {
+				/*
+				 * This is an unsynchronized assignment. On
+				 * panic legacy consoles are only best effort.
+				 */
+				c->seq = seq;
+			}
 		}
 		console_srcu_read_unlock(cookie);
 	}
 
-	console_flush_all(false, &next_seq, &handover);
+	nbcon_atomic_flush_all();
+
+	if (printing_via_unlock)
+		console_flush_all(false, &next_seq, &handover);
 }
 
 /*
@@ -3277,13 +3408,122 @@ EXPORT_SYMBOL(console_stop);
 
 void console_start(struct console *console)
 {
+	short flags;
+
 	console_list_lock();
 	console_srcu_write_flags(console, console->flags | CON_ENABLED);
+	flags = console->flags;
 	console_list_unlock();
+
+	/*
+	 * Ensure that all SRCU list walks have completed. The related
+	 * printing context must be able to see it is enabled so that
+	 * it is guaranteed to wake up and resume printing.
+	 */
+	synchronize_srcu(&console_srcu);
+
+	if (flags & CON_NBCON)
+		nbcon_kthread_wake(console);
+	else
+		wake_up_legacy_kthread();
+
 	__pr_flush(console, 1000, true);
 }
 EXPORT_SYMBOL(console_start);
 
+#ifdef CONFIG_PRINTK
+static bool printer_should_wake(void)
+{
+	bool available = false;
+	struct console *con;
+	int cookie;
+
+	if (kthread_should_stop())
+		return true;
+
+	cookie = console_srcu_read_lock();
+	for_each_console_srcu(con) {
+		short flags = console_srcu_read_flags(con);
+		u64 printk_seq;
+
+		/*
+		 * The legacy printer thread is only for legacy consoles,
+		 * unless the nbcon console has no kthread printer.
+		 */
+		if ((flags & CON_NBCON) && con->kthread)
+			continue;
+
+		if (!console_is_usable(con, flags, true))
+			continue;
+
+		if (flags & CON_NBCON) {
+			printk_seq = nbcon_seq_read(con);
+		} else {
+			/*
+			 * It is safe to read @seq because only this
+			 * thread context updates @seq.
+			 */
+			printk_seq = con->seq;
+		}
+
+		if (prb_read_valid(prb, printk_seq, NULL)) {
+			available = true;
+			break;
+		}
+	}
+	console_srcu_read_unlock(cookie);
+
+	return available;
+}
+
+static int nbcon_legacy_kthread_func(void *unused)
+{
+	int error;
+
+	for (;;) {
+		error = wait_event_interruptible(legacy_wait, printer_should_wake());
+
+		if (kthread_should_stop())
+			break;
+
+		if (error)
+			continue;
+
+		console_lock();
+		console_flush_and_unlock();
+	}
+
+	return 0;
+}
+
+void nbcon_legacy_kthread_create(void)
+{
+	struct task_struct *kt;
+
+	lockdep_assert_held(&console_mutex);
+
+	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+		return;
+
+	if (!printk_threads_enabled || nbcon_legacy_kthread)
+		return;
+
+	kt = kthread_run(nbcon_legacy_kthread_func, NULL, "pr/legacy");
+	if (IS_ERR(kt)) {
+		pr_err("unable to start legacy printing thread\n");
+		return;
+	}
+
+	nbcon_legacy_kthread = kt;
+
+	/*
+	 * It is important that console printing threads are scheduled
+	 * shortly after a printk call and with generous runtime budgets.
+	 */
+	sched_set_normal(nbcon_legacy_kthread, -20);
+}
+#endif /* CONFIG_PRINTK */
+
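[Editorial sketch] The legacy printer thread and its wakers form a simple handshake; both halves appear in this patch, collected here for readability:

	/* waker side (wake_up_legacy_kthread()) */
	if (nbcon_legacy_kthread)
		wake_up_interruptible(&legacy_wait);

	/* printer side (nbcon_legacy_kthread_func()) */
	error = wait_event_interruptible(legacy_wait, printer_should_wake());
	if (!error) {
		console_lock();
		console_flush_and_unlock();
	}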
 static int __read_mostly keep_bootcon;
 
 static int __init keep_bootcon_setup(char *str)
@@ -3382,11 +3622,6 @@ static void try_enable_default_console(struct console *newcon)
 		newcon->flags |= CON_CONSDEV;
 }
 
-#define con_printk(lvl, con, fmt, ...) \
-	printk(lvl pr_fmt("%sconsole [%s%d] " fmt), \
-	       (con->flags & CON_BOOT) ? "boot" : "", \
-	       con->name, con->index, ##__VA_ARGS__)
-
 static void console_init_seq(struct console *newcon, bool bootcon_registered)
 {
 	struct console *con;
@@ -3435,11 +3670,20 @@ static void console_init_seq(struct console *newcon, bool bootcon_registered)
 
 		newcon->seq = prb_next_seq(prb);
 		for_each_console(con) {
-			if ((con->flags & CON_BOOT) &&
-			    (con->flags & CON_ENABLED) &&
-			    con->seq < newcon->seq) {
-				newcon->seq = con->seq;
+			u64 seq;
+
+			if (!((con->flags & CON_BOOT) &&
+			      (con->flags & CON_ENABLED))) {
+				continue;
 			}
+
+			if (con->flags & CON_NBCON)
+				seq = nbcon_seq_read(con);
+			else
+				seq = con->seq;
+
+			if (seq < newcon->seq)
+				newcon->seq = seq;
 		}
 	}
 
@@ -3500,6 +3744,15 @@ void register_console(struct console *newcon)
 		goto unlock;
 	}
 
+	if (newcon->flags & CON_NBCON) {
+		/*
+		 * Ensure the nbcon console buffers can be allocated
+		 * before modifying any global data.
+		 */
+		if (!nbcon_alloc(newcon))
+			goto unlock;
+	}
+
 	/*
 	 * See if we want to enable this console driver by default.
 	 *
@@ -3527,8 +3780,11 @@ void register_console(struct console *newcon)
 	err = try_enable_preferred_console(newcon, false);
 
 	/* printk() messages are not printed to the Braille console. */
-	if (err || newcon->flags & CON_BRL)
+	if (err || newcon->flags & CON_BRL) {
+		if (newcon->flags & CON_NBCON)
+			nbcon_free(newcon);
 		goto unlock;
+	}
 
 	/*
 	 * If we have a bootconsole, and are switching to a real console,
@@ -3544,6 +3800,17 @@ void register_console(struct console *newcon)
 	newcon->dropped = 0;
 	console_init_seq(newcon, bootcon_registered);
 
+	if (newcon->flags & CON_NBCON) {
+		have_nbcon_console = true;
+		nbcon_init(newcon);
+	} else {
+		have_legacy_console = true;
+		nbcon_legacy_kthread_create();
+	}
+
+	if (newcon->flags & CON_BOOT)
+		have_boot_console = true;
+
 	/*
 	 * Put this console in the list - keep the
 	 * preferred driver at the head of the list.
@@ -3596,6 +3863,11 @@ EXPORT_SYMBOL(register_console);
 /* Must be called under console_list_lock(). */
 static int unregister_console_locked(struct console *console)
 {
+	bool is_boot_con = (console->flags & CON_BOOT);
+	bool found_legacy_con = false;
+	bool found_nbcon_con = false;
+	bool found_boot_con = false;
+	struct console *c;
 	int res;
 
 	lockdep_assert_console_list_lock_held();
@@ -3635,11 +3907,50 @@ static int unregister_console_locked(struct console *console)
 	 */
 	synchronize_srcu(&console_srcu);
 
+	if (console->flags & CON_NBCON)
+		nbcon_free(console);
+
 	console_sysfs_notify();
 
 	if (console->exit)
 		res = console->exit(console);
 
+	/*
+	 * With this console gone, the global flags tracking registered
+	 * console types may have changed. Update them.
+	 */
+	for_each_console(c) {
+		if (c->flags & CON_BOOT)
+			found_boot_con = true;
+
+		if (c->flags & CON_NBCON)
+			found_nbcon_con = true;
+		else
+			found_legacy_con = true;
+	}
+	if (!found_boot_con)
+		have_boot_console = false;
+	if (!found_legacy_con)
+		have_legacy_console = false;
+	if (!found_nbcon_con)
+		have_nbcon_console = false;
+
+	/*
+	 * When the last boot console unregisters, start up the
+	 * printing threads.
+	 */
+	if (is_boot_con && !have_boot_console) {
+		for_each_console(c)
+			nbcon_kthread_create(c);
+	}
+
+#ifdef CONFIG_PRINTK
+	if (!printing_via_unlock && nbcon_legacy_kthread) {
+		kthread_stop(nbcon_legacy_kthread);
+		nbcon_legacy_kthread = NULL;
+	}
+#endif
+
 	return res;
 }
 
@@ -3784,69 +4095,94 @@ late_initcall(printk_late_init);
 /* If @con is specified, only wait for that console. Otherwise wait for all. */
 static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress)
 {
-	int remaining = timeout_ms;
+	unsigned long timeout_jiffies = msecs_to_jiffies(timeout_ms);
+	unsigned long remaining_jiffies = timeout_jiffies;
 	struct console *c;
 	u64 last_diff = 0;
 	u64 printk_seq;
+	short flags;
+	bool locked;
 	int cookie;
 	u64 diff;
 	u64 seq;
 
 	might_sleep();
 
-	seq = prb_next_seq(prb);
+	seq = prb_next_reserve_seq(prb);
 
-	/* Flush the consoles so that records up to @seq are printed. */
-	console_lock();
-	console_unlock();
+	/*
+	 * Flush the consoles so that records up to @seq are printed.
+	 * Otherwise this function will just wait for the threaded printers
+	 * to print up to @seq.
+	 */
+	if (printing_via_unlock && !IS_ENABLED(CONFIG_PREEMPT_RT)) {
+		console_lock();
+		console_unlock();
+	}
 
 	for (;;) {
+		unsigned long begin_jiffies;
+		unsigned long slept_jiffies;
+
+		locked = false;
 		diff = 0;
 
-		/*
-		 * Hold the console_lock to guarantee safe access to
-		 * console->seq. Releasing console_lock flushes more
-		 * records in case @seq is still not printed on all
-		 * usable consoles.
-		 */
-		console_lock();
+		if (printing_via_unlock) {
+			/*
+			 * Hold the console_lock to guarantee safe access to
+			 * console->seq. Releasing console_lock flushes more
+			 * records in case @seq is still not printed on all
+			 * usable consoles.
+			 */
+			console_lock();
+			locked = true;
+		}
 
 		cookie = console_srcu_read_lock();
 		for_each_console_srcu(c) {
 			if (con && con != c)
 				continue;
+
+			flags = console_srcu_read_flags(c);
+
 			/*
 			 * If consoles are not usable, it cannot be expected
 			 * that they make forward progress, so only increment
 			 * @diff for usable consoles.
 			 */
-			if (!console_is_usable(c))
+			if (!console_is_usable(c, flags, true) &&
+			    !console_is_usable(c, flags, false)) {
 				continue;
-			printk_seq = c->seq;
+			}
+
+			if (flags & CON_NBCON) {
+				printk_seq = nbcon_seq_read(c);
+			} else {
+				WARN_ON_ONCE(!locked);
+				printk_seq = c->seq;
+			}
+
 			if (printk_seq < seq)
 				diff += seq - printk_seq;
 		}
 		console_srcu_read_unlock(cookie);
 
 		if (diff != last_diff && reset_on_progress)
-			remaining = timeout_ms;
+			remaining_jiffies = timeout_jiffies;
 
-		console_unlock();
+		if (locked)
+			console_unlock();
 
 		/* Note: @diff is 0 if there are no usable consoles. */
-		if (diff == 0 || remaining == 0)
+		if (diff == 0 || remaining_jiffies == 0)
 			break;
 
-		if (remaining < 0) {
-			/* no timeout limit */
-			msleep(100);
-		} else if (remaining < 100) {
-			msleep(remaining);
-			remaining = 0;
-		} else {
-			msleep(100);
-			remaining -= 100;
-		}
+		/* msleep(1) might sleep much longer. Check time by jiffies. */
+		begin_jiffies = jiffies;
+		msleep(1);
+		slept_jiffies = jiffies - begin_jiffies;
+
+		remaining_jiffies -= min(slept_jiffies, remaining_jiffies);
 
 		last_diff = diff;
 	}
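[Editorial sketch] The rewritten __pr_flush() measures how long msleep(1) actually slept instead of assuming 1 ms. A small user-space model of that bookkeeping (plain C, illustrative values):

	#include <stdio.h>

	int main(void)
	{
		unsigned long remaining = 250;	/* remaining_jiffies */
		unsigned long slept = 300;	/* msleep(1) overslept */

		/* the min() clamp prevents the unsigned value wrapping */
		remaining -= (slept < remaining) ? slept : remaining;
		printf("remaining=%lu\n", remaining);	/* 0: loop terminates */
		return 0;
	}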
@@ -3887,9 +4223,16 @@ static void wake_up_klogd_work_func(struct irq_work *irq_work)
 	int pending = this_cpu_xchg(printk_pending, 0);
 
 	if (pending & PRINTK_PENDING_OUTPUT) {
-		/* If trylock fails, someone else is doing the printing */
-		if (console_trylock())
-			console_unlock();
+		if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
+			wake_up_interruptible(&legacy_wait);
+		} else {
+			/*
+			 * If trylock fails, some other context
+			 * will do the printing.
+			 */
+			if (console_trylock())
+				console_unlock();
+		}
 	}
 
 	if (pending & PRINTK_PENDING_WAKEUP)
@@ -3957,11 +4300,16 @@ void defer_console_output(void)
 	 * New messages may have been added directly to the ringbuffer
 	 * using vprintk_store(), so wake any waiters as well.
 	 */
-	__wake_up_klogd(PRINTK_PENDING_WAKEUP | PRINTK_PENDING_OUTPUT);
+	int val = PRINTK_PENDING_WAKEUP;
+
+	if (printing_via_unlock)
+		val |= PRINTK_PENDING_OUTPUT;
+	__wake_up_klogd(val);
 }
 
 void printk_trigger_flush(void)
 {
+	nbcon_wake_threads();
 	defer_console_output();
 }
 
diff --git a/kernel/printk/printk_ringbuffer.c b/kernel/printk/printk_ringbuffer.c
index fde338606..e7b808b82 100644
--- a/kernel/printk/printk_ringbuffer.c
+++ b/kernel/printk/printk_ringbuffer.c
@@ -6,6 +6,7 @@
 #include <linux/errno.h>
 #include <linux/bug.h>
 #include "printk_ringbuffer.h"
+#include "internal.h"
 
 /**
  * DOC: printk_ringbuffer overview
@@ -303,6 +304,9 @@
  *
  *   desc_push_tail:B / desc_reserve:D
  *     set descriptor reusable (state), then push descriptor tail (id)
+ *
+ *   desc_update_last_finalized:A / desc_last_finalized_seq:A
+ *     store finalized record, then set new highest finalized sequence number
  */
 
 #define DATA_SIZE(data_ring)	_DATA_SIZE((data_ring)->size_bits)
@@ -1030,9 +1034,13 @@ static char *data_alloc(struct printk_ringbuffer *rb, unsigned int size,
 	unsigned long next_lpos;
 
 	if (size == 0) {
-		/* Specify a data-less block. */
-		blk_lpos->begin = NO_LPOS;
-		blk_lpos->next = NO_LPOS;
+		/*
+		 * Data blocks are not created for empty lines. Instead, the
+		 * reader will recognize these special lpos values and handle
+		 * it appropriately.
+		 */
+		blk_lpos->begin = EMPTY_LINE_LPOS;
+		blk_lpos->next = EMPTY_LINE_LPOS;
 		return NULL;
 	}
 
@@ -1210,10 +1218,18 @@ static const char *get_data(struct prb_data_ring *data_ring,
 
 	/* Data-less data block description. */
 	if (BLK_DATALESS(blk_lpos)) {
-		if (blk_lpos->begin == NO_LPOS && blk_lpos->next == NO_LPOS) {
+		/*
+		 * Records that are just empty lines are also valid, even
+		 * though they do not have a data block. For such records
+		 * explicitly return empty string data to signify success.
+		 */
+		if (blk_lpos->begin == EMPTY_LINE_LPOS &&
+		    blk_lpos->next == EMPTY_LINE_LPOS) {
 			*data_size = 0;
 			return "";
 		}
+
+		/* Data lost, invalid, or otherwise unavailable. */
 		return NULL;
 	}
 
@@ -1441,20 +1457,118 @@ bool prb_reserve_in_last(struct prb_reserved_entry *e, struct printk_ringbuffer
 	return false;
 }
 
+/*
+ * @last_finalized_seq value guarantees that all records up to and including
+ * this sequence number are finalized and can be read. The only exception are
+ * too old records which have already been overwritten.
+ *
+ * It is also guaranteed that @last_finalized_seq only increases.
+ *
+ * Be aware that finalized records following non-finalized records are not
+ * reported because they are not yet available to the reader. For example,
+ * a new record stored via printk() will not be available to a printer if
+ * it follows a record that has not been finalized yet. However, once that
+ * non-finalized record becomes finalized, @last_finalized_seq will be
+ * appropriately updated and the full set of finalized records will be
+ * available to the printer. And since each printk() caller will either
+ * directly print or trigger deferred printing of all available unprinted
+ * records, all printk() messages will get printed.
+ */
+static u64 desc_last_finalized_seq(struct printk_ringbuffer *rb)
+{
+	struct prb_desc_ring *desc_ring = &rb->desc_ring;
+	unsigned long ulseq;
+
+	/*
+	 * Guarantee the sequence number is loaded before loading the
+	 * associated record in order to guarantee that the record can be
+	 * seen by this CPU. This pairs with desc_update_last_finalized:A.
+	 */
+	ulseq = atomic_long_read_acquire(&desc_ring->last_finalized_seq
+					); /* LMM(desc_last_finalized_seq:A) */
+
+	return __ulseq_to_u64seq(rb, ulseq);
+}
+
+static bool _prb_read_valid(struct printk_ringbuffer *rb, u64 *seq,
+			    struct printk_record *r, unsigned int *line_count);
+
+/*
+ * Check if there are records directly following @last_finalized_seq that are
+ * finalized. If so, update @last_finalized_seq to the latest of these
+ * records. It is not allowed to skip over records that are not yet finalized.
+ */
+static void desc_update_last_finalized(struct printk_ringbuffer *rb)
+{
+	struct prb_desc_ring *desc_ring = &rb->desc_ring;
+	u64 old_seq = desc_last_finalized_seq(rb);
+	unsigned long oldval;
+	unsigned long newval;
+	u64 finalized_seq;
+	u64 try_seq;
+
+try_again:
+	finalized_seq = old_seq;
+	try_seq = finalized_seq + 1;
+
+	/* Try to find later finalized records. */
+	while (_prb_read_valid(rb, &try_seq, NULL, NULL)) {
+		finalized_seq = try_seq;
+		try_seq++;
+	}
+
+	/* No update needed if no later finalized record was found. */
+	if (finalized_seq == old_seq)
+		return;
+
+	oldval = __u64seq_to_ulseq(old_seq);
+	newval = __u64seq_to_ulseq(finalized_seq);
+
+	/*
+	 * Set the sequence number of a later finalized record that has been
+	 * seen.
+	 *
+	 * Guarantee the record data is visible to other CPUs before storing
+	 * its sequence number. This pairs with desc_last_finalized_seq:A.
+	 *
+	 * Memory barrier involvement:
+	 *
+	 * If desc_last_finalized_seq:A reads from
+	 * desc_update_last_finalized:A, then desc_read:A reads from
+	 * _prb_commit:B.
+	 *
+	 * Relies on:
+	 *
+	 * RELEASE from _prb_commit:B to desc_update_last_finalized:A
+	 *    matching
+	 * ACQUIRE from desc_last_finalized_seq:A to desc_read:A
+	 *
+	 * Note: _prb_commit:B and desc_update_last_finalized:A can be
+	 *       different CPUs. However, the desc_update_last_finalized:A
+	 *       CPU (which performs the release) must have previously seen
+	 *       _prb_commit:B.
+	 */
+	if (!atomic_long_try_cmpxchg_release(&desc_ring->last_finalized_seq,
+				&oldval, newval)) { /* LMM(desc_update_last_finalized:A) */
+		old_seq = __ulseq_to_u64seq(rb, oldval);
+		goto try_again;
+	}
+}
+
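[Editorial sketch] A user-space model of the release/acquire pairing documented above, using C11 atomics; the kernel code additionally loops with try_cmpxchg so that @last_finalized_seq only moves forward:

	#include <stdatomic.h>

	static _Atomic unsigned long last_finalized_seq;

	void publish(unsigned long seq)
	{
		/* record data must be visible before the new seq is seen */
		atomic_store_explicit(&last_finalized_seq, seq,
				      memory_order_release);
	}

	unsigned long consume(void)
	{
		/* seq is loaded before the associated record is read */
		return atomic_load_explicit(&last_finalized_seq,
					    memory_order_acquire);
	}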
 /*
  * Attempt to finalize a specified descriptor. If this fails, the descriptor
  * is either already final or it will finalize itself when the writer commits.
  */
-static void desc_make_final(struct prb_desc_ring *desc_ring, unsigned long id)
+static void desc_make_final(struct printk_ringbuffer *rb, unsigned long id)
 {
+	struct prb_desc_ring *desc_ring = &rb->desc_ring;
 	unsigned long prev_state_val = DESC_SV(id, desc_committed);
 	struct prb_desc *d = to_desc(desc_ring, id);
 
-	atomic_long_cmpxchg_relaxed(&d->state_var, prev_state_val,
-			DESC_SV(id, desc_finalized)); /* LMM(desc_make_final:A) */
-
-	/* Best effort to remember the last finalized @id. */
-	atomic_long_set(&desc_ring->last_finalized_id, id);
+	if (atomic_long_try_cmpxchg_relaxed(&d->state_var, &prev_state_val,
+			DESC_SV(id, desc_finalized))) { /* LMM(desc_make_final:A) */
+		desc_update_last_finalized(rb);
+	}
 }
 
 /**
@@ -1550,7 +1664,7 @@ bool prb_reserve(struct prb_reserved_entry *e, struct printk_ringbuffer *rb,
 	 * readers. (For seq==0 there is no previous descriptor.)
 	 */
 	if (info->seq > 0)
-		desc_make_final(desc_ring, DESC_ID(id - 1));
+		desc_make_final(rb, DESC_ID(id - 1));
 
 	r->text_buf = data_alloc(rb, r->text_buf_size, &d->text_blk_lpos, id);
 	/* If text data allocation fails, a data-less record is committed. */
@@ -1643,7 +1757,7 @@ void prb_commit(struct prb_reserved_entry *e)
 	 */
 	head_id = atomic_long_read(&desc_ring->head_id); /* LMM(prb_commit:A) */
 	if (head_id != e->id)
-		desc_make_final(desc_ring, e->id);
+		desc_make_final(e->rb, e->id);
 }
 
 /**
@@ -1663,12 +1777,9 @@ void prb_commit(struct prb_reserved_entry *e)
  */
 void prb_final_commit(struct prb_reserved_entry *e)
 {
-	struct prb_desc_ring *desc_ring = &e->rb->desc_ring;
-
 	_prb_commit(e, desc_finalized);
 
-	/* Best effort to remember the last finalized @id. */
-	atomic_long_set(&desc_ring->last_finalized_id, e->id);
+	desc_update_last_finalized(e->rb);
 }
 
 /*
@@ -1746,6 +1857,8 @@ static bool copy_data(struct prb_data_ring *data_ring,
  * descriptor. However, it also verifies that the record is finalized and has
  * the sequence number @seq. On success, 0 is returned.
  *
+ * For the panic CPU, committed descriptors are also considered finalized.
+ *
  * Error return values:
  * -EINVAL: A finalized record with sequence number @seq does not exist.
  * -ENOENT: A finalized record with sequence number @seq exists, but its data
@@ -1764,16 +1877,25 @@ static int desc_read_finalized_seq(struct prb_desc_ring *desc_ring,
 
 	/*
 	 * An unexpected @id (desc_miss) or @seq mismatch means the record
-	 * does not exist. A descriptor in the reserved or committed state
-	 * means the record does not yet exist for the reader.
+	 * does not exist. A descriptor in the reserved state means the
+	 * record does not yet exist for the reader.
 	 */
 	if (d_state == desc_miss ||
 	    d_state == desc_reserved ||
-	    d_state == desc_committed ||
 	    s != seq) {
 		return -EINVAL;
 	}
 
+	/*
+	 * A descriptor in the committed state means the record does not yet
+	 * exist for the reader. However, for the panic CPU, committed
+	 * records are also handled as finalized records since they contain
+	 * message data in a consistent state and may contain additional
+	 * hints as to the cause of the panic.
+	 */
+	if (d_state == desc_committed && !this_cpu_in_panic())
+		return -EINVAL;
+
 	/*
 	 * A descriptor in the reusable state may no longer have its data
 	 * available; report it as existing but with lost data. Or the record
@@ -1832,7 +1954,7 @@ static int prb_read(struct printk_ringbuffer *rb, u64 seq,
 }
 
 /* Get the sequence number of the tail descriptor. */
-static u64 prb_first_seq(struct printk_ringbuffer *rb)
+u64 prb_first_seq(struct printk_ringbuffer *rb)
 {
 	struct prb_desc_ring *desc_ring = &rb->desc_ring;
 	enum desc_state d_state;
@@ -1875,12 +1997,131 @@ static u64 prb_first_seq(struct printk_ringbuffer *rb)
 	return seq;
 }
 
+/**
+ * prb_next_reserve_seq() - Get the sequence number after the most recently
+ *                          reserved record.
+ *
+ * @rb: The ringbuffer to get the sequence number from.
+ *
+ * This is the public function available to readers to see what sequence
+ * number will be assigned to the next reserved record.
+ *
+ * Note that depending on the situation, this value can be equal to or
+ * higher than the sequence number returned by prb_next_seq().
+ *
+ * Context: Any context.
+ * Return: The sequence number that will be assigned to the next record
+ *         reserved.
+ */
+u64 prb_next_reserve_seq(struct printk_ringbuffer *rb)
+{
+	struct prb_desc_ring *desc_ring = &rb->desc_ring;
+	unsigned long last_finalized_id;
+	atomic_long_t *state_var;
+	u64 last_finalized_seq;
+	unsigned long head_id;
+	struct prb_desc desc;
+	unsigned long diff;
+	struct prb_desc *d;
+	int err;
+
+	/*
+	 * It may not be possible to read a sequence number for @head_id.
+	 * So the ID of @last_finalized_seq is used to calculate what the
+	 * sequence number of @head_id will be.
+	 */
+
+try_again:
+	last_finalized_seq = desc_last_finalized_seq(rb);
+
+	/*
+	 * @head_id is loaded after @last_finalized_seq to ensure that it is
+	 * at or beyond @last_finalized_seq.
+	 *
+	 * Memory barrier involvement:
+	 *
+	 * If desc_last_finalized_seq:A reads from
+	 * desc_update_last_finalized:A, then
+	 * prb_next_reserve_seq:A reads from desc_reserve:D.
+	 *
+	 * Relies on:
+	 *
+	 * RELEASE from desc_reserve:D to desc_update_last_finalized:A
+	 *    matching
+	 * ACQUIRE from desc_last_finalized_seq:A to prb_next_reserve_seq:A
+	 *
+	 * Note: desc_reserve:D and desc_update_last_finalized:A can be
+	 *       different CPUs. However, the desc_update_last_finalized:A CPU
+	 *       (which performs the release) must have previously seen
+	 *       desc_read:C, which implies desc_reserve:D can be seen.
+	 */
+	head_id = atomic_long_read(&desc_ring->head_id); /* LMM(prb_next_reserve_seq:A) */
+
+	d = to_desc(desc_ring, last_finalized_seq);
+	state_var = &d->state_var;
+
+	/* Extract the ID, used to specify the descriptor to read. */
+	last_finalized_id = DESC_ID(atomic_long_read(state_var));
+
+	/* Ensure @last_finalized_id is correct. */
+	err = desc_read_finalized_seq(desc_ring, last_finalized_id, last_finalized_seq, &desc);
+
+	if (err == -EINVAL) {
+		if (last_finalized_seq == 0) {
+			/*
+			 * @last_finalized_seq still contains its initial
+			 * value. Probably no record has been finalized yet.
+			 * This means the ringbuffer is not yet full and the
+			 * @head_id value can be used directly (subtracting
+			 * off the id value corresponding to seq=0).
+			 */
+
+			/*
+			 * Because of hack#2 of the bootstrapping phase, the
+			 * @head_id initial value must be handled separately.
+			 */
+			if (head_id == DESC0_ID(desc_ring->count_bits))
+				return 0;
+
+			/*
+			 * The @head_id is initialized such that the first
+			 * increment will yield the first record (seq=0).
+			 * Therefore use the initial value +1 as the base to
+			 * subtract from @head_id.
+			 */
+			last_finalized_id = DESC0_ID(desc_ring->count_bits) + 1;
+		} else {
+			/* Record must have been overwritten. Try again. */
+			goto try_again;
+		}
+	}
+
+	/*
+	 * @diff is the number of records beyond the last record available
+	 * to readers.
+	 */
+	diff = head_id - last_finalized_id;
+
+	/*
+	 * @head_id points to the most recently reserved record, but this
+	 * function returns the sequence number that will be assigned to the
+	 * next (not yet reserved) record. Thus +1 is needed.
+	 */
+	return (last_finalized_seq + diff + 1);
+}
+
/*
|
||
- * Non-blocking read of a record. Updates @seq to the last finalized record
|
||
- * (which may have no data available).
|
||
+ * Non-blocking read of a record.
|
||
*
|
||
- * See the description of prb_read_valid() and prb_read_valid_info()
|
||
- * for details.
|
||
+ * On success @seq is updated to the record that was read and (if provided)
|
||
+ * @r and @line_count will contain the read/calculated data.
|
||
+ *
|
||
+ * On failure @seq is updated to a record that is not yet available to the
|
||
+ * reader, but it will be the next record available to the reader.
|
||
+ *
|
||
+ * Note: When the current CPU is in panic, this function will skip over any
|
||
+ * non-existent/non-finalized records in order to allow the panic CPU
|
||
+ * to print any and all records that have been finalized.
|
||
*/
|
||
static bool _prb_read_valid(struct printk_ringbuffer *rb, u64 *seq,
|
||
struct printk_record *r, unsigned int *line_count)
|
||
@@ -1899,12 +2140,32 @@ static bool _prb_read_valid(struct printk_ringbuffer *rb, u64 *seq,
|
||
*seq = tail_seq;
|
||
|
||
} else if (err == -ENOENT) {
|
||
- /* Record exists, but no data available. Skip. */
|
||
+ /* Record exists, but the data was lost. Skip. */
|
||
(*seq)++;
|
||
|
||
} else {
|
||
- /* Non-existent/non-finalized record. Must stop. */
|
||
- return false;
|
||
+ /*
|
||
+ * Non-existent/non-finalized record. Must stop.
|
||
+ *
|
||
+ * For panic situations it cannot be expected that
|
||
+ * non-finalized records will become finalized. But
|
||
+ * there may be other finalized records beyond that
|
||
+ * need to be printed for a panic situation. If this
|
||
+ * is the panic CPU, skip this
|
||
+ * non-existent/non-finalized record unless it is
|
||
+ * at or beyond the head, in which case it is not
|
||
+ * possible to continue.
|
||
+ *
|
||
+ * Note that new messages printed on panic CPU are
|
||
+ * finalized when we are here. The only exception
|
||
+ * might be the last message without trailing newline.
|
||
+ * But it would have the sequence number returned
|
||
+ * by "prb_next_reserve_seq() - 1".
|
||
+ */
|
||
+ if (this_cpu_in_panic() && ((*seq + 1) < prb_next_reserve_seq(rb)))
|
||
+ (*seq)++;
|
||
+ else
|
||
+ return false;
|
||
}
|
||
}
|
||
|
||
@@ -1932,7 +2193,7 @@ static bool _prb_read_valid(struct printk_ringbuffer *rb, u64 *seq,
|
||
* On success, the reader must check r->info.seq to see which record was
|
||
* actually read. This allows the reader to detect dropped records.
|
||
*
|
||
- * Failure means @seq refers to a not yet written record.
|
||
+ * Failure means @seq refers to a record not yet available to the reader.
|
||
*/
|
||
bool prb_read_valid(struct printk_ringbuffer *rb, u64 seq,
|
||
struct printk_record *r)
|
||
@@ -1962,7 +2223,7 @@ bool prb_read_valid(struct printk_ringbuffer *rb, u64 seq,
|
||
* On success, the reader must check info->seq to see which record meta data
|
||
* was actually read. This allows the reader to detect dropped records.
|
||
*
|
||
- * Failure means @seq refers to a not yet written record.
|
||
+ * Failure means @seq refers to a record not yet available to the reader.
|
||
*/
|
||
bool prb_read_valid_info(struct printk_ringbuffer *rb, u64 seq,
|
||
struct printk_info *info, unsigned int *line_count)
|
||
@@ -2008,7 +2269,9 @@ u64 prb_first_valid_seq(struct printk_ringbuffer *rb)
|
||
* newest sequence number available to readers will be.
|
||
*
|
||
* This provides readers a sequence number to jump to if all currently
|
||
- * available records should be skipped.
|
||
+ * available records should be skipped. It is guaranteed that all records
|
||
+ * previous to the returned value have been finalized and are (or were)
|
||
+ * available to the reader.
|
||
*
|
||
* Context: Any context.
|
||
* Return: The sequence number of the next newest (not yet available) record
|
||
@@ -2016,34 +2279,19 @@ u64 prb_first_valid_seq(struct printk_ringbuffer *rb)
|
||
*/
|
||
u64 prb_next_seq(struct printk_ringbuffer *rb)
|
||
{
|
||
- struct prb_desc_ring *desc_ring = &rb->desc_ring;
|
||
- enum desc_state d_state;
|
||
- unsigned long id;
|
||
u64 seq;
|
||
|
||
- /* Check if the cached @id still points to a valid @seq. */
|
||
- id = atomic_long_read(&desc_ring->last_finalized_id);
|
||
- d_state = desc_read(desc_ring, id, NULL, &seq, NULL);
|
||
+ seq = desc_last_finalized_seq(rb);
|
||
|
||
- if (d_state == desc_finalized || d_state == desc_reusable) {
|
||
- /*
|
||
- * Begin searching after the last finalized record.
|
||
- *
|
||
- * On 0, the search must begin at 0 because of hack#2
|
||
- * of the bootstrapping phase it is not known if a
|
||
- * record at index 0 exists.
|
||
- */
|
||
- if (seq != 0)
|
||
- seq++;
|
||
- } else {
|
||
- /*
|
||
- * The information about the last finalized sequence number
|
||
- * has gone. It should happen only when there is a flood of
|
||
- * new messages and the ringbuffer is rapidly recycled.
|
||
- * Give up and start from the beginning.
|
||
- */
|
||
- seq = 0;
|
||
- }
|
||
+ /*
|
||
+ * Begin searching after the last finalized record.
|
||
+ *
|
||
+ * On 0, the search must begin at 0 because of hack#2
|
||
+ * of the bootstrapping phase it is not known if a
|
||
+ * record at index 0 exists.
|
||
+ */
|
||
+ if (seq != 0)
|
||
+ seq++;
|
||
|
||
/*
|
||
* The information about the last finalized @seq might be inaccurate.
|
||
@@ -2085,7 +2333,7 @@ void prb_init(struct printk_ringbuffer *rb,
|
||
rb->desc_ring.infos = infos;
|
||
atomic_long_set(&rb->desc_ring.head_id, DESC0_ID(descbits));
|
||
atomic_long_set(&rb->desc_ring.tail_id, DESC0_ID(descbits));
|
||
- atomic_long_set(&rb->desc_ring.last_finalized_id, DESC0_ID(descbits));
|
||
+ atomic_long_set(&rb->desc_ring.last_finalized_seq, 0);
|
||
|
||
rb->text_data_ring.size_bits = textbits;
|
||
rb->text_data_ring.data = text_buf;
|
||
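
The arithmetic at the end of prb_next_reserve_seq() is easier to see with concrete numbers. Below is a minimal userspace sketch of the same calculation; all values are invented for illustration and do not come from the patch.

/* Sketch of the prb_next_reserve_seq() arithmetic with made-up values. */
#include <stdio.h>

int main(void)
{
	unsigned long head_id = 1034;           /* most recently reserved descriptor ID */
	unsigned long last_finalized_id = 1030; /* ID matching last_finalized_seq */
	unsigned long long last_finalized_seq = 900;

	/* Records reserved beyond the last one visible to readers. */
	unsigned long diff = head_id - last_finalized_id;        /* 4 */

	/* +1: the next record is one past the most recent reservation. */
	printf("%llu\n", last_finalized_seq + diff + 1);         /* 905 */
	return 0;
}
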
diff --git a/kernel/printk/printk_ringbuffer.h b/kernel/printk/printk_ringbuffer.h
index 18cd25e48..52626d0f1 100644
--- a/kernel/printk/printk_ringbuffer.h
+++ b/kernel/printk/printk_ringbuffer.h
@@ -75,7 +75,7 @@ struct prb_desc_ring {
 	struct printk_info *infos;
 	atomic_long_t head_id;
 	atomic_long_t tail_id;
-	atomic_long_t last_finalized_id;
+	atomic_long_t last_finalized_seq;
 };
 
 /*
@@ -127,8 +127,22 @@ enum desc_state {
 #define DESC_SV(id, state)	(((unsigned long)state << DESC_FLAGS_SHIFT) | id)
 #define DESC_ID_MASK		(~DESC_FLAGS_MASK)
 #define DESC_ID(sv)		((sv) & DESC_ID_MASK)
+
+/*
+ * Special data block logical position values (for fields of
+ * @prb_desc.text_blk_lpos).
+ *
+ * - Bit0 is used to identify if the record has no data block. (Implemented in
+ *   the LPOS_DATALESS() macro.)
+ *
+ * - Bit1 specifies the reason for not having a data block.
+ *
+ * These special values could never be real lpos values because of the
+ * meta data and alignment padding of data blocks. (See to_blk_size() for
+ * details.)
+ */
 #define FAILED_LPOS		0x1
-#define NO_LPOS			0x3
+#define EMPTY_LINE_LPOS		0x3
 
 #define FAILED_BLK_LPOS	\
 {				\
@@ -259,7 +273,7 @@ static struct printk_ringbuffer name = {	\
 		.infos		= &_##name##_infos[0],	\
 		.head_id	= ATOMIC_INIT(DESC0_ID(descbits)),	\
 		.tail_id	= ATOMIC_INIT(DESC0_ID(descbits)),	\
-		.last_finalized_id = ATOMIC_INIT(DESC0_ID(descbits)),	\
+		.last_finalized_seq = ATOMIC_INIT(0),	\
 	},	\
 	.text_data_ring = {	\
 		.size_bits	= (avgtextbits) + (descbits),	\
@@ -378,7 +392,41 @@ bool prb_read_valid(struct printk_ringbuffer *rb, u64 seq,
 bool prb_read_valid_info(struct printk_ringbuffer *rb, u64 seq,
 			 struct printk_info *info, unsigned int *line_count);
 
+u64 prb_first_seq(struct printk_ringbuffer *rb);
 u64 prb_first_valid_seq(struct printk_ringbuffer *rb);
 u64 prb_next_seq(struct printk_ringbuffer *rb);
+u64 prb_next_reserve_seq(struct printk_ringbuffer *rb);
+
+#ifdef CONFIG_64BIT
+
+#define __u64seq_to_ulseq(u64seq) (u64seq)
+#define __ulseq_to_u64seq(rb, ulseq) (ulseq)
+
+#else /* CONFIG_64BIT */
+
+#define __u64seq_to_ulseq(u64seq) ((u32)u64seq)
+
+static inline u64 __ulseq_to_u64seq(struct printk_ringbuffer *rb, u32 ulseq)
+{
+	u64 rb_first_seq = prb_first_seq(rb);
+	u64 seq;
+
+	/*
+	 * The provided sequence is only the lower 32 bits of the ringbuffer
+	 * sequence. It needs to be expanded to 64bit. Get the first sequence
+	 * number from the ringbuffer and fold it.
+	 *
+	 * Having a 32bit representation in the console is sufficient.
+	 * If a console ever gets more than 2^31 records behind
+	 * the ringbuffer then this is the least of the problems.
+	 *
+	 * Also the access to the ring buffer is always safe.
+	 */
+	seq = rb_first_seq - (s32)((u32)rb_first_seq - ulseq);
+
+	return seq;
+}
+
+#endif /* CONFIG_64BIT */
 
 #endif /* _KERNEL_PRINTK_RINGBUFFER_H */
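
On 32-bit builds, __ulseq_to_u64seq() recovers the upper 32 bits of a sequence number by folding a signed 32-bit delta around prb_first_seq(). A standalone sketch of the same folding, with hypothetical values (this is not kernel code, just the arithmetic):

/* Standalone sketch of the 32-bit sequence folding in __ulseq_to_u64seq(). */
#include <stdint.h>
#include <stdio.h>

static uint64_t ulseq_to_u64seq(uint64_t rb_first_seq, uint32_t ulseq)
{
	/* The signed 32-bit delta handles wraparound in either direction. */
	return rb_first_seq - (int32_t)((uint32_t)rb_first_seq - ulseq);
}

int main(void)
{
	/* A console seq stored as 32 bits, after the 64-bit counter crossed 2^32. */
	uint64_t rb_first_seq = 0x100000005ULL;	/* hypothetical tail sequence */
	uint32_t ulseq = 0x00000002;		/* lower 32 bits of a nearby seq */

	printf("%llx\n", (unsigned long long)ulseq_to_u64seq(rb_first_seq, ulseq));
	/* prints 100000002: the upper bits were recovered from rb_first_seq */
	return 0;
}
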
diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c
index 6d10927a0..8d9408d65 100644
--- a/kernel/printk/printk_safe.c
+++ b/kernel/printk/printk_safe.c
@@ -26,6 +26,18 @@ void __printk_safe_exit(void)
 	this_cpu_dec(printk_context);
 }
 
+void __printk_deferred_enter(void)
+{
+	cant_migrate();
+	this_cpu_inc(printk_context);
+}
+
+void __printk_deferred_exit(void)
+{
+	cant_migrate();
+	this_cpu_dec(printk_context);
+}
+
 asmlinkage int vprintk(const char *fmt, va_list args)
 {
 #ifdef CONFIG_KGDB_KDB
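
A usage sketch for the helpers added above, assuming the usual printk_deferred_enter()/printk_deferred_exit() wrappers map onto them: while the context counter is raised, printk output is queued rather than emitted synchronously, which is what you want while holding a lock the console path itself might take. The lock and function names here are invented for illustration.

/* Hypothetical caller; not part of the patch. */
#include <linux/printk.h>
#include <linux/spinlock.h>

static void example_report(spinlock_t *lock, int val)
{
	spin_lock(lock);		/* non-migratable context */
	printk_deferred_enter();	/* output is queued, not printed here */
	pr_warn("unexpected value: %d\n", val);
	printk_deferred_exit();		/* flushed later, e.g. via irq_work */
	spin_unlock(lock);
}
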
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 46612fb15..f893f4cfd 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -2409,6 +2409,12 @@ static int rcutorture_booster_init(unsigned int cpu)
 	WARN_ON_ONCE(!t);
 	sp.sched_priority = 2;
 	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
+#ifdef CONFIG_PREEMPT_RT
+	t = per_cpu(timersd, cpu);
+	WARN_ON_ONCE(!t);
+	sp.sched_priority = 2;
+	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
+#endif
 }
 
 /* Don't allow time recalculation while creating a new task. */
diff --git a/kernel/rcu/tree_stall.h b/kernel/rcu/tree_stall.h
index 11a1fac3a..6f085a159 100644
--- a/kernel/rcu/tree_stall.h
+++ b/kernel/rcu/tree_stall.h
@@ -8,6 +8,7 @@
  */
 
 #include <linux/kvm_para.h>
+#include <linux/console.h>
 
 //////////////////////////////////////////////////////////////////////////////
 //
@@ -604,6 +605,8 @@ static void print_other_cpu_stall(unsigned long gp_seq, unsigned long gps)
 	if (rcu_stall_is_suppressed())
 		return;
 
+	nbcon_cpu_emergency_enter();
+
 	/*
 	 * OK, time to rat on our buddy...
 	 * See Documentation/RCU/stallwarn.rst for info on how to debug
@@ -658,6 +661,8 @@ static void print_other_cpu_stall(unsigned long gp_seq, unsigned long gps)
 	panic_on_rcu_stall();
 
 	rcu_force_quiescent_state(); /* Kick them all. */
+
+	nbcon_cpu_emergency_exit();
 }
 
 static void print_cpu_stall(unsigned long gps)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index de6dd807d..987b959aa 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -901,14 +901,15 @@ static inline void hrtick_rq_init(struct rq *rq)
 
 #if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
 /*
- * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
+ * Atomically set TIF_NEED_RESCHED[_LAZY] and test for TIF_POLLING_NRFLAG,
  * this avoids any races wrt polling state changes and thereby avoids
  * spurious IPIs.
  */
-static inline bool set_nr_and_not_polling(struct task_struct *p)
+static inline bool set_nr_and_not_polling(struct task_struct *p, int tif_bit)
 {
 	struct thread_info *ti = task_thread_info(p);
-	return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
+
+	return !(fetch_or(&ti->flags, 1 << tif_bit) & _TIF_POLLING_NRFLAG);
 }
 
 /*
@@ -925,7 +926,7 @@ static bool set_nr_if_polling(struct task_struct *p)
 	for (;;) {
 		if (!(val & _TIF_POLLING_NRFLAG))
 			return false;
-		if (val & _TIF_NEED_RESCHED)
+		if (val & (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY))
 			return true;
 		if (try_cmpxchg(&ti->flags, &val, val | _TIF_NEED_RESCHED))
 			break;
@@ -934,9 +935,9 @@ static bool set_nr_if_polling(struct task_struct *p)
 }
 
 #else
-static inline bool set_nr_and_not_polling(struct task_struct *p)
+static inline bool set_nr_and_not_polling(struct task_struct *p, int tif_bit)
 {
-	set_tsk_need_resched(p);
+	set_tsk_thread_flag(p, tif_bit);
 	return true;
 }
 
@@ -1041,28 +1042,47 @@ void wake_up_q(struct wake_q_head *head)
  * might also involve a cross-CPU call to trigger the scheduler on
  * the target CPU.
  */
-void resched_curr(struct rq *rq)
+static void __resched_curr(struct rq *rq, int lazy)
 {
+	int cpu, tif_bit = TIF_NEED_RESCHED + lazy;
 	struct task_struct *curr = rq->curr;
-	int cpu;
 
 	lockdep_assert_rq_held(rq);
 
-	if (test_tsk_need_resched(curr))
+	if (unlikely(test_tsk_thread_flag(curr, tif_bit)))
 		return;
 
 	cpu = cpu_of(rq);
 
 	if (cpu == smp_processor_id()) {
-		set_tsk_need_resched(curr);
-		set_preempt_need_resched();
+		set_tsk_thread_flag(curr, tif_bit);
+		if (!lazy)
+			set_preempt_need_resched();
 		return;
 	}
 
-	if (set_nr_and_not_polling(curr))
-		smp_send_reschedule(cpu);
-	else
+	if (set_nr_and_not_polling(curr, tif_bit)) {
+		if (!lazy)
+			smp_send_reschedule(cpu);
+	} else {
 		trace_sched_wake_idle_without_ipi(cpu);
+	}
+}
+
+void resched_curr(struct rq *rq)
+{
+	__resched_curr(rq, 0);
+}
+
+void resched_curr_lazy(struct rq *rq)
+{
+	int lazy = IS_ENABLED(CONFIG_PREEMPT_BUILD_AUTO) && !sched_feat(FORCE_NEED_RESCHED) ?
+		TIF_NEED_RESCHED_LAZY_OFFSET : 0;
+
+	if (lazy && unlikely(test_tsk_thread_flag(rq->curr, TIF_NEED_RESCHED)))
+		return;
+
+	__resched_curr(rq, lazy);
 }
 
 void resched_cpu(int cpu)
@@ -1135,7 +1155,7 @@ static void wake_up_idle_cpu(int cpu)
 	if (cpu == smp_processor_id())
 		return;
 
-	if (set_nr_and_not_polling(rq->idle))
+	if (set_nr_and_not_polling(rq->idle, TIF_NEED_RESCHED))
 		smp_send_reschedule(cpu);
 	else
 		trace_sched_wake_idle_without_ipi(cpu);
@@ -6775,10 +6795,14 @@ void __noreturn do_task_dead(void)
 
 static inline void sched_submit_work(struct task_struct *tsk)
 {
+	static DEFINE_WAIT_OVERRIDE_MAP(sched_map, LD_WAIT_CONFIG);
 	unsigned int task_flags;
 
-	if (task_is_running(tsk))
-		return;
+	/*
+	 * Establish LD_WAIT_CONFIG context to ensure none of the code called
+	 * will use a blocking primitive -- which would lead to recursion.
+	 */
+	lock_map_acquire_try(&sched_map);
 
 	task_flags = tsk->flags;
 	/*
@@ -6804,6 +6828,8 @@ static inline void sched_submit_work(struct task_struct *tsk)
 	 * make sure to submit it to avoid deadlocks.
 	 */
 	blk_flush_plug(tsk->plug, true);
+
+	lock_map_release(&sched_map);
 }
 
 static void sched_update_worker(struct task_struct *tsk)
@@ -6818,16 +6844,26 @@ static void sched_update_worker(struct task_struct *tsk)
 	}
 }
 
-asmlinkage __visible void __sched schedule(void)
+static __always_inline void __schedule_loop(unsigned int sched_mode)
 {
-	struct task_struct *tsk = current;
-
-	sched_submit_work(tsk);
 	do {
 		preempt_disable();
-		__schedule(SM_NONE);
+		__schedule(sched_mode);
 		sched_preempt_enable_no_resched();
 	} while (need_resched());
+}
+
+asmlinkage __visible void __sched schedule(void)
+{
+	struct task_struct *tsk = current;
+
+#ifdef CONFIG_RT_MUTEXES
+	lockdep_assert(!tsk->sched_rt_mutex);
+#endif
+
+	if (!task_is_running(tsk))
+		sched_submit_work(tsk);
+	__schedule_loop(SM_NONE);
 	sched_update_worker(tsk);
 }
 EXPORT_SYMBOL(schedule);
@@ -6891,11 +6927,7 @@ void __sched schedule_preempt_disabled(void)
 #ifdef CONFIG_PREEMPT_RT
 void __sched notrace schedule_rtlock(void)
 {
-	do {
-		preempt_disable();
-		__schedule(SM_RTLOCK_WAIT);
-		sched_preempt_enable_no_resched();
-	} while (need_resched());
+	__schedule_loop(SM_RTLOCK_WAIT);
 }
 NOKPROBE_SYMBOL(schedule_rtlock);
 #endif
@@ -7091,6 +7123,32 @@ static void __setscheduler_prio(struct task_struct *p, int prio)
 
 #ifdef CONFIG_RT_MUTEXES
 
+/*
+ * Would be more useful with typeof()/auto_type but they don't mix with
+ * bit-fields. Since it's a local thing, use int. Keep the generic sounding
+ * name such that if someone were to implement this function we get to compare
+ * notes.
+ */
+#define fetch_and_set(x, v) ({ int _x = (x); (x) = (v); _x; })
+
+void rt_mutex_pre_schedule(void)
+{
+	lockdep_assert(!fetch_and_set(current->sched_rt_mutex, 1));
+	sched_submit_work(current);
+}
+
+void rt_mutex_schedule(void)
+{
+	lockdep_assert(current->sched_rt_mutex);
+	__schedule_loop(SM_NONE);
+}
+
+void rt_mutex_post_schedule(void)
+{
+	sched_update_worker(current);
+	lockdep_assert(fetch_and_set(current->sched_rt_mutex, 0));
+}
+
 static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
 {
 	if (pi_task)
@@ -8953,6 +9011,21 @@ static inline void preempt_dynamic_init(void) { }
 
 #endif /* #ifdef CONFIG_PREEMPT_DYNAMIC */
 
+/*
+ * task_is_pi_boosted - Check if task has been PI boosted.
+ * @p: Task to check.
+ *
+ * Return true if task is subject to priority inheritance.
+ */
+bool task_is_pi_boosted(const struct task_struct *p)
+{
+	int prio = p->prio;
+
+	if (!rt_prio(prio))
+		return false;
+	return prio != p->normal_prio;
+}
+
 /**
  * yield - yield the current processor to other threads.
  *
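
The lazy variant above differs from resched_curr() only in which TIF bit it sets and in whether an IPI follows: TIF_NEED_RESCHED_LAZY sits at TIF_NEED_RESCHED plus an offset, so __resched_curr() can compute the bit with plain addition. A standalone sketch of that bit selection (the bit number and offset are invented; only the arithmetic mirrors the patch):

/* Sketch of the tif_bit = TIF_NEED_RESCHED + lazy selection. */
#include <stdio.h>

#define TIF_NEED_RESCHED		3	/* hypothetical bit number */
#define TIF_NEED_RESCHED_LAZY_OFFSET	1	/* hypothetical offset */

static unsigned long thread_flags;

static void resched(int lazy)
{
	int tif_bit = TIF_NEED_RESCHED + lazy;	/* mirrors __resched_curr() */

	thread_flags |= 1UL << tif_bit;
	/* Only the non-lazy variant would follow up with an IPI. */
	printf("set bit %d%s\n", tif_bit, lazy ? " (lazy, wait for tick)" : " (IPI)");
}

int main(void)
{
	resched(0);				/* TIF_NEED_RESCHED: preempt now */
	resched(TIF_NEED_RESCHED_LAZY_OFFSET);	/* TIF_NEED_RESCHED_LAZY: next tick */
	return 0;
}
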
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 8b3063398..76530c8b3 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -333,6 +333,23 @@ static const struct file_operations sched_debug_fops = {
 	.release	= seq_release,
 };
 
+static ssize_t sched_hog_write(struct file *filp, const char __user *ubuf,
+			       size_t cnt, loff_t *ppos)
+{
+	unsigned long end = jiffies + 60 * HZ;
+
+	for (; time_before(jiffies, end) && !signal_pending(current);)
+		cpu_relax();
+
+	return cnt;
+}
+
+static const struct file_operations sched_hog_fops = {
+	.write		= sched_hog_write,
+	.open		= simple_open,
+	.llseek		= default_llseek,
+};
+
 static struct dentry *debugfs_sched;
 
 static __init int sched_init_debug(void)
@@ -374,6 +391,8 @@ static __init int sched_init_debug(void)
 
 	debugfs_create_file("debug", 0444, debugfs_sched, NULL, &sched_debug_fops);
 
+	debugfs_create_file("hog", 0200, debugfs_sched, NULL, &sched_hog_fops);
+
 	return 0;
 }
 late_initcall(sched_init_debug);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c9a4ea466..de39191ea 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1170,8 +1170,10 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se);
  * XXX: strictly: vd_i += N*r_i/w_i such that: vd_i > ve_i
  * this is probably good enough.
  */
-static void update_deadline(struct cfs_rq *cfs_rq, struct sched_entity *se)
+static void update_deadline(struct cfs_rq *cfs_rq, struct sched_entity *se, bool tick)
 {
+	struct rq *rq = rq_of(cfs_rq);
+
 	if ((s64)(se->vruntime - se->deadline) < 0)
 		return;
 
@@ -1190,10 +1192,19 @@ static void update_deadline(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	/*
 	 * The task has consumed its request, reschedule.
 	 */
-	if (cfs_rq->nr_running > 1) {
-		resched_curr(rq_of(cfs_rq));
-		clear_buddies(cfs_rq, se);
+	if (cfs_rq->nr_running < 2)
+		return;
+
+	if (!IS_ENABLED(CONFIG_PREEMPT_BUILD_AUTO) || sched_feat(FORCE_NEED_RESCHED)) {
+		resched_curr(rq);
+	} else {
+		/* Did the task ignore the lazy reschedule request? */
+		if (tick && test_tsk_thread_flag(rq->curr, TIF_NEED_RESCHED_LAZY))
+			resched_curr(rq);
+		else
+			resched_curr_lazy(rq);
 	}
+	clear_buddies(cfs_rq, se);
 }
 
 #include "pelt.h"
@@ -1301,7 +1312,7 @@ static void update_tg_load_avg(struct cfs_rq *cfs_rq)
 /*
  * Update the current task's runtime statistics.
  */
-static void update_curr(struct cfs_rq *cfs_rq)
+static void __update_curr(struct cfs_rq *cfs_rq, bool tick)
 {
 	struct sched_entity *curr = cfs_rq->curr;
 	u64 now = rq_clock_task(rq_of(cfs_rq));
@@ -1328,7 +1339,7 @@ static void update_curr(struct cfs_rq *cfs_rq)
 	schedstat_add(cfs_rq->exec_clock, delta_exec);
 
 	curr->vruntime += calc_delta_fair(delta_exec, curr);
-	update_deadline(cfs_rq, curr);
+	update_deadline(cfs_rq, curr, tick);
 	update_min_vruntime(cfs_rq);
 
 	if (entity_is_task(curr)) {
@@ -1342,6 +1353,11 @@ static void update_curr(struct cfs_rq *cfs_rq)
 	account_cfs_rq_runtime(cfs_rq, delta_exec);
 }
 
+static inline void update_curr(struct cfs_rq *cfs_rq)
+{
+	__update_curr(cfs_rq, false);
+}
+
 static void update_curr_fair(struct rq *rq)
 {
 	update_curr(cfs_rq_of(&rq->curr->se));
@@ -5656,7 +5672,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
 	/*
 	 * Update run-time statistics of the 'current'.
 	 */
-	update_curr(cfs_rq);
+	__update_curr(cfs_rq, true);
 
 	/*
 	 * Ensure that runnable average is periodically updated.
@@ -5670,7 +5686,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
 	 * validating it and just reschedule.
 	 */
 	if (queued) {
-		resched_curr(rq_of(cfs_rq));
+		resched_curr_lazy(rq_of(cfs_rq));
 		return;
 	}
 	/*
@@ -5816,7 +5832,7 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
 	 * hierarchy can be throttled
 	 */
 	if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
-		resched_curr(rq_of(cfs_rq));
+		resched_curr_lazy(rq_of(cfs_rq));
 }
 
 static __always_inline
@@ -6115,7 +6131,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 
 	/* Determine whether we need to wake up potentially idle CPU: */
 	if (rq->curr == rq->idle && rq->cfs.nr_running)
-		resched_curr(rq);
+		resched_curr_lazy(rq);
 }
 
 #ifdef CONFIG_SMP
@@ -7357,7 +7373,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
 
 	if (delta < 0) {
 		if (task_current(rq, p))
-			resched_curr(rq);
+			resched_curr_lazy(rq);
 		return;
 	}
 	hrtick_start(rq, delta);
@@ -9341,7 +9357,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
 	 * prevents us from potentially nominating it as a false LAST_BUDDY
 	 * below.
 	 */
-	if (test_tsk_need_resched(curr))
+	if (need_resched())
 		return;
 
 	if (!sched_feat(WAKEUP_PREEMPTION))
@@ -9384,7 +9400,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
 		return;
 
 preempt:
-	resched_curr(rq);
+	resched_curr_lazy(rq);
 }
 
 #ifdef CONFIG_QOS_SCHED
@@ -9873,8 +9889,7 @@ static bool _qos_smt_check_need_resched(int this_cpu, struct rq *rq)
 
 	/*
 	 * There are two cases rely on the set need_resched to drive away
-	 * offline task:
-	 * a) The qos_smt_status of siblings cpu is online, the task of curr cpu is offline;
+	 * offline task: a) The qos_smt_status of siblings cpu is online, the task of curr cpu is offline;
 	 * b) The qos_smt_status of siblings cpu is offline, the task of curr cpu is idle,
 	 *    and current cpu only has SCHED_IDLE tasks enqueued.
 	 */
@@ -14248,7 +14263,7 @@ static inline void task_tick_core(struct rq *rq, struct task_struct *curr)
 	 */
 	if (rq->core->core_forceidle_count && rq->cfs.nr_running == 1 &&
 	    __entity_slice_used(&curr->se, MIN_NR_TASKS_DURING_FORCEIDLE))
-		resched_curr(rq);
+		resched_curr_lazy(rq);
 }
 
 /*
@@ -14564,7 +14579,7 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
 	 */
 	if (task_current(rq, p)) {
 		if (p->prio > oldprio)
-			resched_curr(rq);
+			resched_curr_lazy(rq);
 	} else
 		check_preempt_curr(rq, p, 0);
 }
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index 26b1a03bd..a08f2fcd7 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -98,7 +98,7 @@ SCHED_FEAT(UTIL_EST_FASTUP, true)
 SCHED_FEAT(LATENCY_WARN, false)
 
 SCHED_FEAT(HZ_BW, true)
-
+SCHED_FEAT(FORCE_NEED_RESCHED, false)
 #ifdef CONFIG_QOS_SCHED_DYNAMIC_AFFINITY
 /*
  * Use util_avg of bottom-Level taskgroup
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 5007b25c5..95e1b3df1 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -57,8 +57,7 @@ static noinline int __cpuidle cpu_idle_poll(void)
 	ct_cpuidle_enter();
 
 	raw_local_irq_enable();
-	while (!tif_need_resched() &&
-	       (cpu_idle_force_poll || tick_check_broadcast_expired()))
+	while (!need_resched() && (cpu_idle_force_poll || tick_check_broadcast_expired()))
 		cpu_relax();
 	raw_local_irq_disable();
 
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 77bb7ee8c..6dedad4f4 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -2256,8 +2256,11 @@ static int rto_next_cpu(struct root_domain *rd)
 
 		rd->rto_cpu = cpu;
 
-		if (cpu < nr_cpu_ids)
+		if (cpu < nr_cpu_ids) {
+			if (!has_pushable_tasks(cpu_rq(cpu)))
+				continue;
 			return cpu;
+		}
 
 		rd->rto_cpu = -1;
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 761870540..f9918a975 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2601,6 +2601,7 @@ extern void init_sched_fair_class(void);
 extern void reweight_task(struct task_struct *p, const struct load_weight *lw);
 
 extern void resched_curr(struct rq *rq);
+extern void resched_curr_lazy(struct rq *rq);
 extern void resched_cpu(int cpu);
 
 extern struct rt_bandwidth def_rt_bandwidth;
diff --git a/kernel/signal.c b/kernel/signal.c
index c73873d67..aef5dcf43 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2334,15 +2334,35 @@ static int ptrace_stop(int exit_code, int why, unsigned long message,
 		do_notify_parent_cldstop(current, false, why);
 
 	/*
-	 * Don't want to allow preemption here, because
-	 * sys_ptrace() needs this task to be inactive.
+	 * The previous do_notify_parent_cldstop() invocation woke ptracer.
+	 * On a PREEMPTION kernel this can result in a preemption requirement
+	 * which will be fulfilled after read_unlock() and the ptracer will be
+	 * put on the CPU.
+	 * The ptracer is in wait_task_inactive(, __TASK_TRACED) waiting for
+	 * this task to wait in schedule(). If this task gets preempted then it
+	 * remains enqueued on the runqueue. The ptracer will observe this and
+	 * then sleep for a delay of one HZ tick. In the meantime this task
+	 * gets scheduled, enters schedule() and will wait for the ptracer.
 	 *
-	 * XXX: implement read_unlock_no_resched().
+	 * This preemption point is not bad from a correctness point of view
+	 * but extends the runtime by one HZ tick due to the ptracer's sleep.
+	 * The preempt-disable section ensures that there will be no preemption
+	 * between unlock and schedule(), improving performance since the
+	 * ptracer has no reason to sleep.
+	 *
+	 * On PREEMPT_RT locking tasklist_lock does not disable preemption.
+	 * Therefore the task can be preempted (after
+	 * do_notify_parent_cldstop()) before unlocking tasklist_lock, so there
+	 * is no benefit in doing this. The optimisation is harmful on
+	 * PREEMPT_RT because the spinlock_t (in cgroup_enter_frozen()) must not
+	 * be acquired with disabled preemption.
 	 */
-	preempt_disable();
+	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+		preempt_disable();
 	read_unlock(&tasklist_lock);
 	cgroup_enter_frozen();
-	preempt_enable_no_resched();
+	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+		preempt_enable_no_resched();
 	schedule();
 	cgroup_leave_frozen(true);
 
diff --git a/kernel/softirq.c b/kernel/softirq.c
index cd8770b2f..10099e684 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -247,6 +247,19 @@ void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
 }
 EXPORT_SYMBOL(__local_bh_enable_ip);
 
+void softirq_preempt(void)
+{
+	if (WARN_ON_ONCE(!preemptible()))
+		return;
+
+	if (WARN_ON_ONCE(__this_cpu_read(softirq_ctrl.cnt) != SOFTIRQ_OFFSET))
+		return;
+
+	__local_bh_enable(SOFTIRQ_OFFSET, true);
+	/* preemption point */
+	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
+}
+
 /*
  * Invoked from ksoftirqd_run() outside of the interrupt disabled section
  * to acquire the per CPU local lock for reentrancy protection.
@@ -623,6 +636,24 @@ static inline void tick_irq_exit(void)
 #endif
 }
 
+#ifdef CONFIG_PREEMPT_RT
+DEFINE_PER_CPU(struct task_struct *, timersd);
+DEFINE_PER_CPU(unsigned long, pending_timer_softirq);
+
+static void wake_timersd(void)
+{
+	struct task_struct *tsk = __this_cpu_read(timersd);
+
+	if (tsk)
+		wake_up_process(tsk);
+}
+
+#else
+
+static inline void wake_timersd(void) { }
+
+#endif
+
 static inline void __irq_exit_rcu(void)
 {
 #ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
@@ -635,6 +666,10 @@ static inline void __irq_exit_rcu(void)
 	if (!in_interrupt() && local_softirq_pending())
 		invoke_softirq();
 
+	if (IS_ENABLED(CONFIG_PREEMPT_RT) && local_pending_timers() &&
+	    !(in_nmi() | in_hardirq()))
+		wake_timersd();
+
 	tick_irq_exit();
 }
 
@@ -1014,12 +1049,70 @@ static struct smp_hotplug_thread softirq_threads = {
 	.thread_comm		= "ksoftirqd/%u",
 };
 
+#ifdef CONFIG_PREEMPT_RT
+static void timersd_setup(unsigned int cpu)
+{
+	sched_set_fifo_low(current);
+}
+
+static int timersd_should_run(unsigned int cpu)
+{
+	return local_pending_timers();
+}
+
+static void run_timersd(unsigned int cpu)
+{
+	unsigned int timer_si;
+
+	ksoftirqd_run_begin();
+
+	timer_si = local_pending_timers();
+	__this_cpu_write(pending_timer_softirq, 0);
+	or_softirq_pending(timer_si);
+
+	__do_softirq();
+
+	ksoftirqd_run_end();
+}
+
+static void raise_ktimers_thread(unsigned int nr)
+{
+	trace_softirq_raise(nr);
+	__this_cpu_or(pending_timer_softirq, 1 << nr);
+}
+
+void raise_hrtimer_softirq(void)
+{
+	raise_ktimers_thread(HRTIMER_SOFTIRQ);
+}
+
+void raise_timer_softirq(void)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	raise_ktimers_thread(TIMER_SOFTIRQ);
+	wake_timersd();
+	local_irq_restore(flags);
+}
+
+static struct smp_hotplug_thread timer_threads = {
+	.store			= &timersd,
+	.setup			= timersd_setup,
+	.thread_should_run	= timersd_should_run,
+	.thread_fn		= run_timersd,
+	.thread_comm		= "ktimers/%u",
+};
+#endif
+
 static __init int spawn_ksoftirqd(void)
 {
 	cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
 				  takeover_tasklets);
 	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));
-
+#ifdef CONFIG_PREEMPT_RT
+	BUG_ON(smpboot_register_percpu_thread(&timer_threads));
+#endif
 	return 0;
 }
 early_initcall(spawn_ksoftirqd);
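
The ktimers/%u thread above follows a simple pattern: the interrupt path records pending work in a per-CPU bitmap (pending_timer_softirq) and wakes a dedicated thread, which drains the bitmap outside of hardirq context. A rough userspace analogue of that raise-then-wake flow, with all names invented for illustration:

/* Userspace analogue of raise_timer_softirq() + run_timersd(); demo only. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_ulong pending;	/* stands in for pending_timer_softirq */
static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;

static void raise_timer(unsigned int nr)	/* "irq" side */
{
	atomic_fetch_or(&pending, 1UL << nr);
	pthread_mutex_lock(&m);
	pthread_cond_signal(&cv);		/* plays the role of wake_timersd() */
	pthread_mutex_unlock(&m);
}

static void *timersd(void *arg)			/* thread side */
{
	for (;;) {
		pthread_mutex_lock(&m);
		while (atomic_load(&pending) == 0)
			pthread_cond_wait(&cv, &m);
		pthread_mutex_unlock(&m);

		/* Atomically take the whole mask, like the write of 0. */
		unsigned long work = atomic_exchange(&pending, 0);
		printf("draining softirq mask %#lx\n", work);
	}
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, timersd, NULL);
	raise_timer(1);
	sleep(1);	/* let the worker drain once, then exit the demo */
	return 0;
}
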
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 57e5cb36f..c4ae45701 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -1812,7 +1812,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 	if (!ktime_before(now, cpu_base->softirq_expires_next)) {
 		cpu_base->softirq_expires_next = KTIME_MAX;
 		cpu_base->softirq_activated = 1;
-		raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+		raise_hrtimer_softirq();
 	}
 
 	__hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
@@ -1925,7 +1925,7 @@ void hrtimer_run_queues(void)
 	if (!ktime_before(now, cpu_base->softirq_expires_next)) {
 		cpu_base->softirq_expires_next = KTIME_MAX;
 		cpu_base->softirq_activated = 1;
-		raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+		raise_hrtimer_softirq();
 	}
 
 	__hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 55cbc49f7..1a0ed106b 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -795,7 +795,7 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
 
 static inline bool local_timer_softirq_pending(void)
 {
-	return local_softirq_pending() & BIT(TIMER_SOFTIRQ);
+	return local_pending_timers() & BIT(TIMER_SOFTIRQ);
 }
 
 static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 63a8ce717..b3fbe97d1 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1470,9 +1470,16 @@ static inline void timer_base_unlock_expiry(struct timer_base *base)
  */
 static void timer_sync_wait_running(struct timer_base *base)
 {
-	if (atomic_read(&base->timer_waiters)) {
+	bool need_preempt;
+
+	need_preempt = task_is_pi_boosted(current);
+	if (need_preempt || atomic_read(&base->timer_waiters)) {
 		raw_spin_unlock_irq(&base->lock);
 		spin_unlock(&base->expiry_lock);
+
+		if (need_preempt)
+			softirq_preempt();
+
 		spin_lock(&base->expiry_lock);
 		raw_spin_lock_irq(&base->lock);
 	}
@@ -2054,7 +2061,7 @@ static void run_local_timers(void)
 		if (time_before(jiffies, base->next_expiry))
 			return;
 	}
-	raise_softirq(TIMER_SOFTIRQ);
+	raise_timer_softirq();
 }
 
 /*
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 2412c0a46..939d67174 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2706,6 +2706,8 @@ unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
 
 	if (tif_need_resched())
 		trace_flags |= TRACE_FLAG_NEED_RESCHED;
+	if (tif_need_resched_lazy())
+		trace_flags |= TRACE_FLAG_NEED_RESCHED_LAZY;
 	if (test_preempt_need_resched())
 		trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
 	return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) |
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 2b948d35f..959f09ab8 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -464,17 +464,29 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
 		(entry->flags & TRACE_FLAG_IRQS_OFF && bh_off) ? 'D' :
 		(entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
 		bh_off ? 'b' :
-		(entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' :
+		!IS_ENABLED(CONFIG_TRACE_IRQFLAGS_SUPPORT) ? 'X' :
 		'.';
 
-	switch (entry->flags & (TRACE_FLAG_NEED_RESCHED |
+	switch (entry->flags & (TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_NEED_RESCHED_LAZY |
 				TRACE_FLAG_PREEMPT_RESCHED)) {
+	case TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_NEED_RESCHED_LAZY | TRACE_FLAG_PREEMPT_RESCHED:
+		need_resched = 'B';
+		break;
 	case TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_PREEMPT_RESCHED:
 		need_resched = 'N';
 		break;
+	case TRACE_FLAG_NEED_RESCHED_LAZY | TRACE_FLAG_PREEMPT_RESCHED:
+		need_resched = 'L';
+		break;
+	case TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_NEED_RESCHED_LAZY:
+		need_resched = 'b';
+		break;
 	case TRACE_FLAG_NEED_RESCHED:
 		need_resched = 'n';
 		break;
+	case TRACE_FLAG_NEED_RESCHED_LAZY:
+		need_resched = 'l';
+		break;
 	case TRACE_FLAG_PREEMPT_RESCHED:
 		need_resched = 'p';
 		break;
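
Read directly from the switch above, the need-resched column of the latency format can now show seven flag combinations: 'B' (need-resched, lazy and preempt-resched all set), 'N' (need-resched and preempt-resched), 'L' (lazy and preempt-resched), 'b' (need-resched and lazy), 'n' (need-resched only), 'l' (lazy only) and 'p' (preempt-resched only); the no-flag case presumably still falls through to '.' in the default path not shown in this hunk.
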
diff --git a/net/core/dev.c b/net/core/dev.c
index 75e94b327..5d021cc1f 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4723,15 +4723,6 @@ static void rps_trigger_softirq(void *data)
 
 #endif /* CONFIG_RPS */
 
-/* Called from hardirq (IPI) context */
-static void trigger_rx_softirq(void *data)
-{
-	struct softnet_data *sd = data;
-
-	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
-	smp_store_release(&sd->defer_ipi_scheduled, 0);
-}
-
 /*
  * After we queued a packet into sd->input_pkt_queue,
  * we need to make sure this queue is serviced soon.
@@ -6711,6 +6702,32 @@ static void skb_defer_free_flush(struct softnet_data *sd)
 	}
 }
 
+#ifndef CONFIG_PREEMPT_RT
+
+/* Called from hardirq (IPI) context */
+static void trigger_rx_softirq(void *data)
+{
+	struct softnet_data *sd = data;
+
+	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
+	smp_store_release(&sd->defer_ipi_scheduled, 0);
+}
+
+#else
+
+static void trigger_rx_softirq(struct work_struct *defer_work)
+{
+	struct softnet_data *sd;
+
+	sd = container_of(defer_work, struct softnet_data, defer_work);
+	smp_store_release(&sd->defer_ipi_scheduled, 0);
+	local_bh_disable();
+	skb_defer_free_flush(sd);
+	local_bh_enable();
+}
+
+#endif
+
 static int napi_threaded_poll(void *data)
 {
 	struct napi_struct *napi = data;
@@ -11648,7 +11665,11 @@ static int __init net_dev_init(void)
 		INIT_CSD(&sd->csd, rps_trigger_softirq, sd);
 		sd->cpu = i;
 #endif
+#ifndef CONFIG_PREEMPT_RT
 		INIT_CSD(&sd->defer_csd, trigger_rx_softirq, sd);
+#else
+		INIT_WORK(&sd->defer_work, trigger_rx_softirq);
+#endif
 		spin_lock_init(&sd->defer_lock);
 
 		init_gro_hash(&sd->backlog);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index a9efd6fb2..ce127b20d 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -6865,8 +6865,13 @@ nodefer:	__kfree_skb(skb);
 	/* Make sure to trigger NET_RX_SOFTIRQ on the remote CPU
 	 * if we are unlucky enough (this seems very unlikely).
 	 */
-	if (unlikely(kick) && !cmpxchg(&sd->defer_ipi_scheduled, 0, 1))
+	if (unlikely(kick) && !cmpxchg(&sd->defer_ipi_scheduled, 0, 1)) {
+#ifndef CONFIG_PREEMPT_RT
 		smp_call_function_single_async(cpu, &sd->defer_csd);
+#else
+		schedule_work_on(cpu, &sd->defer_work);
+#endif
+	}
 }
 
 static void skb_splice_csum_page(struct sk_buff *skb, struct page *page,
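
On PREEMPT_RT the deferred-skb kick cannot raise a softirq from IPI context, so it is routed through a workqueue item instead: schedule_work_on() runs trigger_rx_softirq() in a kworker, where local_bh_disable()/local_bh_enable() provide the BH context that skb_defer_free_flush() expects. In both configurations the cmpxchg() on defer_ipi_scheduled makes the kick one-shot. A standalone sketch of that guard (names invented, C11 atomics standing in for the kernel primitive):

/* One-shot kick guard: only the first caller after the flag was clear wins. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int defer_scheduled;

static bool kick_once(void)
{
	int expected = 0;
	/* mirrors !cmpxchg(&sd->defer_ipi_scheduled, 0, 1) */
	return atomic_compare_exchange_strong(&defer_scheduled, &expected, 1);
}

int main(void)
{
	printf("%d %d\n", kick_once(), kick_once());	/* 1 0 */
	return 0;
}
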
--
2.41.0