
Commit 5c37f1a

James Morse authored and Marc Zyngier committed
KVM: arm64: Ask the compiler to __always_inline functions used at HYP
On non-VHE CPUs, KVM's __hyp_text contains code run at EL2 while the rest of the kernel runs at EL1. This code lives in its own section with start and end markers so we can map it to EL2.

The compiler may decide not to inline static-inline functions from the header file. It may also decide not to put these out-of-line functions in the same section, meaning they aren't mapped when called at EL2.

Clang-9 does exactly this with __kern_hyp_va() and a few others when x18 is reserved for the shadow call stack.

Add the additional __always_ hint to all the static-inlines that are called from a hyp file.

Signed-off-by: James Morse <[email protected]>
Signed-off-by: Marc Zyngier <[email protected]>
Link: https://lore.kernel.org/r/[email protected]

----

kvm_get_hyp_vector() pulls in all the regular per-cpu accessors and this_cpu_has_cap(); fortunately it's only called for VHE.
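To illustrate the failure mode, here is a minimal sketch (not from the patch; the section and helper names are simplified stand-ins for the real __hyp_text machinery):

/*
 * Sketch: "static inline" is only a hint. If the compiler emits an
 * out-of-line copy of the helper, that copy lands in the translation
 * unit's regular .text, not in the hyp section, and a call from EL2
 * faults because only the hyp section is mapped there.
 */
#define __hyp_section __attribute__((section(".hyp.text")))

static inline unsigned long helper_hint_only(unsigned long v)
{
	return v ^ 1;	/* may be outlined into .text */
}

static inline __attribute__((always_inline))
unsigned long helper_forced(unsigned long v)
{
	return v ^ 1;	/* guaranteed to be emitted in the caller */
}

unsigned long __hyp_section hyp_entry(unsigned long v)
{
	return helper_forced(v);	/* body inlined into .hyp.text */
}

With the forced variant, the helper's code is always emitted inside hyp_entry() and therefore inside the mapped hyp section, which is what the __always_inline annotations below guarantee.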
1 parent: b3f15ec

5 files changed: +29 −28 lines

arch/arm64/include/asm/arch_gicv3.h

Lines changed: 1 addition & 1 deletion
@@ -32,7 +32,7 @@ static inline void gic_write_eoir(u32 irq)
 	isb();
 }
 
-static inline void gic_write_dir(u32 irq)
+static __always_inline void gic_write_dir(u32 irq)
 {
 	write_sysreg_s(irq, SYS_ICC_DIR_EL1);
 	isb();

arch/arm64/include/asm/cpufeature.h

Lines changed: 1 addition & 1 deletion
@@ -581,7 +581,7 @@ static inline bool system_supports_sve(void)
 		cpus_have_const_cap(ARM64_SVE);
 }
 
-static inline bool system_supports_cnp(void)
+static __always_inline bool system_supports_cnp(void)
 {
 	return IS_ENABLED(CONFIG_ARM64_CNP) &&
 		cpus_have_const_cap(ARM64_HAS_CNP);

arch/arm64/include/asm/kvm_emulate.h

Lines changed: 24 additions & 24 deletions
@@ -36,7 +36,7 @@ void kvm_inject_undef32(struct kvm_vcpu *vcpu);
 void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr);
 void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr);
 
-static inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
+static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
 {
 	return !(vcpu->arch.hcr_el2 & HCR_RW);
 }
@@ -127,7 +127,7 @@ static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
 	vcpu->arch.vsesr_el2 = vsesr;
 }
 
-static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
+static __always_inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
 {
 	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
 }
@@ -153,17 +153,17 @@ static inline void vcpu_write_elr_el1(const struct kvm_vcpu *vcpu, unsigned long
 	*__vcpu_elr_el1(vcpu) = v;
 }
 
-static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
+static __always_inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
 {
 	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pstate;
 }
 
-static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
+static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
 {
 	return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
 }
 
-static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
 {
 	if (vcpu_mode_is_32bit(vcpu))
 		return kvm_condition_valid32(vcpu);
@@ -181,13 +181,13 @@ static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
  * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
  * AArch32 with banked registers.
  */
-static inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
+static __always_inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
 					 u8 reg_num)
 {
 	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num];
 }
 
-static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
+static __always_inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
 				unsigned long val)
 {
 	if (reg_num != 31)
@@ -264,12 +264,12 @@ static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
 	return mode != PSR_MODE_EL0t;
 }
 
-static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
+static __always_inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.fault.esr_el2;
 }
 
-static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
+static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
 {
 	u32 esr = kvm_vcpu_get_hsr(vcpu);
 
@@ -279,12 +279,12 @@ static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
 	return -1;
 }
 
-static inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
+static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.fault.far_el2;
 }
 
-static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
+static __always_inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
 {
 	return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
 }
@@ -299,7 +299,7 @@ static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
 	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_xVC_IMM_MASK;
 }
 
-static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
 {
 	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
 }
@@ -319,17 +319,17 @@ static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
 	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SF);
 }
 
-static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
+static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
 {
 	return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
 }
 
-static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
 {
 	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
 }
 
-static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
 {
 	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
 		kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
@@ -340,18 +340,18 @@ static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
 	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM);
 }
 
-static inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
+static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
 {
 	return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
 }
 
 /* This one is not specific to Data Abort */
-static inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
 {
 	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_IL);
 }
 
-static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
+static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
 {
 	return ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
 }
@@ -361,17 +361,17 @@ static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
 	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
 }
 
-static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
+static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
 {
 	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
 }
 
-static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
+static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
 {
 	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
}
 
-static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
 {
 	switch (kvm_vcpu_trap_get_fault(vcpu)) {
 	case FSC_SEA:
@@ -390,7 +390,7 @@ static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
 	}
 }
 
-static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
+static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
 {
 	u32 esr = kvm_vcpu_get_hsr(vcpu);
 	return ESR_ELx_SYS64_ISS_RT(esr);
@@ -504,7 +504,7 @@ static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
 	return data;	/* Leave LE untouched */
 }
 
-static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
+static __always_inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
 {
 	if (vcpu_mode_is_32bit(vcpu))
 		kvm_skip_instr32(vcpu, is_wide_instr);
@@ -519,7 +519,7 @@ static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
  * Skip an instruction which has been emulated at hyp while most guest sysregs
  * are live.
  */
-static inline void __hyp_text __kvm_skip_instr(struct kvm_vcpu *vcpu)
+static __always_inline void __hyp_text __kvm_skip_instr(struct kvm_vcpu *vcpu)
 {
 	*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
 	vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(SYS_SPSR);

arch/arm64/include/asm/kvm_mmu.h

Lines changed: 2 additions & 1 deletion
@@ -93,7 +93,7 @@ void kvm_update_va_mask(struct alt_instr *alt,
 			__le32 *origptr, __le32 *updptr, int nr_inst);
 void kvm_compute_layout(void);
 
-static inline unsigned long __kern_hyp_va(unsigned long v)
+static __always_inline unsigned long __kern_hyp_va(unsigned long v)
 {
 	asm volatile(ALTERNATIVE_CB("and %0, %0, #1\n"
 				    "ror %0, %0, #1\n"
@@ -473,6 +473,7 @@ static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
 extern void *__kvm_bp_vect_base;
 extern int __kvm_harden_el2_vector_slot;
 
+/* This is only called on a VHE system */
 static inline void *kvm_get_hyp_vector(void)
 {
 	struct bp_hardening_data *data = arm64_get_bp_hardening_data();
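The new comment records why kvm_get_hyp_vector() is deliberately left as a plain static inline: per the note under the commit message, it pulls in the regular per-cpu accessors and this_cpu_has_cap(), and it is only ever called on VHE, where the kernel itself runs at EL2 and an out-of-line copy in .text remains reachable. A hedged sketch of such a VHE-only call site (sketch_install_vectors() and use_vector() are hypothetical names, not from the kernel):

/* Hypothetical caller: guarded by has_vhe(), so the helper never runs
 * at nVHE EL2 and needs no __always_inline treatment. */
static int sketch_install_vectors(void)
{
	if (!has_vhe())
		return 0;	/* nVHE installs its vectors elsewhere */
	return use_vector(kvm_get_hyp_vector());
}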

arch/arm64/include/asm/virt.h

Lines changed: 1 addition & 1 deletion
@@ -83,7 +83,7 @@ static inline bool is_kernel_in_hyp_mode(void)
 	return read_sysreg(CurrentEL) == CurrentEL_EL2;
 }
 
-static inline bool has_vhe(void)
+static __always_inline bool has_vhe(void)
 {
 	if (cpus_have_const_cap(ARM64_HAS_VIRT_HOST_EXTN))
 		return true;
