
Commit e951445

Merge tag 'kvmarm-fixes-5.6-1' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD
KVM/arm fixes for 5.6, take #1

- Fix compilation on 32bit
- Move VHE guest entry/exit into the VHE-specific entry code
- Make sure all functions called by the non-VHE HYP code are tagged as __always_inline
2 parents: ef935c2 + e43f133
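The third item accounts for most of the diff below: helpers reachable from non-VHE HYP code are promoted from `static inline` to `static __always_inline`. Non-VHE HYP code runs at EL2 with only the __hyp_text mapping available, so if the compiler decides to emit an out-of-line copy of a plain `static inline` helper in the kernel's normal .text, a call from EL2 would branch into memory that is not mapped at HYP. The following is a minimal standalone sketch of the difference, not part of the commit; the helper names are made up for illustration.

/* always_inline_sketch.c -- illustrative only; build with: gcc -O2 -c always_inline_sketch.c */
#define __always_inline inline __attribute__((always_inline))

/* "inline" is only a hint: the compiler may still emit this out of line in
 * ordinary .text, which the non-VHE HYP mapping cannot reach. */
static inline unsigned int helper_hint(unsigned int x)
{
	return x * 2;
}

/* __always_inline forces the body to be folded into every caller, so the code
 * ends up in whatever section the caller lives in (e.g. __hyp_text). */
static __always_inline unsigned int helper_forced(unsigned int x)
{
	return x * 2;
}

unsigned int caller(unsigned int x)
{
	return helper_hint(x) + helper_forced(x);
}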


15 files changed: 84 additions, 77 deletions


arch/arm/include/asm/kvm_host.h

Lines changed: 0 additions & 3 deletions
@@ -392,9 +392,6 @@ static inline void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu) {}
 static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {}
 static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}
 
-static inline void kvm_arm_vhe_guest_enter(void) {}
-static inline void kvm_arm_vhe_guest_exit(void) {}
-
 #define KVM_BP_HARDEN_UNKNOWN		-1
 #define KVM_BP_HARDEN_WA_NEEDED	0
 #define KVM_BP_HARDEN_NOT_REQUIRED	1

arch/arm64/include/asm/arch_gicv3.h

Lines changed: 1 addition & 1 deletion
@@ -32,7 +32,7 @@ static inline void gic_write_eoir(u32 irq)
 	isb();
 }
 
-static inline void gic_write_dir(u32 irq)
+static __always_inline void gic_write_dir(u32 irq)
 {
 	write_sysreg_s(irq, SYS_ICC_DIR_EL1);
 	isb();

arch/arm64/include/asm/cache.h

Lines changed: 1 addition & 1 deletion
@@ -69,7 +69,7 @@ static inline int icache_is_aliasing(void)
 	return test_bit(ICACHEF_ALIASING, &__icache_flags);
 }
 
-static inline int icache_is_vpipt(void)
+static __always_inline int icache_is_vpipt(void)
 {
 	return test_bit(ICACHEF_VPIPT, &__icache_flags);
 }

arch/arm64/include/asm/cacheflush.h

Lines changed: 1 addition & 1 deletion
@@ -145,7 +145,7 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 extern void flush_dcache_page(struct page *);
 
-static inline void __flush_icache_all(void)
+static __always_inline void __flush_icache_all(void)
 {
 	if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC))
 		return;

arch/arm64/include/asm/cpufeature.h

Lines changed: 5 additions & 5 deletions
@@ -435,13 +435,13 @@ cpuid_feature_extract_signed_field(u64 features, int field)
 	return cpuid_feature_extract_signed_field_width(features, field, 4);
 }
 
-static inline unsigned int __attribute_const__
+static __always_inline unsigned int __attribute_const__
 cpuid_feature_extract_unsigned_field_width(u64 features, int field, int width)
 {
 	return (u64)(features << (64 - width - field)) >> (64 - width);
 }
 
-static inline unsigned int __attribute_const__
+static __always_inline unsigned int __attribute_const__
 cpuid_feature_extract_unsigned_field(u64 features, int field)
 {
 	return cpuid_feature_extract_unsigned_field_width(features, field, 4);
@@ -564,7 +564,7 @@ static inline bool system_supports_mixed_endian(void)
 	return val == 0x1;
 }
 
-static inline bool system_supports_fpsimd(void)
+static __always_inline bool system_supports_fpsimd(void)
 {
 	return !cpus_have_const_cap(ARM64_HAS_NO_FPSIMD);
 }
@@ -575,13 +575,13 @@ static inline bool system_uses_ttbr0_pan(void)
 		!cpus_have_const_cap(ARM64_HAS_PAN);
 }
 
-static inline bool system_supports_sve(void)
+static __always_inline bool system_supports_sve(void)
 {
 	return IS_ENABLED(CONFIG_ARM64_SVE) &&
 		cpus_have_const_cap(ARM64_SVE);
 }
 
-static inline bool system_supports_cnp(void)
+static __always_inline bool system_supports_cnp(void)
 {
 	return IS_ENABLED(CONFIG_ARM64_CNP) &&
 		cpus_have_const_cap(ARM64_HAS_CNP);

arch/arm64/include/asm/io.h

Lines changed: 2 additions & 2 deletions
@@ -34,7 +34,7 @@ static inline void __raw_writew(u16 val, volatile void __iomem *addr)
 }
 
 #define __raw_writel __raw_writel
-static inline void __raw_writel(u32 val, volatile void __iomem *addr)
+static __always_inline void __raw_writel(u32 val, volatile void __iomem *addr)
 {
 	asm volatile("str %w0, [%1]" : : "rZ" (val), "r" (addr));
 }
@@ -69,7 +69,7 @@ static inline u16 __raw_readw(const volatile void __iomem *addr)
 }
 
 #define __raw_readl __raw_readl
-static inline u32 __raw_readl(const volatile void __iomem *addr)
+static __always_inline u32 __raw_readl(const volatile void __iomem *addr)
 {
 	u32 val;
 	asm volatile(ALTERNATIVE("ldr %w0, [%1]",

arch/arm64/include/asm/kvm_emulate.h

Lines changed: 24 additions & 24 deletions
@@ -36,7 +36,7 @@ void kvm_inject_undef32(struct kvm_vcpu *vcpu);
 void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr);
 void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr);
 
-static inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
+static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
 {
 	return !(vcpu->arch.hcr_el2 & HCR_RW);
 }
@@ -127,7 +127,7 @@ static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
 	vcpu->arch.vsesr_el2 = vsesr;
 }
 
-static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
+static __always_inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
 {
 	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
 }
@@ -153,17 +153,17 @@ static inline void vcpu_write_elr_el1(const struct kvm_vcpu *vcpu, unsigned long
 	*__vcpu_elr_el1(vcpu) = v;
 }
 
-static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
+static __always_inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
 {
 	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pstate;
 }
 
-static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
+static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
 {
 	return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
 }
 
-static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
 {
 	if (vcpu_mode_is_32bit(vcpu))
 		return kvm_condition_valid32(vcpu);
@@ -181,13 +181,13 @@ static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
  * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
  * AArch32 with banked registers.
  */
-static inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
+static __always_inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
					 u8 reg_num)
 {
 	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num];
 }
 
-static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
+static __always_inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
				unsigned long val)
 {
 	if (reg_num != 31)
@@ -264,12 +264,12 @@ static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
 	return mode != PSR_MODE_EL0t;
 }
 
-static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
+static __always_inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.fault.esr_el2;
 }
 
-static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
+static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
 {
 	u32 esr = kvm_vcpu_get_hsr(vcpu);
 
@@ -279,12 +279,12 @@ static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
 	return -1;
 }
 
-static inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
+static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.fault.far_el2;
 }
 
-static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
+static __always_inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
 {
 	return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
 }
@@ -299,7 +299,7 @@ static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
 	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_xVC_IMM_MASK;
 }
 
-static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
 {
 	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
 }
@@ -319,17 +319,17 @@ static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
 	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SF);
 }
 
-static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
+static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
 {
 	return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
 }
 
-static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
 {
 	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
 }
 
-static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
 {
 	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
		kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
@@ -340,18 +340,18 @@ static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
 	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM);
 }
 
-static inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
+static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
 {
 	return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
 }
 
 /* This one is not specific to Data Abort */
-static inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
 {
 	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_IL);
 }
 
-static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
+static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
 {
 	return ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
 }
@@ -361,17 +361,17 @@ static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
 	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
 }
 
-static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
+static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
 {
 	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
 }
 
-static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
+static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
 {
 	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
 }
 
-static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
 {
 	switch (kvm_vcpu_trap_get_fault(vcpu)) {
 	case FSC_SEA:
@@ -390,7 +390,7 @@ static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
 	}
 }
 
-static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
+static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
 {
 	u32 esr = kvm_vcpu_get_hsr(vcpu);
 	return ESR_ELx_SYS64_ISS_RT(esr);
@@ -504,7 +504,7 @@ static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
 	return data;	/* Leave LE untouched */
 }
 
-static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
+static __always_inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
 {
 	if (vcpu_mode_is_32bit(vcpu))
 		kvm_skip_instr32(vcpu, is_wide_instr);
@@ -519,7 +519,7 @@ static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
 * Skip an instruction which has been emulated at hyp while most guest sysregs
 * are live.
 */
-static inline void __hyp_text __kvm_skip_instr(struct kvm_vcpu *vcpu)
+static __always_inline void __hyp_text __kvm_skip_instr(struct kvm_vcpu *vcpu)
 {
 	*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
 	vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(SYS_SPSR);

arch/arm64/include/asm/kvm_host.h

Lines changed: 0 additions & 32 deletions
@@ -626,38 +626,6 @@ static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
 static inline void kvm_clr_pmu_events(u32 clr) {}
 #endif
 
-static inline void kvm_arm_vhe_guest_enter(void)
-{
-	local_daif_mask();
-
-	/*
-	 * Having IRQs masked via PMR when entering the guest means the GIC
-	 * will not signal the CPU of interrupts of lower priority, and the
-	 * only way to get out will be via guest exceptions.
-	 * Naturally, we want to avoid this.
-	 *
-	 * local_daif_mask() already sets GIC_PRIO_PSR_I_SET, we just need a
-	 * dsb to ensure the redistributor is forwards EL2 IRQs to the CPU.
-	 */
-	pmr_sync();
-}
-
-static inline void kvm_arm_vhe_guest_exit(void)
-{
-	/*
-	 * local_daif_restore() takes care to properly restore PSTATE.DAIF
-	 * and the GIC PMR if the host is using IRQ priorities.
-	 */
-	local_daif_restore(DAIF_PROCCTX_NOIRQ);
-
-	/*
-	 * When we exit from the guest we change a number of CPU configuration
-	 * parameters, such as traps. Make sure these changes take effect
-	 * before running the host or additional guests.
-	 */
-	isb();
-}
-
 #define KVM_BP_HARDEN_UNKNOWN		-1
 #define KVM_BP_HARDEN_WA_NEEDED	0
 #define KVM_BP_HARDEN_NOT_REQUIRED	1
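The two helpers deleted above are the "VHE guest entry/exit" work mentioned in the merge description; their masking/restore sequence is folded directly into the VHE run path rather than dropped. The destination file is not part of this excerpt, so the sketch below only shows the ordering of the calls. The run function and world-switch names are placeholders, not names taken from this commit; local_daif_mask(), pmr_sync(), local_daif_restore() and isb() are the real arm64 primitives used by the deleted code.

#include <linux/kvm_host.h>	/* struct kvm_vcpu */
#include <asm/barrier.h>	/* isb(), pmr_sync() */
#include <asm/daifflags.h>	/* local_daif_mask(), local_daif_restore(), DAIF_PROCCTX_NOIRQ */

int __run_guest_sketch(struct kvm_vcpu *vcpu);	/* placeholder for the real world switch */

static int kvm_vcpu_run_vhe_sketch(struct kvm_vcpu *vcpu)
{
	int exit_code;

	/* Mask DAIF, then make the GIC redistributor observe the updated PMR
	 * before entering the guest. */
	local_daif_mask();
	pmr_sync();

	exit_code = __run_guest_sketch(vcpu);

	/* Restore PSTATE.DAIF (and the GIC PMR), then make the trap/config
	 * changes done on exit visible before running host code again. */
	local_daif_restore(DAIF_PROCCTX_NOIRQ);
	isb();

	return exit_code;
}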

arch/arm64/include/asm/kvm_hyp.h

Lines changed: 7 additions & 0 deletions
@@ -47,6 +47,13 @@
 #define read_sysreg_el2(r)	read_sysreg_elx(r, _EL2, _EL1)
 #define write_sysreg_el2(v,r)	write_sysreg_elx(v, r, _EL2, _EL1)
 
+/*
+ * Without an __arch_swab32(), we fall back to ___constant_swab32(), but the
+ * static inline can allow the compiler to out-of-line this. KVM always wants
+ * the macro version as its always inlined.
+ */
+#define __kvm_swab32(x) ___constant_swab32(x)
+
 int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu);
 
 void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
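The new __kvm_swab32() gives HYP code a byte-swap that is guaranteed to expand at the call site: ___constant_swab32() (from uapi/linux/swab.h) is a pure macro, whereas the generic swab path can go through a static inline helper the compiler is free to emit out of line, outside the HYP mapping. A hedged usage sketch follows; the helper name is hypothetical, and the real users live elsewhere in the HYP code that is not shown in this excerpt.

#include <linux/types.h>	/* u32 */
#include <linux/swab.h>		/* ___constant_swab32() */
#include <asm/kvm_hyp.h>	/* __kvm_swab32() */

/* Hypothetical HYP-side helper: byte-swap a 32-bit value without risking a
 * call into an out-of-line swab helper living outside the HYP mapping. */
static __always_inline u32 hyp_swab32_sketch(u32 x)
{
	return __kvm_swab32(x);	/* expands to ___constant_swab32(x) in place */
}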

arch/arm64/include/asm/kvm_mmu.h

Lines changed: 2 additions & 1 deletion
@@ -93,7 +93,7 @@ void kvm_update_va_mask(struct alt_instr *alt,
			__le32 *origptr, __le32 *updptr, int nr_inst);
 void kvm_compute_layout(void);
 
-static inline unsigned long __kern_hyp_va(unsigned long v)
+static __always_inline unsigned long __kern_hyp_va(unsigned long v)
 {
	asm volatile(ALTERNATIVE_CB("and %0, %0, #1\n"
				    "ror %0, %0, #1\n"
@@ -473,6 +473,7 @@ static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
 extern void *__kvm_bp_vect_base;
 extern int __kvm_harden_el2_vector_slot;
 
+/* This is only called on a VHE system */
 static inline void *kvm_get_hyp_vector(void)
 {
	struct bp_hardening_data *data = arm64_get_bp_hardening_data();
