Skip to content

Commit 3088d26

Browse files
committed
Merge tag 'x86-urgent-2025-04-18' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull misc x86 fixes from Ingo Molnar:

 - Fix hypercall detection on Xen guests

 - Extend the AMD microcode loader SHA check to Zen5, to block loading
   of any unreleased standalone Zen5 microcode patches

 - Add new Intel CPU model number for Bartlett Lake

 - Fix the workaround for AMD erratum 1054

 - Fix buggy early memory acceptance between SEV-SNP guests and the EFI
   stub

* tag 'x86-urgent-2025-04-18' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/boot/sev: Avoid shared GHCB page for early memory acceptance
  x86/cpu/amd: Fix workaround for erratum 1054
  x86/cpu: Add CPU model number for Bartlett Lake CPUs with Raptor Cove cores
  x86/microcode/AMD: Extend the SHA check to Zen5, block loading of any unreleased standalone Zen5 microcode patches
  x86/xen: Fix __xen_hypercall_setfunc()
2 parents ac85740 + d54d610 commit 3088d26

File tree

7 files changed

+43
-68
lines changed

7 files changed

+43
-68
lines changed

arch/x86/boot/compressed/mem.c

+4-1
Original file line numberDiff line numberDiff line change
@@ -34,11 +34,14 @@ static bool early_is_tdx_guest(void)
3434

3535
void arch_accept_memory(phys_addr_t start, phys_addr_t end)
3636
{
37+
static bool sevsnp;
38+
3739
/* Platform-specific memory-acceptance call goes here */
3840
if (early_is_tdx_guest()) {
3941
if (!tdx_accept_memory(start, end))
4042
panic("TDX: Failed to accept memory\n");
41-
} else if (sev_snp_enabled()) {
43+
} else if (sevsnp || (sev_get_status() & MSR_AMD64_SEV_SNP_ENABLED)) {
44+
sevsnp = true;
4245
snp_accept_memory(start, end);
4346
} else {
4447
error("Cannot accept memory: unknown platform\n");

arch/x86/boot/compressed/sev.c

+15-52
Original file line numberDiff line numberDiff line change
@@ -164,10 +164,7 @@ bool sev_snp_enabled(void)
164164

165165
static void __page_state_change(unsigned long paddr, enum psc_op op)
166166
{
167-
u64 val;
168-
169-
if (!sev_snp_enabled())
170-
return;
167+
u64 val, msr;
171168

172169
/*
173170
* If private -> shared then invalidate the page before requesting the
@@ -176,6 +173,9 @@ static void __page_state_change(unsigned long paddr, enum psc_op op)
176173
if (op == SNP_PAGE_STATE_SHARED)
177174
pvalidate_4k_page(paddr, paddr, false);
178175

176+
/* Save the current GHCB MSR value */
177+
msr = sev_es_rd_ghcb_msr();
178+
179179
/* Issue VMGEXIT to change the page state in RMP table. */
180180
sev_es_wr_ghcb_msr(GHCB_MSR_PSC_REQ_GFN(paddr >> PAGE_SHIFT, op));
181181
VMGEXIT();
@@ -185,6 +185,9 @@ static void __page_state_change(unsigned long paddr, enum psc_op op)
185185
if ((GHCB_RESP_CODE(val) != GHCB_MSR_PSC_RESP) || GHCB_MSR_PSC_RESP_VAL(val))
186186
sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
187187

188+
/* Restore the GHCB MSR value */
189+
sev_es_wr_ghcb_msr(msr);
190+
188191
/*
189192
* Now that page state is changed in the RMP table, validate it so that it is
190193
* consistent with the RMP entry.
@@ -195,11 +198,17 @@ static void __page_state_change(unsigned long paddr, enum psc_op op)
195198

196199
void snp_set_page_private(unsigned long paddr)
197200
{
201+
if (!sev_snp_enabled())
202+
return;
203+
198204
__page_state_change(paddr, SNP_PAGE_STATE_PRIVATE);
199205
}
200206

201207
void snp_set_page_shared(unsigned long paddr)
202208
{
209+
if (!sev_snp_enabled())
210+
return;
211+
203212
__page_state_change(paddr, SNP_PAGE_STATE_SHARED);
204213
}
205214

@@ -223,56 +232,10 @@ static bool early_setup_ghcb(void)
223232
return true;
224233
}
225234

226-
static phys_addr_t __snp_accept_memory(struct snp_psc_desc *desc,
227-
phys_addr_t pa, phys_addr_t pa_end)
228-
{
229-
struct psc_hdr *hdr;
230-
struct psc_entry *e;
231-
unsigned int i;
232-
233-
hdr = &desc->hdr;
234-
memset(hdr, 0, sizeof(*hdr));
235-
236-
e = desc->entries;
237-
238-
i = 0;
239-
while (pa < pa_end && i < VMGEXIT_PSC_MAX_ENTRY) {
240-
hdr->end_entry = i;
241-
242-
e->gfn = pa >> PAGE_SHIFT;
243-
e->operation = SNP_PAGE_STATE_PRIVATE;
244-
if (IS_ALIGNED(pa, PMD_SIZE) && (pa_end - pa) >= PMD_SIZE) {
245-
e->pagesize = RMP_PG_SIZE_2M;
246-
pa += PMD_SIZE;
247-
} else {
248-
e->pagesize = RMP_PG_SIZE_4K;
249-
pa += PAGE_SIZE;
250-
}
251-
252-
e++;
253-
i++;
254-
}
255-
256-
if (vmgexit_psc(boot_ghcb, desc))
257-
sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
258-
259-
pvalidate_pages(desc);
260-
261-
return pa;
262-
}
263-
264235
void snp_accept_memory(phys_addr_t start, phys_addr_t end)
265236
{
266-
struct snp_psc_desc desc = {};
267-
unsigned int i;
268-
phys_addr_t pa;
269-
270-
if (!boot_ghcb && !early_setup_ghcb())
271-
sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
272-
273-
pa = start;
274-
while (pa < end)
275-
pa = __snp_accept_memory(&desc, pa, end);
237+
for (phys_addr_t pa = start; pa < end; pa += PAGE_SIZE)
238+
__page_state_change(pa, SNP_PAGE_STATE_PRIVATE);
276239
}
277240

278241
void sev_es_shutdown_ghcb(void)

arch/x86/boot/compressed/sev.h

+2
Original file line numberDiff line numberDiff line change
@@ -12,11 +12,13 @@
1212

1313
bool sev_snp_enabled(void);
1414
void snp_accept_memory(phys_addr_t start, phys_addr_t end);
15+
u64 sev_get_status(void);
1516

1617
#else
1718

1819
static inline bool sev_snp_enabled(void) { return false; }
1920
static inline void snp_accept_memory(phys_addr_t start, phys_addr_t end) { }
21+
static inline u64 sev_get_status(void) { return 0; }
2022

2123
#endif
2224

arch/x86/include/asm/intel-family.h

+2
Original file line numberDiff line numberDiff line change
@@ -126,6 +126,8 @@
126126
#define INTEL_GRANITERAPIDS_X IFM(6, 0xAD) /* Redwood Cove */
127127
#define INTEL_GRANITERAPIDS_D IFM(6, 0xAE)
128128

129+
#define INTEL_BARTLETTLAKE IFM(6, 0xD7) /* Raptor Cove */
130+
129131
/* "Hybrid" Processors (P-Core/E-Core) */
130132

131133
#define INTEL_LAKEFIELD IFM(6, 0x8A) /* Sunny Cove / Tremont */

arch/x86/kernel/cpu/amd.c

+12-7
Original file line numberDiff line numberDiff line change
@@ -869,6 +869,16 @@ static void init_amd_zen1(struct cpuinfo_x86 *c)
869869

870870
pr_notice_once("AMD Zen1 DIV0 bug detected. Disable SMT for full protection.\n");
871871
setup_force_cpu_bug(X86_BUG_DIV0);
872+
873+
/*
874+
* Turn off the Instructions Retired free counter on machines that are
875+
* susceptible to erratum #1054 "Instructions Retired Performance
876+
* Counter May Be Inaccurate".
877+
*/
878+
if (c->x86_model < 0x30) {
879+
msr_clear_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);
880+
clear_cpu_cap(c, X86_FEATURE_IRPERF);
881+
}
872882
}
873883

874884
static bool cpu_has_zenbleed_microcode(void)
@@ -1052,13 +1062,8 @@ static void init_amd(struct cpuinfo_x86 *c)
10521062
if (!cpu_feature_enabled(X86_FEATURE_XENPV))
10531063
set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
10541064

1055-
/*
1056-
* Turn on the Instructions Retired free counter on machines not
1057-
* susceptible to erratum #1054 "Instructions Retired Performance
1058-
* Counter May Be Inaccurate".
1059-
*/
1060-
if (cpu_has(c, X86_FEATURE_IRPERF) &&
1061-
(boot_cpu_has(X86_FEATURE_ZEN1) && c->x86_model > 0x2f))
1065+
/* Enable the Instructions Retired free counter */
1066+
if (cpu_has(c, X86_FEATURE_IRPERF))
10621067
msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);
10631068

10641069
check_null_seg_clears_base(c);

arch/x86/kernel/cpu/microcode/amd.c

+7-2
Original file line numberDiff line numberDiff line change
@@ -199,6 +199,12 @@ static bool need_sha_check(u32 cur_rev)
199199
case 0xa70c0: return cur_rev <= 0xa70C009; break;
200200
case 0xaa001: return cur_rev <= 0xaa00116; break;
201201
case 0xaa002: return cur_rev <= 0xaa00218; break;
202+
case 0xb0021: return cur_rev <= 0xb002146; break;
203+
case 0xb1010: return cur_rev <= 0xb101046; break;
204+
case 0xb2040: return cur_rev <= 0xb204031; break;
205+
case 0xb4040: return cur_rev <= 0xb404031; break;
206+
case 0xb6000: return cur_rev <= 0xb600031; break;
207+
case 0xb7000: return cur_rev <= 0xb700031; break;
202208
default: break;
203209
}
204210

@@ -214,8 +220,7 @@ static bool verify_sha256_digest(u32 patch_id, u32 cur_rev, const u8 *data, unsi
214220
struct sha256_state s;
215221
int i;
216222

217-
if (x86_family(bsp_cpuid_1_eax) < 0x17 ||
218-
x86_family(bsp_cpuid_1_eax) > 0x19)
223+
if (x86_family(bsp_cpuid_1_eax) < 0x17)
219224
return true;
220225

221226
if (!need_sha_check(cur_rev))

arch/x86/xen/enlighten.c

+1-6
Original file line numberDiff line numberDiff line change
@@ -103,10 +103,6 @@ noinstr void *__xen_hypercall_setfunc(void)
103103
void (*func)(void);
104104

105105
/*
106-
* Xen is supported only on CPUs with CPUID, so testing for
107-
* X86_FEATURE_CPUID is a test for early_cpu_init() having been
108-
* run.
109-
*
110106
* Note that __xen_hypercall_setfunc() is noinstr only due to a nasty
111107
* dependency chain: it is being called via the xen_hypercall static
112108
* call when running as a PVH or HVM guest. Hypercalls need to be
@@ -118,8 +114,7 @@ noinstr void *__xen_hypercall_setfunc(void)
118114
*/
119115
instrumentation_begin();
120116

121-
if (!boot_cpu_has(X86_FEATURE_CPUID))
122-
xen_get_vendor();
117+
xen_get_vendor();
123118

124119
if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
125120
boot_cpu_data.x86_vendor == X86_VENDOR_HYGON))

0 commit comments

Comments (0)