/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MSR_H
#define _ASM_X86_MSR_H

#include "msr-index.h"

#ifndef __ASSEMBLY__

#include <asm/asm.h>
#include <asm/errno.h>
#include <asm/cpumask.h>
#include <uapi/asm/msr.h>

struct msr {
	union {
		struct {
			u32 l;
			u32 h;
		};
		u64 q;
	};
};

struct msr_info {
	u32 msr_no;
	struct msr reg;
	struct msr *msrs;
	int err;
};

struct msr_regs_info {
	u32 *regs;
	int err;
};

struct saved_msr {
	bool valid;
	struct msr_info info;
};

struct saved_msrs {
	unsigned int num;
	struct saved_msr *array;
};

/*
 * Both i386 and x86_64 return a 64-bit value in edx:eax, but GCC's "A"
 * constraint has different meanings. For i386, "A" means exactly
 * edx:eax, while for x86_64 it doesn't mean rdx:rax or edx:eax. Instead,
 * it means rax *or* rdx.
 */
#ifdef CONFIG_X86_64
/* Using 64-bit values saves one instruction clearing the high half of low */
#define DECLARE_ARGS(val, low, high)	unsigned long low, high
#define EAX_EDX_VAL(val, low, high)	((low) | (high) << 32)
#define EAX_EDX_RET(val, low, high)	"=a" (low), "=d" (high)
#else
#define DECLARE_ARGS(val, low, high)	unsigned long long val
#define EAX_EDX_VAL(val, low, high)	(val)
#define EAX_EDX_RET(val, low, high)	"=A" (val)
#endif

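/*
 * Sketch of how the helpers above combine in a caller (illustrative
 * only, modeled on __rdmsr() further below):
 *
 *	DECLARE_ARGS(val, low, high);
 *
 *	asm volatile("rdmsr" : EAX_EDX_RET(val, low, high) : "c" (msr));
 *	return EAX_EDX_VAL(val, low, high);
 *
 * On x86_64 this declares two longs and merges them with a shift-or; on
 * i386 the "A" constraint already returns the value in edx:eax.
 */
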
#ifdef CONFIG_TRACEPOINTS
/*
 * Be very careful with includes. This header is prone to include loops.
 */
#include <asm/atomic.h>
#include <linux/tracepoint-defs.h>

extern struct tracepoint __tracepoint_read_msr;
extern struct tracepoint __tracepoint_write_msr;
extern struct tracepoint __tracepoint_rdpmc;
#define msr_tracepoint_active(t) static_key_false(&(t).key)
extern void do_trace_write_msr(unsigned int msr, u64 val, int failed);
extern void do_trace_read_msr(unsigned int msr, u64 val, int failed);
extern void do_trace_rdpmc(unsigned int msr, u64 val, int failed);
#else
#define msr_tracepoint_active(t) false
static inline void do_trace_write_msr(unsigned int msr, u64 val, int failed) {}
static inline void do_trace_read_msr(unsigned int msr, u64 val, int failed) {}
static inline void do_trace_rdpmc(unsigned int msr, u64 val, int failed) {}
#endif

/*
 * __rdmsr() and __wrmsr() are the two primitives which are the bare minimum MSR
 * accessors and should not have any tracing or other functionality piggybacking
 * on them - those are *purely* for accessing MSRs and nothing more. So don't even
 * think of extending them - you will be slapped with a stinking trout or a frozen
 * shark will reach you, wherever you are! You've been warned.
 */
static inline unsigned long long notrace __rdmsr(unsigned int msr)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("1: rdmsr\n"
		     "2:\n"
		     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_rdmsr_unsafe)
		     : EAX_EDX_RET(val, low, high) : "c" (msr));

	return EAX_EDX_VAL(val, low, high);
}

static inline void notrace __wrmsr(unsigned int msr, u32 low, u32 high)
{
	asm volatile("1: wrmsr\n"
		     "2:\n"
		     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_wrmsr_unsafe)
		     : : "c" (msr), "a"(low), "d" (high) : "memory");
}

#define native_rdmsr(msr, val1, val2)			\
do {							\
	u64 __val = __rdmsr((msr));			\
	(void)((val1) = (u32)__val);			\
	(void)((val2) = (u32)(__val >> 32));		\
} while (0)

#define native_wrmsr(msr, low, high)			\
	__wrmsr(msr, low, high)

#define native_wrmsrl(msr, val)				\
	__wrmsr((msr), (u32)((u64)(val)),		\
		       (u32)((u64)(val) >> 32))

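/*
 * Usage sketch for the native_* helpers above (illustrative only;
 * MSR_IA32_TSC comes from msr-index.h, the surrounding code is made up):
 *
 *	u32 lo, hi;
 *	u64 tsc;
 *
 *	native_rdmsr(MSR_IA32_TSC, lo, hi);
 *	tsc = ((u64)hi << 32) | lo;
 *	native_wrmsrl(MSR_IA32_TSC, tsc);
 */
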
static inline unsigned long long native_read_msr(unsigned int msr)
{
	unsigned long long val;

	val = __rdmsr(msr);

	if (msr_tracepoint_active(__tracepoint_read_msr))
		do_trace_read_msr(msr, val, 0);

	return val;
}

static inline unsigned long long native_read_msr_safe(unsigned int msr,
						      int *err)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("2: rdmsr ; xor %[err],%[err]\n"
		     "1:\n\t"
		     ".section .fixup,\"ax\"\n\t"
		     "3: mov %[fault],%[err]\n\t"
		     "xorl %%eax, %%eax\n\t"
		     "xorl %%edx, %%edx\n\t"
		     "jmp 1b\n\t"
		     ".previous\n\t"
		     _ASM_EXTABLE(2b, 3b)
		     : [err] "=r" (*err), EAX_EDX_RET(val, low, high)
		     : "c" (msr), [fault] "i" (-EIO));
	if (msr_tracepoint_active(__tracepoint_read_msr))
		do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), *err);
	return EAX_EDX_VAL(val, low, high);
}

/* Can be uninlined because referenced by paravirt */
static inline void notrace
native_write_msr(unsigned int msr, u32 low, u32 high)
{
	__wrmsr(msr, low, high);

	if (msr_tracepoint_active(__tracepoint_write_msr))
		do_trace_write_msr(msr, ((u64)high << 32 | low), 0);
}

/* Can be uninlined because referenced by paravirt */
static inline int notrace
native_write_msr_safe(unsigned int msr, u32 low, u32 high)
{
	int err;

	asm volatile("2: wrmsr ; xor %[err],%[err]\n"
		     "1:\n\t"
		     ".section .fixup,\"ax\"\n\t"
		     "3: mov %[fault],%[err] ; jmp 1b\n\t"
		     ".previous\n\t"
		     _ASM_EXTABLE(2b, 3b)
		     : [err] "=a" (err)
		     : "c" (msr), "0" (low), "d" (high),
		       [fault] "i" (-EIO)
		     : "memory");
	if (msr_tracepoint_active(__tracepoint_write_msr))
		do_trace_write_msr(msr, ((u64)high << 32 | low), err);
	return err;
}

extern int rdmsr_safe_regs(u32 regs[8]);
extern int wrmsr_safe_regs(u32 regs[8]);

/**
 * rdtsc() - returns the current TSC without ordering constraints
 *
 * rdtsc() returns the result of RDTSC as a 64-bit integer. The
 * only ordering constraint it supplies is the ordering implied by
 * "asm volatile": it will put the RDTSC in the place you expect. The
 * CPU can and will speculatively execute that RDTSC, though, so the
 * results can be non-monotonic if compared on different CPUs.
 */
static __always_inline unsigned long long rdtsc(void)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("rdtsc" : EAX_EDX_RET(val, low, high));

	return EAX_EDX_VAL(val, low, high);
}

/**
 * rdtsc_ordered() - read the current TSC in program order
 *
 * rdtsc_ordered() returns the result of RDTSC as a 64-bit integer.
 * It is ordered like a load to a global in-memory counter. It should
 * be impossible to observe non-monotonic rdtsc_ordered() behavior
 * across multiple CPUs as long as the TSC is synced.
 */
static __always_inline unsigned long long rdtsc_ordered(void)
{
	/*
	 * The RDTSC instruction is not ordered relative to memory
	 * access.  The Intel SDM and the AMD APM are both vague on this
	 * point, but empirically an RDTSC instruction can be
	 * speculatively executed before prior loads.  An RDTSC
	 * immediately after an appropriate barrier appears to be
	 * ordered as a normal load, that is, it provides the same
	 * ordering guarantees as reading from a global memory location
	 * that some other imaginary CPU is updating continuously with a
	 * time stamp.
	 */
	barrier_nospec();
	return rdtsc();
}

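/*
 * Sketch of an elapsed-cycle measurement using the ordered variant
 * (illustrative only; do_work() is a made-up placeholder):
 *
 *	u64 t0, t1;
 *
 *	t0 = rdtsc_ordered();
 *	do_work();
 *	t1 = rdtsc_ordered();
 *
 * t1 - t0 is meaningful here because neither RDTSC can be speculated
 * across do_work(); plain rdtsc() gives no such guarantee.
 */
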
static inline unsigned long long native_read_pmc(int counter)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("rdpmc" : EAX_EDX_RET(val, low, high) : "c" (counter));
	if (msr_tracepoint_active(__tracepoint_rdpmc))
		do_trace_rdpmc(counter, EAX_EDX_VAL(val, low, high), 0);
	return EAX_EDX_VAL(val, low, high);
}

#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else
#include <linux/errno.h>
/*
 * Access to machine-specific registers (available on 586 and better only).
 * Note: the rd* operations modify the parameters directly (without using
 * pointer indirection); this allows gcc to optimize better.
 */

#define rdmsr(msr, low, high)					\
do {								\
	u64 __val = native_read_msr((msr));			\
	(void)((low) = (u32)__val);				\
	(void)((high) = (u32)(__val >> 32));			\
} while (0)

static inline void wrmsr(unsigned int msr, u32 low, u32 high)
{
	native_write_msr(msr, low, high);
}

#define rdmsrl(msr, val)			\
	((val) = native_read_msr((msr)))

static inline void wrmsrl(unsigned int msr, u64 val)
{
	native_write_msr(msr, (u32)(val & 0xffffffffULL), (u32)(val >> 32));
}

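/*
 * Typical read-modify-write with the non-checking accessors above
 * (illustrative only; the choice of MSR_TSC_AUX and the new value are
 * arbitrary). An access to a non-existent MSR is fatal here; use the
 * *_safe() variants below when the MSR may not be implemented:
 *
 *	u64 val;
 *
 *	rdmsrl(MSR_TSC_AUX, val);
 *	wrmsrl(MSR_TSC_AUX, val | 1);
 */
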
/* wrmsr with exception handling */
static inline int wrmsr_safe(unsigned int msr, u32 low, u32 high)
{
	return native_write_msr_safe(msr, low, high);
}

/* rdmsr with exception handling */
#define rdmsr_safe(msr, low, high)				\
({								\
	int __err;						\
	u64 __val = native_read_msr_safe((msr), &__err);	\
	(*low) = (u32)__val;					\
	(*high) = (u32)(__val >> 32);				\
	__err;							\
})

static inline int rdmsrl_safe(unsigned int msr, unsigned long long *p)
{
	int err;

	*p = native_read_msr_safe(msr, &err);
	return err;
}

#define rdpmc(counter, low, high)			\
do {							\
	u64 _l = native_read_pmc((counter));		\
	(low)  = (u32)_l;				\
	(high) = (u32)(_l >> 32);			\
} while (0)

#define rdpmcl(counter, val) ((val) = native_read_pmc(counter))

#endif	/* !CONFIG_PARAVIRT_XXL */

/*
 * 64-bit version of wrmsr_safe():
 */
static inline int wrmsrl_safe(u32 msr, u64 val)
{
	return wrmsr_safe(msr, (u32)val, (u32)(val >> 32));
}

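/*
 * Probing an MSR that may not exist on every CPU (illustrative only;
 * MSR_EXAMPLE is a made-up name):
 *
 *	u64 val;
 *
 *	if (rdmsrl_safe(MSR_EXAMPLE, &val))
 *		return -ENODEV;
 *	wrmsrl_safe(MSR_EXAMPLE, val | 1);
 *
 * The *_safe() variants return a nonzero error instead of letting the
 * #GP from a non-implemented MSR go unhandled.
 */
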
#define write_tsc(low, high)  wrmsr(MSR_IA32_TSC, (low), (high))

#define write_rdtscp_aux(val) wrmsr(MSR_TSC_AUX, (val), 0)

struct msr *msrs_alloc(void);
void msrs_free(struct msr *msrs);
int msr_set_bit(u32 msr, u8 bit);
int msr_clear_bit(u32 msr, u8 bit);

#ifdef CONFIG_SMP
int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
#else  /*  CONFIG_SMP  */
static inline int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	rdmsr(msr_no, *l, *h);
	return 0;
}
static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	wrmsr(msr_no, l, h);
	return 0;
}
static inline int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
	rdmsrl(msr_no, *q);
	return 0;
}
static inline int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
	wrmsrl(msr_no, q);
	return 0;
}
static inline void rdmsr_on_cpus(const struct cpumask *m, u32 msr_no,
				 struct msr *msrs)
{
	rdmsr_on_cpu(0, msr_no, &(msrs[0].l), &(msrs[0].h));
}
static inline void wrmsr_on_cpus(const struct cpumask *m, u32 msr_no,
				 struct msr *msrs)
{
	wrmsr_on_cpu(0, msr_no, msrs[0].l, msrs[0].h);
}
static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no,
				    u32 *l, u32 *h)
{
	return rdmsr_safe(msr_no, l, h);
}
static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	return wrmsr_safe(msr_no, l, h);
}
static inline int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
	return rdmsrl_safe(msr_no, q);
}
static inline int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
	return wrmsrl_safe(msr_no, q);
}
static inline int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
	return rdmsr_safe_regs(regs);
}
static inline int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
	return wrmsr_safe_regs(regs);
}
#endif  /* CONFIG_SMP */
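
/*
 * Cross-CPU access sketch (illustrative only; cpu and the MSR choice are
 * arbitrary). On SMP these helpers run the access on the target CPU,
 * typically via an IPI; on UP they fall back to the local accessors above:
 *
 *	u64 val;
 *
 *	if (rdmsrl_safe_on_cpu(cpu, MSR_TSC_AUX, &val))
 *		return -EIO;
 *	wrmsrl_on_cpu(cpu, MSR_TSC_AUX, val + 1);
 */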
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_MSR_H */