/*
 * Compatibility header for building as an external module.
 */

/*
 * Avoid picking up the kernel's kvm.h in case we have a newer one.
 */

#include <linux/compiler.h>
#include <linux/version.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/time.h>
#include <asm/processor.h>
#include <linux/hrtimer.h>
#include <asm/bitops.h>

#include "kvm-kmod-config.h"

/*
 * 2.6.16 does not have GFP_NOWAIT
 */

#include <linux/gfp.h>

#ifndef GFP_NOWAIT
#define GFP_NOWAIT (GFP_ATOMIC & ~__GFP_HIGH)
#endif


/*
 * kvm profiling support needs 2.6.20
 */
#include <linux/profile.h>

#ifndef KVM_PROFILING
#define KVM_PROFILING 1234
#define prof_on       4321
#endif

/*
 * smp_call_function_single() is not exported below 2.6.20, and has different
 * semantics below 2.6.23.  The 'nonatomic' argument was removed in 2.6.27.
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)

int kvm_smp_call_function_single(int cpu, void (*func)(void *info),
				 void *info, int wait);
#undef smp_call_function_single
#define smp_call_function_single kvm_smp_call_function_single

#endif

/* on_each_cpu() lost an argument in 2.6.27. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)

#define kvm_on_each_cpu(func, info, wait) on_each_cpu(func, info, 0, wait)

#else

#define kvm_on_each_cpu(func, info, wait) on_each_cpu(func, info, wait)

#endif

/*
 * Mutexes were introduced in 2.6.16; fall back to semaphores on older
 * kernels.
 */

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,15)
#define DEFINE_MUTEX(a) DECLARE_MUTEX(a)
#define mutex_lock_interruptible(a) down_interruptible(a)
#define mutex_unlock(a) up(a)
#define mutex_lock(a) down(a)
#define mutex_init(a) init_MUTEX(a)
/* down_trylock() returns 0 on success, while mutex_trylock() returns 1 */
#define mutex_trylock(a) (!down_trylock(a))
#define mutex semaphore
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14)
#ifndef kzalloc
#define kzalloc(size,flags)			\
({						\
	void *__ret = kmalloc(size, flags);	\
	if (__ret)				\
		memset(__ret, 0, size);		\
	__ret;					\
})
#endif
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
#ifndef kmem_cache_zalloc
#define kmem_cache_zalloc(cache,flags)			  \
({							  \
	void *__ret = kmem_cache_alloc(cache, flags);	  \
	if (__ret)                                        \
		memset(__ret, 0, kmem_cache_size(cache)); \
	__ret;                                            \
})
#endif
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)

/* the cpu hotplug stubs are broken if !CONFIG_HOTPLUG_CPU */
#ifndef CONFIG_HOTPLUG_CPU
#define register_cpu_notifier(nb) (0)
#endif

#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)
#define nr_cpu_ids NR_CPUS
#endif

#include <linux/miscdevice.h>
#ifndef KVM_MINOR
#define KVM_MINOR 232
#endif

#include <linux/notifier.h>
#ifndef CPU_TASKS_FROZEN

#define CPU_TASKS_FROZEN	0x0010
#define CPU_ONLINE_FROZEN	(CPU_ONLINE | CPU_TASKS_FROZEN)
#define CPU_UP_PREPARE_FROZEN	(CPU_UP_PREPARE | CPU_TASKS_FROZEN)
#define CPU_UP_CANCELED_FROZEN	(CPU_UP_CANCELED | CPU_TASKS_FROZEN)
#define CPU_DOWN_PREPARE_FROZEN	(CPU_DOWN_PREPARE | CPU_TASKS_FROZEN)
#define CPU_DOWN_FAILED_FROZEN	(CPU_DOWN_FAILED | CPU_TASKS_FROZEN)
#define CPU_DEAD_FROZEN		(CPU_DEAD | CPU_TASKS_FROZEN)

#endif

#ifndef CPU_DYING
#define CPU_DYING 0x000A
#define CPU_DYING_FROZEN (CPU_DYING | CPU_TASKS_FROZEN)
#endif

#include <asm/system.h>

struct inode;
#include <linux/anon_inodes.h>
#define anon_inode_getfd kvm_anon_inode_getfd
int kvm_init_anon_inodes(void);
void kvm_exit_anon_inodes(void);
int anon_inode_getfd(const char *name,
		     const struct file_operations *fops,
		     void *priv, int flags);

/*
 * 2.6.23 removed the cache destructor
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
#  define kmem_cache_create(name, size, align, flags, ctor) \
	kmem_cache_create(name, size, align, flags, ctor, NULL)
#endif

/* HRTIMER_MODE_ABS started life with a different name */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)
#define HRTIMER_MODE_ABS HRTIMER_ABS
#endif

/* div64_u64() was introduced in 2.6.26 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)

#define div64_u64 kvm_div64_u64

#ifdef CONFIG_64BIT

static inline uint64_t div64_u64(uint64_t dividend, uint64_t divisor)
{
	return dividend / divisor;
}

#else

uint64_t div64_u64(uint64_t dividend, uint64_t divisor);

#endif

#endif

/* the 'bool' type was introduced in 2.6.19; RHEL 5.2 backported it */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)

#ifdef RHEL_RELEASE_CODE
#if RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,2)
#define RHEL_BOOL 1
#endif
#endif

#ifndef RHEL_BOOL

typedef _Bool bool;

#define false 0
#define true 1

#endif

#endif

/*
 * PF_VCPU is a Linux 2.6.24 addition
 */

#include <linux/sched.h>

#ifndef PF_VCPU
#define PF_VCPU 0
#endif

/*
 * smp_call_function_mask() is not defined/exported below 2.6.24 on all
 * targets and below 2.6.26 on x86-64
 */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) || \
    (defined CONFIG_X86_64 && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26))

int kvm_smp_call_function_mask(cpumask_t mask, void (*func) (void *info),
			       void *info, int wait);

#define smp_call_function_mask kvm_smp_call_function_mask

void kvm_smp_send_reschedule(int cpu);

#else

#define kvm_smp_send_reschedule smp_send_reschedule

#endif

/* empty_zero_page isn't exported in all kernels */
#include <asm/pgtable.h>

#define empty_zero_page kvm_empty_zero_page

static char empty_zero_page[PAGE_SIZE];

/* reference the page once so the compiler does not warn about an unused
 * static variable in translation units that never touch it */
static inline void blahblah(void)
{
	(void)empty_zero_page[0];
}

/*
 * __mmdrop() is not exported before 2.6.25, so mmdrop() cannot release the
 * reference there; make mmget()/mmdrop() no-ops and never take the
 * reference in the first place.
 */
#include <linux/sched.h>

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)

#define mmdrop(x) do { (void)(x); } while (0)
#define mmget(x) do { (void)(x); } while (0)

#else

#define mmget(x) do { atomic_inc(x); } while (0)

#endif

/* pagefault_enable(), pagefault_disable() - 2.6.20 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
#  define KVM_NEED_PAGEFAULT_DISABLE 1
#  ifdef RHEL_RELEASE_CODE
#    if RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,3)
#      undef KVM_NEED_PAGEFAULT_DISABLE
#    endif
#  endif
#endif

#ifdef KVM_NEED_PAGEFAULT_DISABLE

static inline void pagefault_disable(void)
{
	inc_preempt_count();
	/*
	 * make sure to have issued the store before a pagefault
	 * can hit.
	 */
	barrier();
}

static inline void pagefault_enable(void)
{
	/*
	 * make sure to issue those last loads/stores before enabling
	 * the pagefault handler again.
	 */
	barrier();
	dec_preempt_count();
	/*
	 * make sure the preempt count update is visible before we
	 * check for rescheduling
	 */
	barrier();
	preempt_check_resched();
}

#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
#include <asm/uaccess.h>
#else
#include <linux/uaccess.h>
#endif

/* vm ops ->fault() was introduced in 2.6.23. */
#include <linux/mm.h>

#ifdef KVM_MAIN
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)

struct vm_fault {
	unsigned int flags;
	pgoff_t pgoff;
	void __user *virtual_address;
	struct page *page;
};

static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf);

static inline struct page *kvm_nopage_to_fault(
	int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf),
	struct vm_area_struct *vma,
	unsigned long address,
	int *type)
{
	struct vm_fault vmf;
	int ret;

	vmf.flags = 0;
	vmf.pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	vmf.virtual_address = (void __user *)address;
	ret = fault(vma, &vmf);
	if (ret)
		return NOPAGE_SIGBUS;
	*type = VM_FAULT_MINOR;
	return vmf.page;
}

static inline struct page *__kvm_vcpu_fault(struct vm_area_struct *vma,
					    unsigned long address,
					    int *type)
{
	return kvm_nopage_to_fault(kvm_vcpu_fault, vma, address, type);
}

static inline struct page *__kvm_vm_fault(struct vm_area_struct *vma,
					  unsigned long address,
					  int *type)
{
	return kvm_nopage_to_fault(kvm_vm_fault, vma, address, type);
}

#define VMA_OPS_FAULT(x) nopage
#define VMA_OPS_FAULT_FUNC(x) __##x

#else

#define VMA_OPS_FAULT(x) x
#define VMA_OPS_FAULT_FUNC(x) x

#endif
#endif
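
/*
 * Example (hypothetical user of the two macros above): a
 * vm_operations_struct can be declared version-independently as
 *
 *	static struct vm_operations_struct kvm_vcpu_vm_ops = {
 *		.VMA_OPS_FAULT(fault) = VMA_OPS_FAULT_FUNC(kvm_vcpu_fault),
 *	};
 *
 * which selects .nopage/__kvm_vcpu_fault before 2.6.23 and
 * .fault/kvm_vcpu_fault afterwards.
 */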

/* simple vfs attribute getter signature has changed to add a return code */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)

#define MAKE_SIMPLE_ATTRIBUTE_GETTER(x)       \
	static u64 x(void *v)                 \
	{				      \
		u64 ret = 0;		      \
					      \
		__##x(v, &ret);		      \
		return ret;		      \
	}

#else

#define MAKE_SIMPLE_ATTRIBUTE_GETTER(x)       \
	static int x(void *v, u64 *val)	      \
	{				      \
		return __##x(v, val);	      \
	}

#endif
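
/*
 * Example (hypothetical user): given a helper with the two-argument form
 *
 *	static int __vcpu_stat_get(void *data, u64 *val);
 *
 * MAKE_SIMPLE_ATTRIBUTE_GETTER(vcpu_stat_get) defines vcpu_stat_get() with
 * whichever signature DEFINE_SIMPLE_ATTRIBUTE() expects on this kernel.
 */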

/* set_kset_name() is gone in 2.6.25 */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)

#define set_kset_name(x) .name = x

#endif

/* FASTCALL()/fastcall were removed in 2.6.25 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
#ifndef FASTCALL
#define FASTCALL(x)	x
#define fastcall
#endif
#endif

/* tsc_khz is not available to modules before 2.6.23 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)

unsigned kvm_get_tsc_khz(void);
#define kvm_tsc_khz (kvm_get_tsc_khz())

#else

#define kvm_tsc_khz tsc_khz

#endif

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,21)

#include <linux/ktime.h>
#include <linux/hrtimer.h>

#define ktime_get kvm_ktime_get

static inline ktime_t ktime_get(void)
{
	struct timespec now;

	ktime_get_ts(&now);

	return timespec_to_ktime(now);
}

#endif

/* __aligned arrived in 2.6.21 */
#ifndef __aligned
#define __aligned(x) __attribute__((__aligned__(x)))
#endif

#include <linux/mm.h>

/* The shrinker API changed in 2.6.23 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)

struct kvm_shrinker {
	int (*shrink)(int nr_to_scan, gfp_t gfp_mask);
	int seeks;
	struct shrinker *kshrinker;
};

static inline void register_shrinker(struct kvm_shrinker *shrinker)
{
	shrinker->kshrinker = set_shrinker(shrinker->seeks, shrinker->shrink);
}

static inline void unregister_shrinker(struct kvm_shrinker *shrinker)
{
	if (shrinker->kshrinker)
		remove_shrinker(shrinker->kshrinker);
}

#define shrinker kvm_shrinker

#endif
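
/*
 * Example (hypothetical caller), written once for both kernel generations;
 * on old kernels the calls route through set_shrinker()/remove_shrinker()
 * above:
 *
 *	static struct shrinker mmu_shrinker = {
 *		.shrink = mmu_shrink,
 *		.seeks = DEFAULT_SEEKS,
 *	};
 *	register_shrinker(&mmu_shrinker);
 *	...
 *	unregister_shrinker(&mmu_shrinker);
 */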

/* clocksource */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
static inline u32 clocksource_khz2mult(u32 khz, u32 shift_constant)
{
	/*  khz = cyc/(Million ns)
	 *  mult/2^shift  = ns/cyc
	 *  mult = ns/cyc * 2^shift
	 *  mult = 1Million/khz * 2^shift
	 *  mult = 1000000 * 2^shift / khz
	 *  mult = (1000000<<shift) / khz
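	 *
	 *  worked example (hypothetical clock): khz = 1000 (a 1 MHz clock)
	 *  and shift = 22 give mult = (1000000 << 22) / 1000 = 4194304000,
	 *  which still fits in a u32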
	 */
	u64 tmp = ((u64)1000000) << shift_constant;

	tmp += khz/2; /* round for do_div */
	do_div(tmp, khz);

	return (u32)tmp;
}
#else
#include <linux/clocksource.h>
#endif

/* manually export hrtimer_init/start/cancel */
#include <linux/kallsyms.h>
extern void (*hrtimer_init_p)(struct hrtimer *timer, clockid_t which_clock,
			      enum hrtimer_mode mode);
extern int (*hrtimer_start_p)(struct hrtimer *timer, ktime_t tim,
			      const enum hrtimer_mode mode);
extern int (*hrtimer_cancel_p)(struct hrtimer *timer);

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) && defined(CONFIG_KALLSYMS)
static inline void hrtimer_kallsyms_resolve(void)
{
	hrtimer_init_p = (void *) kallsyms_lookup_name("hrtimer_init");
	BUG_ON(!hrtimer_init_p);
	hrtimer_start_p = (void *) kallsyms_lookup_name("hrtimer_start");
	BUG_ON(!hrtimer_start_p);
	hrtimer_cancel_p = (void *) kallsyms_lookup_name("hrtimer_cancel");
	BUG_ON(!hrtimer_cancel_p);
}
#else
static inline void hrtimer_kallsyms_resolve(void)
{
	hrtimer_init_p = hrtimer_init;
	hrtimer_start_p = hrtimer_start;
	hrtimer_cancel_p = hrtimer_cancel;
}
#endif

/* handle old hrtimer API with data pointer */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
static inline void hrtimer_data_pointer(struct hrtimer *timer)
{
	timer->data = (void *)timer;
}
#else
static inline void hrtimer_data_pointer(struct hrtimer *timer) {}
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)

#define ns_to_timespec kvm_ns_to_timespec

struct timespec kvm_ns_to_timespec(const s64 nsec);

#endif

/* work_struct lost the 'data' field in 2.6.20 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)

#define kvm_INIT_WORK(work, handler) \
	INIT_WORK(work, (void (*)(void *))handler, work)

#else

#define kvm_INIT_WORK(work, handler) INIT_WORK(work, handler)

#endif
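
/*
 * Example (hypothetical caller), valid on both sides of the 2.6.20 change,
 * since the old-style wrapper passes the work_struct itself as the data:
 *
 *	static void my_handler(struct work_struct *work);
 *	...
 *	kvm_INIT_WORK(&vcpu->work, my_handler);
 */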

/* cancel_work_sync() was introduced in 2.6.22 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)

static inline int cancel_work_sync(struct work_struct *work)
{
	/*
	 * FIXME: actually cancel.  How?  Add own implementation of workqueues?
	 */
	return 0;
}

/* ... and it returned void before 2.6.23 */
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)

#define cancel_work_sync(work) ({ cancel_work_sync(work); 0; })

#endif

/* flush_work() is not available before 2.6.27; approximate it by a cancel */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)

static inline void flush_work(struct work_struct *work)
{
	cancel_work_sync(work);
}

#endif

/* pci_get_bus_and_slot() is not available before 2.6.20 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)

struct pci_dev;

struct pci_dev *pci_get_bus_and_slot(unsigned int bus, unsigned int devfn);

#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
#include <linux/relayfs_fs.h>
#else
#include <linux/relay.h>
#endif

/* the relay_open() interface changed in 2.6.21 */

struct rchan *kvm_relay_open(const char *base_filename,
			 struct dentry *parent,
			 size_t subbuf_size,
			 size_t n_subbufs,
			 struct rchan_callbacks *cb,
			 void *private_data);

#else

#define kvm_relay_open relay_open

#endif

/* get_user_pages_fast() is not available before 2.6.27; fall back to
 * get_user_pages() under mmap_sem */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)

static inline int get_user_pages_fast(unsigned long start, int nr_pages,
				      int write, struct page **pages)
{
	int npages;

	down_read(&current->mm->mmap_sem);
	npages = get_user_pages(current, current->mm, start, nr_pages, write,
				0, pages, NULL);
	up_read(&current->mm->mmap_sem);

	return npages;
}

#endif

/* spin_needbreak() was called need_lockbreak() up to 2.6.24 */
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,24)

#define spin_needbreak need_lockbreak

#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)

static inline void kvm_hrtimer_add_expires_ns(struct hrtimer *timer, u64 delta)
{
	timer->expires = ktime_add_ns(timer->expires, delta);
}

static inline ktime_t kvm_hrtimer_get_expires(struct hrtimer *timer)
{
	return timer->expires;
}

static inline u64 kvm_hrtimer_get_expires_ns(struct hrtimer *timer)
{
	return ktime_to_ns(timer->expires);
}

static inline void kvm_hrtimer_start_expires(struct hrtimer *timer, int mode)
{
	hrtimer_start_p(timer, timer->expires, mode);
}

static inline ktime_t kvm_hrtimer_expires_remaining(const struct hrtimer *timer)
{
	return ktime_sub(timer->expires, timer->base->get_time());
}

#else

#define kvm_hrtimer_add_expires_ns hrtimer_add_expires_ns
#define kvm_hrtimer_get_expires hrtimer_get_expires
#define kvm_hrtimer_get_expires_ns hrtimer_get_expires_ns
#define kvm_hrtimer_start_expires hrtimer_start_expires
#define kvm_hrtimer_expires_remaining hrtimer_expires_remaining

#endif

/* pci_reset_function() is not available before 2.6.28; make it a no-op */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)

static inline int pci_reset_function(struct pci_dev *dev)
{
	return 0;
}

#endif

#include <linux/interrupt.h>

/* the irq handler prototype lost its pt_regs argument in 2.6.19 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)

typedef irqreturn_t (*kvm_irq_handler_t)(int, void *);
int kvm_request_irq(unsigned int a, kvm_irq_handler_t handler, unsigned long c,
		    const char *d, void *e);
void kvm_free_irq(unsigned int irq, void *dev_id);

#else

#define kvm_request_irq request_irq
#define kvm_free_irq free_irq

#endif

/* dynamically allocated cpu masks introduced in 2.6.28 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)

typedef cpumask_t cpumask_var_t[1];

static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	return 1;
}

static inline void free_cpumask_var(cpumask_var_t mask)
{
}

static inline void cpumask_clear(cpumask_var_t mask)
{
	cpus_clear(*mask);
}

static inline void cpumask_set_cpu(int cpu, cpumask_var_t mask)
{
	cpu_set(cpu, *mask);
}

static inline int smp_call_function_many(cpumask_var_t cpus,
					 void (*func)(void *data), void *data,
					 int sync)
{
	return smp_call_function_mask(*cpus, func, data, sync);
}

static inline int cpumask_empty(cpumask_var_t mask)
{
	return cpus_empty(*mask);
}

static inline int cpumask_test_cpu(int cpu, cpumask_var_t mask)
{
	return cpu_isset(cpu, *mask);
}

static inline void cpumask_clear_cpu(int cpu, cpumask_var_t mask)
{
	cpu_clear(cpu, *mask);
}

#define cpu_online_mask (&cpu_online_map)

#define cpumask_any(m) first_cpu(*(m))

#endif

/* A zeroing constructor was added late in the 2.6.30 cycle */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)

static inline bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	bool ret;

	ret = alloc_cpumask_var(mask, flags);
	if (ret)
		cpumask_clear(*mask);
	return ret;
}

#endif
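
/*
 * Example (hypothetical caller) using the compat cpumask API above; on
 * 2.6.28+ these names map directly to the kernel's own implementation:
 *
 *	cpumask_var_t cpus;
 *
 *	if (!zalloc_cpumask_var(&cpus, GFP_ATOMIC))
 *		return;
 *	cpumask_set_cpu(cpu, cpus);
 *	...
 *	free_cpumask_var(cpus);
 */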

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29)

#define IF_ANON_INODES_DOES_REFCOUNTS(x)

#else

#define IF_ANON_INODES_DOES_REFCOUNTS(x) x

#endif


/* marker_synchronize_unregister() was introduced in 2.6.28 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
#define marker_synchronize_unregister() synchronize_sched()
#endif

/* pci_dev.msi_enabled was introduced in 2.6.18 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)

struct pci_dev;

int kvm_pcidev_msi_enabled(struct pci_dev *dev);

#else

#define kvm_pcidev_msi_enabled(dev)	(dev)->msi_enabled

#endif

/* compound_head() was introduced in 2.6.22 */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
#  define NEED_COMPOUND_HEAD 1
#  ifdef RHEL_RELEASE_CODE
#    if RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,2)
#      undef NEED_COMPOUND_HEAD
#    endif
#  endif
#endif

#ifdef NEED_COMPOUND_HEAD

static inline struct page *compound_head(struct page *page)
{
	if (PageCompound(page))
		page = (struct page *)page_private(page);
	return page;
}

#endif

#include <linux/iommu.h>
#ifndef IOMMU_CACHE

#define IOMMU_CACHE	(4)
#define IOMMU_CAP_CACHE_COHERENCY	0x1
static inline int iommu_domain_has_cap(struct iommu_domain *domain,
				       unsigned long cap)
{
	return 0;
}

#endif

#include <linux/file.h>

/* srcu was born in 2.6.19 */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)

#define kvm_init_srcu_struct init_srcu_struct
#define kvm_cleanup_srcu_struct cleanup_srcu_struct
#define kvm_srcu_read_lock srcu_read_lock
#define kvm_srcu_read_unlock srcu_read_unlock
#define kvm_synchronize_srcu synchronize_srcu
#define kvm_srcu_batches_completed srcu_batches_completed

#endif

/* tracepoints were introduced in 2.6.28 and reworked through 2.6.31;
 * stub them out on older kernels */

#include <linux/tracepoint.h>

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31)

struct tracepoint;

#undef DECLARE_TRACE
#undef DEFINE_TRACE
#undef PARAMS
#undef TP_PROTO
#undef TP_ARGS
#undef EXPORT_TRACEPOINT_SYMBOL
#undef EXPORT_TRACEPOINT_SYMBOL_GPL

#define DECLARE_TRACE(name, proto, args)				\
	static inline void _do_trace_##name(struct tracepoint *tp, proto) \
	{ }								\
	static inline void trace_##name(proto)				\
	{ }								\
	static inline int register_trace_##name(void (*probe)(proto))	\
	{								\
		return -ENOSYS;						\
	}								\
	static inline int unregister_trace_##name(void (*probe)(proto))	\
	{								\
		return -ENOSYS;						\
	}

#define tracepoint_update_probe_range(begin, end) do {} while (0)

#define DEFINE_TRACE(name)
#define EXPORT_TRACEPOINT_SYMBOL_GPL(name)
#define EXPORT_TRACEPOINT_SYMBOL(name)

#define PARAMS(args...) args
#define TP_PROTO(args...)	args
#define TP_ARGS(args...)		args

#define TRACE_EVENT(name, proto, args, struct, assign, print)	\
	DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))

#undef tracepoint_synchronize_unregister
#define tracepoint_synchronize_unregister() do {} while (0)

#endif
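
/*
 * With these stubs a trace_foo(...) call site compiles unchanged on old
 * kernels ("foo" being a hypothetical tracepoint name): trace_foo()
 * expands to an empty inline, and register_trace_foo()/
 * unregister_trace_foo() return -ENOSYS.
 */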

#include <linux/ftrace_event.h>

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31)

struct trace_print_flags {
	unsigned long		mask;
	const char		*name;
};

#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31)

#define alloc_pages_exact_node alloc_pages_node

#endif

#include <linux/hugetlb.h>

/* vma_kernel_pagesize, exported since 2.6.32 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32)

#if defined(CONFIG_HUGETLB_PAGE) && LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)
static inline
unsigned long kvm_vma_kernel_pagesize(struct vm_area_struct *vma)
{
	struct hstate *hstate;

	if (!is_vm_hugetlb_page(vma))
		return PAGE_SIZE;

	hstate = hstate_vma(vma);

	return 1UL << (hstate->order + PAGE_SHIFT);
}
#else /* !CONFIG_HUGETLB_PAGE || <= 2.6.26 */
#define kvm_vma_kernel_pagesize(v) PAGE_SIZE
#endif

#else /* >= 2.6.32 */

#define kvm_vma_kernel_pagesize vma_kernel_pagesize

#endif

#ifndef printk_once
/*
 * Print a one-time message (analogous to WARN_ONCE() et al):
 */
#define printk_once(x...) ({			\
	static int __print_once = 1;		\
						\
	if (__print_once) {			\
		__print_once = 0;		\
		printk(x);			\
	}					\
})
#endif
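
/* e.g. (hypothetical message): printk_once(KERN_INFO "kvm: loaded\n"); */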

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32) && !defined(CONFIG_CPU_FREQ)
static inline unsigned int cpufreq_get(unsigned int cpu)
{
	return 0;
}
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode);
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
#ifndef CONFIG_MMU_NOTIFIER
struct mmu_notifier {};
#endif
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)
static inline void hlist_del_init_rcu(struct hlist_node *n)
{
	if (!hlist_unhashed(n)) {
		__hlist_del(n);
		n->pprev = NULL;
	}
}
#endif

#ifndef CONFIG_USER_RETURN_NOTIFIER

/*
 * Kernels without CONFIG_USER_RETURN_NOTIFIER have no user return
 * notifiers; emulate a single per-cpu notifier that the module fires
 * itself via kvm_fire_urn().
 */
#include <linux/percpu.h>

struct kvm_user_return_notifier {
	void (*on_user_return)(struct kvm_user_return_notifier *urn);
};

DECLARE_PER_CPU(struct kvm_user_return_notifier *, kvm_urn);

static inline void
kvm_user_return_notifier_register(struct kvm_user_return_notifier *urn)
{
	__get_cpu_var(kvm_urn) = urn;
}

static inline void
kvm_user_return_notifier_unregister(struct kvm_user_return_notifier *urn)
{
	__get_cpu_var(kvm_urn) = NULL;
}

static inline void kvm_fire_urn(void)
{
	struct kvm_user_return_notifier *urn = __get_cpu_var(kvm_urn);

	if (urn)
		urn->on_user_return(urn);
}

#else /* CONFIG_USER_RETURN_NOTIFIER */

#define kvm_user_return_notifier		user_return_notifier
#define kvm_user_return_notifier_register	user_return_notifier_register
#define kvm_user_return_notifier_unregister	user_return_notifier_unregister

static inline void kvm_fire_urn(void) {}

#endif /* CONFIG_USER_RETURN_NOTIFIER */
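
/*
 * Example (hypothetical caller), identical with or without kernel support:
 *
 *	static void my_on_user_return(struct kvm_user_return_notifier *urn);
 *	static struct kvm_user_return_notifier my_urn = {
 *		.on_user_return = my_on_user_return,
 *	};
 *
 *	kvm_user_return_notifier_register(&my_urn);
 *	...
 *	kvm_user_return_notifier_unregister(&my_urn);
 */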

/* synchronize_srcu_expedited() is not available before 2.6.33 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33)

#ifdef CONFIG_SMP
void kvm_synchronize_srcu_expedited(struct srcu_struct *sp);
#else
static inline void kvm_synchronize_srcu_expedited(struct srcu_struct *sp) { }
#endif

#else

#define kvm_synchronize_srcu_expedited synchronize_srcu_expedited

#endif

int kvm_init_srcu(void);
void kvm_exit_srcu(void);

#ifndef WARN_ONCE
#define WARN_ONCE(condition, format...)	({			\
	static bool __warned;					\
	int __ret_warn_once = !!(condition);			\
								\
	if (unlikely(__ret_warn_once))				\
		if (WARN_ON(!__warned)) 			\
			__warned = true;			\
	unlikely(__ret_warn_once);				\
})
#endif
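
/* e.g. (hypothetical check): WARN_ONCE(ret < 0, "kvm: error %d\n", ret); */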

/* get_online_cpus()/put_online_cpus() replaced the lock_cpu_hotplug()
 * interface in 2.6.25 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
#define get_online_cpus lock_cpu_hotplug
#define put_online_cpus unlock_cpu_hotplug
#endif

/* getboottime()/monotonic_to_bootbased() are only usable from 2.6.32.9 on */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32) || \
    (LINUX_VERSION_CODE == KERNEL_VERSION(2,6,32) && KERNEL_EXTRAVERSION < 9)
static inline void kvm_getboottime(struct timespec *ts)
{
	struct timespec sys, now = current_kernel_time();
	ktime_get_ts(&sys);
	*ts = ns_to_timespec(timespec_to_ns(&now) - timespec_to_ns(&sys));
}
#define kvm_monotonic_to_bootbased(ts)
#else
#define kvm_getboottime			getboottime
#define kvm_monotonic_to_bootbased	monotonic_to_bootbased
#endif

static inline void kvm_clock_warn_suspend_bug(void)
{
#if defined(CONFIG_SUSPEND) && \
    (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32) || \
     (LINUX_VERSION_CODE == KERNEL_VERSION(2,6,32) && KERNEL_EXTRAVERSION < 9))
	printk("kvm: paravirtual wallclock will not work reliably "
	       "accross host suspend/resume\n");
#endif
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) && defined(CONFIG_PCI)
#include <linux/pci.h>
static inline struct pci_dev *
pci_get_domain_bus_and_slot(int domain, unsigned int bus, unsigned int devfn)
{
	if (domain != 0)
		return NULL;
	return pci_get_bus_and_slot(bus, devfn);
}
#endif

/* the raw_spinlock_t API is not available before 2.6.33 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33)

#ifdef CONFIG_PREEMPT_RT
#if LINUX_VERSION_CODE != KERNEL_VERSION(2,6,31)
#error PREEMPT_RT only supported on 2.6.31
#endif
#define raw_spinlock_t		atomic_spinlock_t
#define raw_spin_lock_init	atomic_spin_lock_init
#define raw_spin_lock		atomic_spin_lock
#define raw_spin_unlock		atomic_spin_unlock

#else

#define raw_spinlock_t		spinlock_t
#define raw_spin_lock_init	spin_lock_init
#define raw_spin_lock		spin_lock
#define raw_spin_unlock		spin_unlock
#endif

#endif