Diffstat (limited to 'drivers/xen')
 drivers/xen/Kconfig                |  6
 drivers/xen/events.c               | 72
 drivers/xen/fallback.c             |  3
 drivers/xen/tmem.c                 | 55
 drivers/xen/xen-acpi-processor.c   | 85
 drivers/xen/xen-pciback/pci_stub.c | 59
 drivers/xen/xen-selfballoon.c      | 13
 7 files changed, 205 insertions(+), 88 deletions(-)
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 5a32232cf7c..dd4d9cb8624 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -145,9 +145,9 @@ config SWIOTLB_XEN
 	select SWIOTLB
 
 config XEN_TMEM
-	bool
+	tristate
 	depends on !ARM
-	default y if (CLEANCACHE || FRONTSWAP)
+	default m if (CLEANCACHE || FRONTSWAP)
 	help
 	  Shim to interface in-kernel Transcendent Memory hooks
 	  (e.g. cleancache and frontswap) to Xen tmem hypercalls.
@@ -182,7 +182,7 @@ config XEN_PRIVCMD
 
 config XEN_STUB
 	bool "Xen stub drivers"
-	depends on XEN && X86_64
+	depends on XEN && X86_64 && BROKEN
 	default n
 	help
 	  Allow kernel to install stub drivers, to reserve space for Xen drivers,
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index d17aa41a904..d8cc8127f19 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -85,8 +85,7 @@ enum xen_irq_type {
  * event channel - irq->event channel mapping
  * cpu - cpu this event channel is bound to
  * index - type-specific information:
- *    PIRQ - vector, with MSB being "needs EIO", or physical IRQ of the HVM
- *           guest, or GSI (real passthrough IRQ) of the device.
+ *    PIRQ - physical IRQ, GSI, flags, and owner domain
  *    VIRQ - virq number
  *    IPI - IPI vector
  *    EVTCHN -
@@ -105,7 +104,6 @@ struct irq_info {
 		struct {
 			unsigned short pirq;
 			unsigned short gsi;
-			unsigned char vector;
 			unsigned char flags;
 			uint16_t domid;
 		} pirq;
@@ -211,7 +209,6 @@ static void xen_irq_info_pirq_init(unsigned irq,
 				   unsigned short evtchn,
 				   unsigned short pirq,
 				   unsigned short gsi,
-				   unsigned short vector,
 				   uint16_t domid,
 				   unsigned char flags)
 {
@@ -221,7 +218,6 @@ static void xen_irq_info_pirq_init(unsigned irq,
 
 	info->u.pirq.pirq = pirq;
 	info->u.pirq.gsi = gsi;
-	info->u.pirq.vector = vector;
 	info->u.pirq.domid = domid;
 	info->u.pirq.flags = flags;
 }
@@ -403,11 +399,23 @@ static void unmask_evtchn(int port)
 
 	if (unlikely((cpu != cpu_from_evtchn(port))))
 		do_hypercall = 1;
-	else
+	else {
+		/*
+		 * Need to clear the mask before checking pending to
+		 * avoid a race with an event becoming pending.
+		 *
+		 * EVTCHNOP_unmask will only trigger an upcall if the
+		 * mask bit was set, so if a hypercall is needed
+		 * remask the event.
+		 */
+		sync_clear_bit(port, BM(&s->evtchn_mask[0]));
 		evtchn_pending = sync_test_bit(port, BM(&s->evtchn_pending[0]));
 
-	if (unlikely(evtchn_pending && xen_hvm_domain()))
-		do_hypercall = 1;
+		if (unlikely(evtchn_pending && xen_hvm_domain())) {
+			sync_set_bit(port, BM(&s->evtchn_mask[0]));
+			do_hypercall = 1;
+		}
+	}
 
 	/* Slow path (hypercall) if this is a non-local port or if this is
 	 * an hvm domain and an event is pending (hvm domains don't have
@@ -418,8 +426,6 @@ static void unmask_evtchn(int port)
 	} else {
 		struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
 
-		sync_clear_bit(port, BM(&s->evtchn_mask[0]));
-
 		/*
 		 * The following is basically the equivalent of
 		 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
@@ -509,6 +515,9 @@ static void xen_free_irq(unsigned irq)
 {
 	struct irq_info *info = irq_get_handler_data(irq);
 
+	if (WARN_ON(!info))
+		return;
+
 	list_del(&info->list);
 
 	irq_set_handler_data(irq, NULL);
@@ -704,7 +713,7 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
 		goto out;
 	}
 
-	xen_irq_info_pirq_init(irq, 0, pirq, gsi, irq_op.vector, DOMID_SELF,
+	xen_irq_info_pirq_init(irq, 0, pirq, gsi, DOMID_SELF,
 			       shareable ? PIRQ_SHAREABLE : 0);
 
 	pirq_query_unmask(irq);
@@ -752,8 +761,7 @@ int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc)
 }
 
 int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
-			     int pirq, int vector, const char *name,
-			     domid_t domid)
+			     int pirq, const char *name, domid_t domid)
 {
 	int irq, ret;
 
@@ -766,7 +774,7 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
 	irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_edge_irq,
 			name);
 
-	xen_irq_info_pirq_init(irq, 0, pirq, 0, vector, domid, 0);
+	xen_irq_info_pirq_init(irq, 0, pirq, 0, domid, 0);
 	ret = irq_set_msi_desc(irq, msidesc);
 	if (ret < 0)
 		goto error_irq;
@@ -998,6 +1006,9 @@ static void unbind_from_irq(unsigned int irq)
 	int evtchn = evtchn_from_irq(irq);
 	struct irq_info *info = irq_get_handler_data(irq);
 
+	if (WARN_ON(!info))
+		return;
+
 	mutex_lock(&irq_mapping_update_lock);
 
 	if (info->refcnt > 0) {
@@ -1125,6 +1136,10 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi,
 
 void unbind_from_irqhandler(unsigned int irq, void *dev_id)
 {
+	struct irq_info *info = irq_get_handler_data(irq);
+
+	if (WARN_ON(!info))
+		return;
 	free_irq(irq, dev_id);
 	unbind_from_irq(irq);
 }
@@ -1306,7 +1321,7 @@ static void __xen_evtchn_do_upcall(void)
 {
 	int start_word_idx, start_bit_idx;
 	int word_idx, bit_idx;
-	int i;
+	int i, irq;
 	int cpu = get_cpu();
 	struct shared_info *s = HYPERVISOR_shared_info;
 	struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
@@ -1314,6 +1329,8 @@ static void __xen_evtchn_do_upcall(void)
 
 	do {
 		xen_ulong_t pending_words;
+		xen_ulong_t pending_bits;
+		struct irq_desc *desc;
 
 		vcpu_info->evtchn_upcall_pending = 0;
 
@@ -1325,6 +1342,17 @@ static void __xen_evtchn_do_upcall(void)
 		 * selector flag. xchg_xen_ulong must contain an
 		 * appropriate barrier.
 		 */
+		if ((irq = per_cpu(virq_to_irq, cpu)[VIRQ_TIMER]) != -1) {
+			int evtchn = evtchn_from_irq(irq);
+			word_idx = evtchn / BITS_PER_LONG;
+			pending_bits = evtchn % BITS_PER_LONG;
+			if (active_evtchns(cpu, s, word_idx) & (1ULL << pending_bits)) {
+				desc = irq_to_desc(irq);
+				if (desc)
+					generic_handle_irq_desc(irq, desc);
+			}
+		}
+
 		pending_words = xchg_xen_ulong(&vcpu_info->evtchn_pending_sel, 0);
 
 		start_word_idx = __this_cpu_read(current_word_idx);
@@ -1333,7 +1361,6 @@ static void __xen_evtchn_do_upcall(void)
 		word_idx = start_word_idx;
 
 		for (i = 0; pending_words != 0; i++) {
-			xen_ulong_t pending_bits;
 			xen_ulong_t words;
 
 			words = MASK_LSBS(pending_words, word_idx);
@@ -1362,8 +1389,7 @@ static void __xen_evtchn_do_upcall(void)
 
 			do {
 				xen_ulong_t bits;
-				int port, irq;
-				struct irq_desc *desc;
+				int port;
 
 				bits = MASK_LSBS(pending_bits, bit_idx);
 
@@ -1436,6 +1462,9 @@ void rebind_evtchn_irq(int evtchn, int irq)
 {
 	struct irq_info *info = info_for_irq(irq);
 
+	if (WARN_ON(!info))
+		return;
+
 	/* Make sure the irq is masked, since the new event channel
 	   will also be masked. */
 	disable_irq(irq);
@@ -1709,7 +1738,12 @@ void xen_poll_irq(int irq)
 int xen_test_irq_shared(int irq)
 {
 	struct irq_info *info = info_for_irq(irq);
-	struct physdev_irq_status_query irq_status = { .irq = info->u.pirq.pirq };
+	struct physdev_irq_status_query irq_status;
+
+	if (WARN_ON(!info))
+		return -ENOENT;
+
+	irq_status.irq = info->u.pirq.pirq;
 
 	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
 		return 0;
diff --git a/drivers/xen/fallback.c b/drivers/xen/fallback.c
index 0ef7c4d40f8..b04fb64c5a9 100644
--- a/drivers/xen/fallback.c
+++ b/drivers/xen/fallback.c
@@ -44,7 +44,7 @@ int xen_event_channel_op_compat(int cmd, void *arg)
 }
 EXPORT_SYMBOL_GPL(xen_event_channel_op_compat);
 
-int HYPERVISOR_physdev_op_compat(int cmd, void *arg)
+int xen_physdev_op_compat(int cmd, void *arg)
 {
 	struct physdev_op op;
 	int rc;
@@ -78,3 +78,4 @@ int HYPERVISOR_physdev_op_compat(int cmd, void *arg)
 
 	return rc;
 }
+EXPORT_SYMBOL_GPL(xen_physdev_op_compat);
diff --git a/drivers/xen/tmem.c b/drivers/xen/tmem.c
index 3ee836d4258..e3600be4e7f 100644
--- a/drivers/xen/tmem.c
+++ b/drivers/xen/tmem.c
@@ -5,6 +5,7 @@
  * Author: Dan Magenheimer
  */
 
+#include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/init.h>
@@ -128,6 +129,7 @@ static int xen_tmem_flush_object(u32 pool_id, struct tmem_oid oid)
 	return xen_tmem_op(TMEM_FLUSH_OBJECT, pool_id, oid, 0, 0, 0, 0, 0);
 }
 
+#ifndef CONFIG_XEN_TMEM_MODULE
 bool __read_mostly tmem_enabled = false;
 
 static int __init enable_tmem(char *s)
@@ -136,6 +138,7 @@ static int __init enable_tmem(char *s)
 	return 1;
 }
 __setup("tmem", enable_tmem);
+#endif
 
 #ifdef CONFIG_CLEANCACHE
 static int xen_tmem_destroy_pool(u32 pool_id)
@@ -227,16 +230,21 @@ static int tmem_cleancache_init_shared_fs(char *uuid, size_t pagesize)
 	return xen_tmem_new_pool(shared_uuid, TMEM_POOL_SHARED, pagesize);
 }
 
-static bool __initdata use_cleancache = true;
-
+static bool disable_cleancache __read_mostly;
+static bool disable_selfballooning __read_mostly;
+#ifdef CONFIG_XEN_TMEM_MODULE
+module_param(disable_cleancache, bool, S_IRUGO);
+module_param(disable_selfballooning, bool, S_IRUGO);
+#else
 static int __init no_cleancache(char *s)
 {
-	use_cleancache = false;
+	disable_cleancache = true;
 	return 1;
 }
 __setup("nocleancache", no_cleancache);
+#endif
 
-static struct cleancache_ops __initdata tmem_cleancache_ops = {
+static struct cleancache_ops tmem_cleancache_ops = {
 	.put_page = tmem_cleancache_put_page,
 	.get_page = tmem_cleancache_get_page,
 	.invalidate_page = tmem_cleancache_flush_page,
@@ -353,54 +361,71 @@ static void tmem_frontswap_init(unsigned ignored)
 		    xen_tmem_new_pool(private, TMEM_POOL_PERSIST, PAGE_SIZE);
 }
 
-static bool __initdata use_frontswap = true;
-
+static bool disable_frontswap __read_mostly;
+static bool disable_frontswap_selfshrinking __read_mostly;
+#ifdef CONFIG_XEN_TMEM_MODULE
+module_param(disable_frontswap, bool, S_IRUGO);
+module_param(disable_frontswap_selfshrinking, bool, S_IRUGO);
+#else
 static int __init no_frontswap(char *s)
 {
-	use_frontswap = false;
+	disable_frontswap = true;
 	return 1;
 }
 __setup("nofrontswap", no_frontswap);
+#endif
 
-static struct frontswap_ops __initdata tmem_frontswap_ops = {
+static struct frontswap_ops tmem_frontswap_ops = {
 	.store = tmem_frontswap_store,
 	.load = tmem_frontswap_load,
 	.invalidate_page = tmem_frontswap_flush_page,
 	.invalidate_area = tmem_frontswap_flush_area,
 	.init = tmem_frontswap_init
 };
+#else	/* CONFIG_FRONTSWAP */
+#define disable_frontswap_selfshrinking 1
 #endif
 
-static int __init xen_tmem_init(void)
+static int xen_tmem_init(void)
 {
 	if (!xen_domain())
 		return 0;
 #ifdef CONFIG_FRONTSWAP
-	if (tmem_enabled && use_frontswap) {
+	if (tmem_enabled && !disable_frontswap) {
 		char *s = "";
-		struct frontswap_ops old_ops =
+		struct frontswap_ops *old_ops =
 			frontswap_register_ops(&tmem_frontswap_ops);
 
 		tmem_frontswap_poolid = -1;
-		if (old_ops.init != NULL)
+		if (IS_ERR(old_ops) || old_ops) {
+			if (IS_ERR(old_ops))
+				return PTR_ERR(old_ops);
 			s = " (WARNING: frontswap_ops overridden)";
+		}
 		printk(KERN_INFO "frontswap enabled, RAM provided by "
 				 "Xen Transcendent Memory%s\n", s);
 	}
 #endif
 #ifdef CONFIG_CLEANCACHE
 	BUG_ON(sizeof(struct cleancache_filekey) != sizeof(struct tmem_oid));
-	if (tmem_enabled && use_cleancache) {
+	if (tmem_enabled && !disable_cleancache) {
 		char *s = "";
-		struct cleancache_ops old_ops =
+		struct cleancache_ops *old_ops =
 			cleancache_register_ops(&tmem_cleancache_ops);
-		if (old_ops.init_fs != NULL)
+		if (old_ops)
 			s = " (WARNING: cleancache_ops overridden)";
 		printk(KERN_INFO "cleancache enabled, RAM provided by "
 				 "Xen Transcendent Memory%s\n", s);
 	}
 #endif
+#ifdef CONFIG_XEN_SELFBALLOONING
+	xen_selfballoon_init(!disable_selfballooning,
+				!disable_frontswap_selfshrinking);
+#endif
 	return 0;
 }
 
 module_init(xen_tmem_init)
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Dan Magenheimer <dan.magenheimer@oracle.com>");
+MODULE_DESCRIPTION("Shim to Xen transcendent memory");
diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
index f3278a6603c..8abd7d57903 100644
--- a/drivers/xen/xen-acpi-processor.c
+++ b/drivers/xen/xen-acpi-processor.c
@@ -25,6 +25,7 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/types.h>
+#include <linux/syscore_ops.h>
 #include <acpi/acpi_bus.h>
 #include <acpi/acpi_drivers.h>
 #include <acpi/processor.h>
@@ -51,9 +52,9 @@ static DEFINE_MUTEX(acpi_ids_mutex);
 /* Which ACPI ID we have processed from 'struct acpi_processor'. */
 static unsigned long *acpi_ids_done;
 /* Which ACPI ID exist in the SSDT/DSDT processor definitions. */
-static unsigned long __initdata *acpi_id_present;
+static unsigned long *acpi_id_present;
 /* And if there is an _CST definition (or a PBLK) for the ACPI IDs */
-static unsigned long __initdata *acpi_id_cst_present;
+static unsigned long *acpi_id_cst_present;
 
 static int push_cxx_to_hypervisor(struct acpi_processor *_pr)
 {
@@ -329,7 +330,7 @@ static unsigned int __init get_max_acpi_id(void)
  * for_each_[present|online]_cpu macros which are banded to the virtual
  * CPU amount.
  */
-static acpi_status __init
+static acpi_status
 read_acpi_id(acpi_handle handle, u32 lvl, void *context, void **rv)
 {
 	u32 acpi_id;
@@ -384,12 +385,16 @@ read_acpi_id(acpi_handle handle, u32 lvl, void *context, void **rv)
 	return AE_OK;
 }
 
-static int __init check_acpi_ids(struct acpi_processor *pr_backup)
+static int check_acpi_ids(struct acpi_processor *pr_backup)
 {
 
 	if (!pr_backup)
 		return -ENODEV;
 
+	if (acpi_id_present && acpi_id_cst_present)
+		/* OK, done this once .. skip to uploading */
+		goto upload;
+
 	/* All online CPUs have been processed at this stage. Now verify
 	 * whether in fact "online CPUs" == physical CPUs.
 	 */
@@ -408,6 +413,7 @@ static int __init check_acpi_ids(struct acpi_processor *pr_backup)
 			    read_acpi_id, NULL, NULL, NULL);
 	acpi_get_devices("ACPI0007", read_acpi_id, NULL, NULL);
 
+upload:
 	if (!bitmap_equal(acpi_id_present, acpi_ids_done, nr_acpi_bits)) {
 		unsigned int i;
 		for_each_set_bit(i, acpi_id_present, nr_acpi_bits) {
@@ -417,10 +423,7 @@ static int __init check_acpi_ids(struct acpi_processor *pr_backup)
 			(void)upload_pm_data(pr_backup);
 		}
 	}
-	kfree(acpi_id_present);
-	acpi_id_present = NULL;
-	kfree(acpi_id_cst_present);
-	acpi_id_cst_present = NULL;
+
 	return 0;
 }
 static int __init check_prereq(void)
@@ -467,10 +470,47 @@ static void free_acpi_perf_data(void)
 	free_percpu(acpi_perf_data);
 }
 
-static int __init xen_acpi_processor_init(void)
+static int xen_upload_processor_pm_data(void)
 {
 	struct acpi_processor *pr_backup = NULL;
 	unsigned int i;
+	int rc = 0;
+
+	pr_info(DRV_NAME "Uploading Xen processor PM info\n");
+
+	for_each_possible_cpu(i) {
+		struct acpi_processor *_pr;
+		_pr = per_cpu(processors, i /* APIC ID */);
+		if (!_pr)
+			continue;
+
+		if (!pr_backup) {
+			pr_backup = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
+			if (pr_backup)
+				memcpy(pr_backup, _pr, sizeof(struct acpi_processor));
+		}
+		(void)upload_pm_data(_pr);
+	}
+
+	rc = check_acpi_ids(pr_backup);
+	kfree(pr_backup);
+
+	return rc;
+}
+
+static void xen_acpi_processor_resume(void)
+{
+	bitmap_zero(acpi_ids_done, nr_acpi_bits);
+	xen_upload_processor_pm_data();
+}
+
+static struct syscore_ops xap_syscore_ops = {
+	.resume	= xen_acpi_processor_resume,
+};
+
+static int __init xen_acpi_processor_init(void)
+{
+	unsigned int i;
 	int rc = check_prereq();
 
 	if (rc)
@@ -505,33 +545,21 @@ static int __init xen_acpi_processor_init(void)
 
 		pr = per_cpu(processors, i);
 		perf = per_cpu_ptr(acpi_perf_data, i);
+		if (!pr)
+			continue;
+
 		pr->performance = perf;
 		rc = acpi_processor_get_performance_info(pr);
 		if (rc)
 			goto err_out;
 	}
 
-	for_each_possible_cpu(i) {
-		struct acpi_processor *_pr;
-		_pr = per_cpu(processors, i /* APIC ID */);
-		if (!_pr)
-			continue;
-
-		if (!pr_backup) {
-			pr_backup = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
-			if (pr_backup)
-				memcpy(pr_backup, _pr, sizeof(struct acpi_processor));
-		}
-		(void)upload_pm_data(_pr);
-	}
-	rc = check_acpi_ids(pr_backup);
-
-	kfree(pr_backup);
-	pr_backup = NULL;
-
+	rc = xen_upload_processor_pm_data();
 	if (rc)
 		goto err_unregister;
 
+	register_syscore_ops(&xap_syscore_ops);
+
 	return 0;
 err_unregister:
 	for_each_possible_cpu(i) {
@@ -549,7 +577,10 @@ static void __exit xen_acpi_processor_exit(void)
 {
 	int i;
 
+	unregister_syscore_ops(&xap_syscore_ops);
 	kfree(acpi_ids_done);
+	kfree(acpi_id_present);
+	kfree(acpi_id_cst_present);
 	for_each_possible_cpu(i) {
 		struct acpi_processor_performance *perf;
 		perf = per_cpu_ptr(acpi_perf_data, i);
diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c
index 9204126f156..a2278ba7fb2 100644
--- a/drivers/xen/xen-pciback/pci_stub.c
+++ b/drivers/xen/xen-pciback/pci_stub.c
@@ -17,6 +17,7 @@
 #include <xen/events.h>
 #include <asm/xen/pci.h>
 #include <asm/xen/hypervisor.h>
+#include <xen/interface/physdev.h>
 #include "pciback.h"
 #include "conf_space.h"
 #include "conf_space_quirks.h"
@@ -85,37 +86,52 @@ static struct pcistub_device *pcistub_device_alloc(struct pci_dev *dev)
 static void pcistub_device_release(struct kref *kref)
 {
 	struct pcistub_device *psdev;
+	struct pci_dev *dev;
 	struct xen_pcibk_dev_data *dev_data;
 
 	psdev = container_of(kref, struct pcistub_device, kref);
-	dev_data = pci_get_drvdata(psdev->dev);
+	dev = psdev->dev;
+	dev_data = pci_get_drvdata(dev);
 
-	dev_dbg(&psdev->dev->dev, "pcistub_device_release\n");
+	dev_dbg(&dev->dev, "pcistub_device_release\n");
 
-	xen_unregister_device_domain_owner(psdev->dev);
+	xen_unregister_device_domain_owner(dev);
 
 	/* Call the reset function which does not take lock as this
	 * is called from "unbind" which takes a device_lock mutex.
	 */
-	__pci_reset_function_locked(psdev->dev);
-	if (pci_load_and_free_saved_state(psdev->dev,
-					  &dev_data->pci_saved_state)) {
-		dev_dbg(&psdev->dev->dev, "Could not reload PCI state\n");
-	} else
-		pci_restore_state(psdev->dev);
+	__pci_reset_function_locked(dev);
+	if (pci_load_and_free_saved_state(dev, &dev_data->pci_saved_state))
+		dev_dbg(&dev->dev, "Could not reload PCI state\n");
+	else
+		pci_restore_state(dev);
+
+	if (pci_find_capability(dev, PCI_CAP_ID_MSIX)) {
+		struct physdev_pci_device ppdev = {
+			.seg = pci_domain_nr(dev->bus),
+			.bus = dev->bus->number,
+			.devfn = dev->devfn
+		};
+		int err = HYPERVISOR_physdev_op(PHYSDEVOP_release_msix,
+						&ppdev);
+
+		if (err)
+			dev_warn(&dev->dev, "MSI-X release failed (%d)\n",
+				 err);
+	}
 
 	/* Disable the device */
-	xen_pcibk_reset_device(psdev->dev);
+	xen_pcibk_reset_device(dev);
 
 	kfree(dev_data);
-	pci_set_drvdata(psdev->dev, NULL);
+	pci_set_drvdata(dev, NULL);
 
 	/* Clean-up the device */
-	xen_pcibk_config_free_dyn_fields(psdev->dev);
-	xen_pcibk_config_free_dev(psdev->dev);
+	xen_pcibk_config_free_dyn_fields(dev);
+	xen_pcibk_config_free_dev(dev);
 
-	psdev->dev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
-	pci_dev_put(psdev->dev);
+	dev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
+	pci_dev_put(dev);
 
 	kfree(psdev);
 }
@@ -355,6 +371,19 @@ static int pcistub_init_device(struct pci_dev *dev)
 	if (err)
 		goto config_release;
 
+	if (pci_find_capability(dev, PCI_CAP_ID_MSIX)) {
+		struct physdev_pci_device ppdev = {
+			.seg = pci_domain_nr(dev->bus),
+			.bus = dev->bus->number,
+			.devfn = dev->devfn
+		};
+
+		err = HYPERVISOR_physdev_op(PHYSDEVOP_prepare_msix, &ppdev);
+		if (err)
+			dev_err(&dev->dev, "MSI-X preparation failed (%d)\n",
+				err);
+	}
+
 	/* We need the device active to save the state. */
 	dev_dbg(&dev->dev, "save state of device\n");
 	pci_save_state(dev);
diff --git a/drivers/xen/xen-selfballoon.c b/drivers/xen/xen-selfballoon.c
index 2552d3e0a70..f2ef569c7cc 100644
--- a/drivers/xen/xen-selfballoon.c
+++ b/drivers/xen/xen-selfballoon.c
@@ -121,7 +121,7 @@ static DECLARE_DELAYED_WORK(selfballoon_worker, selfballoon_process);
 static bool frontswap_selfshrinking __read_mostly;
 
 /* Enable/disable with kernel boot option. */
-static bool use_frontswap_selfshrink __initdata = true;
+static bool use_frontswap_selfshrink = true;
 
 /*
  * The default values for the following parameters were deemed reasonable
@@ -185,7 +185,7 @@ static int __init xen_nofrontswap_selfshrink_setup(char *s)
 __setup("noselfshrink", xen_nofrontswap_selfshrink_setup);
 
 /* Disable with kernel boot option. */
-static bool use_selfballooning __initdata = true;
+static bool use_selfballooning = true;
 
 static int __init xen_noselfballooning_setup(char *s)
 {
@@ -196,7 +196,7 @@ static int __init xen_noselfballooning_setup(char *s)
 __setup("noselfballooning", xen_noselfballooning_setup);
 #else /* !CONFIG_FRONTSWAP */
 /* Enable with kernel boot option. */
-static bool use_selfballooning __initdata = false;
+static bool use_selfballooning;
 
 static int __init xen_selfballooning_setup(char *s)
 {
@@ -537,7 +537,7 @@ int register_xen_selfballooning(struct device *dev)
 }
 EXPORT_SYMBOL(register_xen_selfballooning);
 
-static int __init xen_selfballoon_init(void)
+int xen_selfballoon_init(bool use_selfballooning, bool use_frontswap_selfshrink)
 {
 	bool enable = false;
 
@@ -571,7 +571,4 @@ static int __init xen_selfballoon_init(void)
 
 	return 0;
 }
-
-subsys_initcall(xen_selfballoon_init);
-
-MODULE_LICENSE("GPL");
+EXPORT_SYMBOL(xen_selfballoon_init);
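
The unmask_evtchn() hunk above hinges on ordering: the mask bit is cleared before the pending bit is tested, and the port is re-masked whenever the slow path is taken, because EVTCHNOP_unmask only raises an upcall if the mask bit is still set. The following is a minimal user-space sketch of that ordering only; the single-word bitmaps, the *_port() helpers and hypercall_unmask() are stand-ins for the real shared-info bitmaps, sync_*_bit() and HYPERVISOR_event_channel_op() interfaces, not the kernel code.

/* Illustrative sketch -- models the clear-mask-then-check-pending ordering. */
#include <stdbool.h>

static unsigned long port_mask;     /* bit set = event delivery masked    */
static unsigned long port_pending;  /* bit set = event waiting on port    */

static bool test_port(const unsigned long *map, int port) { return (*map >> port) & 1; }
static void set_port(unsigned long *map, int port)        { *map |= 1UL << port; }
static void clear_port(unsigned long *map, int port)      { *map &= ~(1UL << port); }

/* Stand-in for EVTCHNOP_unmask: it only acts when the mask bit is set. */
static void hypercall_unmask(int port)
{
	if (test_port(&port_mask, port)) {
		clear_port(&port_mask, port);
		/* the hypervisor would re-check pending and send an upcall here */
	}
}

static void unmask_port(int port, bool remote_cpu, bool hvm)
{
	bool do_hypercall = false;

	if (remote_cpu) {
		/* Another vCPU owns the port; let the hypervisor handle it. */
		do_hypercall = true;
	} else {
		/* Clear the mask *before* testing pending, so an event that
		 * becomes pending in between is observed rather than lost. */
		clear_port(&port_mask, port);
		if (test_port(&port_pending, port) && hvm) {
			/* Re-mask: the unmask hypercall only notifies if the
			 * mask bit is still set when it runs. */
			set_port(&port_mask, port);
			do_hypercall = true;
		}
	}

	if (do_hypercall)
		hypercall_unmask(port);
	/* else: fast path, the pending event is delivered locally (omitted) */
}

The patched kernel code follows the same shape, but operates on the shared-info page with sync_set_bit()/sync_clear_bit() and falls through to the 'hw_resend_irq'-style fast path for local, non-HVM ports.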