Diffstat (limited to 'drivers/misc/sgi-xp/xpc_main.c')
-rw-r--r--  drivers/misc/sgi-xp/xpc_main.c | 130
1 file changed, 38 insertions, 92 deletions
diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
index 2bb070e1722..fd3688a3e23 100644
--- a/drivers/misc/sgi-xp/xpc_main.c
+++ b/drivers/misc/sgi-xp/xpc_main.c
@@ -169,68 +169,7 @@ static struct notifier_block xpc_die_notifier = {
 	.notifier_call = xpc_system_die,
 };
 
-int (*xpc_setup_partitions_sn) (void);
-void (*xpc_teardown_partitions_sn) (void);
-enum xp_retval (*xpc_get_partition_rsvd_page_pa) (void *buf, u64 *cookie,
-						  unsigned long *rp_pa,
-						  size_t *len);
-int (*xpc_setup_rsvd_page_sn) (struct xpc_rsvd_page *rp);
-
-void (*xpc_allow_hb) (short partid);
-void (*xpc_disallow_hb) (short partid);
-void (*xpc_disallow_all_hbs) (void);
-void (*xpc_heartbeat_init) (void);
-void (*xpc_heartbeat_exit) (void);
-void (*xpc_increment_heartbeat) (void);
-void (*xpc_offline_heartbeat) (void);
-void (*xpc_online_heartbeat) (void);
-enum xp_retval (*xpc_get_remote_heartbeat) (struct xpc_partition *part);
-
-enum xp_retval (*xpc_make_first_contact) (struct xpc_partition *part);
-void (*xpc_notify_senders_of_disconnect) (struct xpc_channel *ch);
-u64 (*xpc_get_chctl_all_flags) (struct xpc_partition *part);
-enum xp_retval (*xpc_setup_msg_structures) (struct xpc_channel *ch);
-void (*xpc_teardown_msg_structures) (struct xpc_channel *ch);
-void (*xpc_process_msg_chctl_flags) (struct xpc_partition *part, int ch_number);
-int (*xpc_n_of_deliverable_payloads) (struct xpc_channel *ch);
-void *(*xpc_get_deliverable_payload) (struct xpc_channel *ch);
-
-void (*xpc_request_partition_activation) (struct xpc_rsvd_page *remote_rp,
-					  unsigned long remote_rp_pa,
-					  int nasid);
-void (*xpc_request_partition_reactivation) (struct xpc_partition *part);
-void (*xpc_request_partition_deactivation) (struct xpc_partition *part);
-void (*xpc_cancel_partition_deactivation_request) (struct xpc_partition *part);
-
-void (*xpc_process_activate_IRQ_rcvd) (void);
-enum xp_retval (*xpc_setup_ch_structures_sn) (struct xpc_partition *part);
-void (*xpc_teardown_ch_structures_sn) (struct xpc_partition *part);
-
-void (*xpc_indicate_partition_engaged) (struct xpc_partition *part);
-int (*xpc_partition_engaged) (short partid);
-int (*xpc_any_partition_engaged) (void);
-void (*xpc_indicate_partition_disengaged) (struct xpc_partition *part);
-void (*xpc_assume_partition_disengaged) (short partid);
-
-void (*xpc_send_chctl_closerequest) (struct xpc_channel *ch,
-				     unsigned long *irq_flags);
-void (*xpc_send_chctl_closereply) (struct xpc_channel *ch,
-				   unsigned long *irq_flags);
-void (*xpc_send_chctl_openrequest) (struct xpc_channel *ch,
-				    unsigned long *irq_flags);
-void (*xpc_send_chctl_openreply) (struct xpc_channel *ch,
-				  unsigned long *irq_flags);
-void (*xpc_send_chctl_opencomplete) (struct xpc_channel *ch,
-				  unsigned long *irq_flags);
-
-enum xp_retval (*xpc_save_remote_msgqueue_pa) (struct xpc_channel *ch,
-					       unsigned long msgqueue_pa);
-
-enum xp_retval (*xpc_send_payload) (struct xpc_channel *ch, u32 flags,
-				    void *payload, u16 payload_size,
-				    u8 notify_type, xpc_notify_func func,
-				    void *key);
-void (*xpc_received_payload) (struct xpc_channel *ch, void *payload);
+struct xpc_arch_operations xpc_arch_ops;
 
 /*
  * Timer function to enforce the timelimit on the partition disengage.
@@ -245,7 +184,7 @@ xpc_timeout_partition_disengage(unsigned long data)
 	(void)xpc_partition_disengaged(part);
 
 	DBUG_ON(part->disengage_timeout != 0);
-	DBUG_ON(xpc_partition_engaged(XPC_PARTID(part)));
+	DBUG_ON(xpc_arch_ops.partition_engaged(XPC_PARTID(part)));
 }
 
 /*
@@ -256,7 +195,7 @@ xpc_timeout_partition_disengage(unsigned long data)
 static void
 xpc_hb_beater(unsigned long dummy)
 {
-	xpc_increment_heartbeat();
+	xpc_arch_ops.increment_heartbeat();
 
 	if (time_is_before_eq_jiffies(xpc_hb_check_timeout))
 		wake_up_interruptible(&xpc_activate_IRQ_wq);
@@ -268,7 +207,7 @@ xpc_hb_beater(unsigned long dummy)
 static void
 xpc_start_hb_beater(void)
 {
-	xpc_heartbeat_init();
+	xpc_arch_ops.heartbeat_init();
 	init_timer(&xpc_hb_timer);
 	xpc_hb_timer.function = xpc_hb_beater;
 	xpc_hb_beater(0);
@@ -278,7 +217,7 @@ static void
 xpc_stop_hb_beater(void)
 {
 	del_timer_sync(&xpc_hb_timer);
-	xpc_heartbeat_exit();
+	xpc_arch_ops.heartbeat_exit();
 }
 
 /*
@@ -307,7 +246,7 @@ xpc_check_remote_hb(void)
 			continue;
 		}
 
-		ret = xpc_get_remote_heartbeat(part);
+		ret = xpc_arch_ops.get_remote_heartbeat(part);
 		if (ret != xpSuccess)
 			XPC_DEACTIVATE_PARTITION(part, ret);
 	}
@@ -358,7 +297,7 @@ xpc_hb_checker(void *ignore)
 			force_IRQ = 0;
 			dev_dbg(xpc_part, "processing activate IRQs "
 				"received\n");
-			xpc_process_activate_IRQ_rcvd();
+			xpc_arch_ops.process_activate_IRQ_rcvd();
 		}
 
 		/* wait for IRQ or timeout */
@@ -533,7 +472,7 @@ xpc_setup_ch_structures(struct xpc_partition *part)
 		init_waitqueue_head(&ch->idle_wq);
 	}
 
-	ret = xpc_setup_ch_structures_sn(part);
+	ret = xpc_arch_ops.setup_ch_structures(part);
 	if (ret != xpSuccess)
 		goto out_2;
 
@@ -577,7 +516,7 @@ xpc_teardown_ch_structures(struct xpc_partition *part)
 
 	/* now we can begin tearing down the infrastructure */
 
-	xpc_teardown_ch_structures_sn(part);
+	xpc_arch_ops.teardown_ch_structures(part);
 
 	kfree(part->remote_openclose_args_base);
 	part->remote_openclose_args = NULL;
@@ -625,12 +564,12 @@ xpc_activating(void *__partid)
 
 	dev_dbg(xpc_part, "activating partition %d\n", partid);
 
-	xpc_allow_hb(partid);
+	xpc_arch_ops.allow_hb(partid);
 
 	if (xpc_setup_ch_structures(part) == xpSuccess) {
 		(void)xpc_part_ref(part);	/* this will always succeed */
 
-		if (xpc_make_first_contact(part) == xpSuccess) {
+		if (xpc_arch_ops.make_first_contact(part) == xpSuccess) {
 			xpc_mark_partition_active(part);
 			xpc_channel_mgr(part);
 			/* won't return until partition is deactivating */
@@ -640,12 +579,12 @@ xpc_activating(void *__partid)
 		xpc_teardown_ch_structures(part);
 	}
 
-	xpc_disallow_hb(partid);
+	xpc_arch_ops.disallow_hb(partid);
 	xpc_mark_partition_inactive(part);
 
 	if (part->reason == xpReactivating) {
 		/* interrupting ourselves results in activating partition */
-		xpc_request_partition_reactivation(part);
+		xpc_arch_ops.request_partition_reactivation(part);
 	}
 
 	return 0;
@@ -718,10 +657,13 @@ xpc_activate_kthreads(struct xpc_channel *ch, int needed)
 static void
 xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch)
 {
+	int (*n_of_deliverable_payloads) (struct xpc_channel *) =
+		xpc_arch_ops.n_of_deliverable_payloads;
+
 	do {
 		/* deliver messages to their intended recipients */
 
-		while (xpc_n_of_deliverable_payloads(ch) > 0 &&
+		while (n_of_deliverable_payloads(ch) > 0 &&
 		       !(ch->flags & XPC_C_DISCONNECTING)) {
 			xpc_deliver_payload(ch);
 		}
@@ -737,7 +679,7 @@ xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch)
 			"wait_event_interruptible_exclusive()\n");
 
 		(void)wait_event_interruptible_exclusive(ch->idle_wq,
-				(xpc_n_of_deliverable_payloads(ch) > 0 ||
+				(n_of_deliverable_payloads(ch) > 0 ||
 				 (ch->flags & XPC_C_DISCONNECTING)));
 
 		atomic_dec(&ch->kthreads_idle);
@@ -754,6 +696,8 @@ xpc_kthread_start(void *args)
 	struct xpc_channel *ch;
 	int n_needed;
 	unsigned long irq_flags;
+	int (*n_of_deliverable_payloads) (struct xpc_channel *) =
+		xpc_arch_ops.n_of_deliverable_payloads;
 
 	dev_dbg(xpc_chan, "kthread starting, partid=%d, channel=%d\n",
 		partid, ch_number);
@@ -782,7 +726,7 @@ xpc_kthread_start(void *args)
 			 * additional kthreads to help deliver them. We only
 			 * need one less than total #of messages to deliver.
 			 */
-			n_needed = xpc_n_of_deliverable_payloads(ch) - 1;
+			n_needed = n_of_deliverable_payloads(ch) - 1;
 			if (n_needed > 0 && !(ch->flags & XPC_C_DISCONNECTING))
 				xpc_activate_kthreads(ch, n_needed);
 
@@ -810,7 +754,7 @@ xpc_kthread_start(void *args)
 
 	if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
 	    atomic_dec_return(&part->nchannels_engaged) == 0) {
-		xpc_indicate_partition_disengaged(part);
+		xpc_arch_ops.indicate_partition_disengaged(part);
 	}
 
 	xpc_msgqueue_deref(ch);
@@ -842,6 +786,8 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
 	u64 args = XPC_PACK_ARGS(ch->partid, ch->number);
 	struct xpc_partition *part = &xpc_partitions[ch->partid];
 	struct task_struct *kthread;
+	void (*indicate_partition_disengaged) (struct xpc_partition *) =
+		xpc_arch_ops.indicate_partition_disengaged;
 
 	while (needed-- > 0) {
 
@@ -863,7 +809,7 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
 
 		} else if (atomic_inc_return(&ch->kthreads_assigned) == 1 &&
 			   atomic_inc_return(&part->nchannels_engaged) == 1) {
-				xpc_indicate_partition_engaged(part);
+			xpc_arch_ops.indicate_partition_engaged(part);
 		}
 		(void)xpc_part_ref(part);
 		xpc_msgqueue_ref(ch);
@@ -885,7 +831,7 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
 
 			if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
 			    atomic_dec_return(&part->nchannels_engaged) == 0) {
-				xpc_indicate_partition_disengaged(part);
+				indicate_partition_disengaged(part);
 			}
 			xpc_msgqueue_deref(ch);
 			xpc_part_deref(part);
@@ -998,13 +944,13 @@ xpc_setup_partitions(void)
 		atomic_set(&part->references, 0);
 	}
 
-	return xpc_setup_partitions_sn();
+	return xpc_arch_ops.setup_partitions();
 }
 
 static void
 xpc_teardown_partitions(void)
 {
-	xpc_teardown_partitions_sn();
+	xpc_arch_ops.teardown_partitions();
 	kfree(xpc_partitions);
 }
 
@@ -1060,7 +1006,7 @@ xpc_do_exit(enum xp_retval reason)
 				disengage_timeout = part->disengage_timeout;
 		}
 
-		if (xpc_any_partition_engaged()) {
+		if (xpc_arch_ops.any_partition_engaged()) {
 			if (time_is_before_jiffies(printmsg_time)) {
 				dev_info(xpc_part, "waiting for remote "
 					 "partitions to deactivate, timeout in "
@@ -1091,7 +1037,7 @@ xpc_do_exit(enum xp_retval reason)
 
 	} while (1);
 
-	DBUG_ON(xpc_any_partition_engaged());
+	DBUG_ON(xpc_arch_ops.any_partition_engaged());
 
 	xpc_teardown_rsvd_page();
 
@@ -1156,15 +1102,15 @@ xpc_die_deactivate(void)
 	/* keep xpc_hb_checker thread from doing anything (just in case) */
 	xpc_exiting = 1;
 
-	xpc_disallow_all_hbs();	/*indicate we're deactivated */
+	xpc_arch_ops.disallow_all_hbs();   /*indicate we're deactivated */
 
 	for (partid = 0; partid < xp_max_npartitions; partid++) {
 		part = &xpc_partitions[partid];
 
-		if (xpc_partition_engaged(partid) ||
+		if (xpc_arch_ops.partition_engaged(partid) ||
 		    part->act_state != XPC_P_AS_INACTIVE) {
-			xpc_request_partition_deactivation(part);
-			xpc_indicate_partition_disengaged(part);
+			xpc_arch_ops.request_partition_deactivation(part);
+			xpc_arch_ops.indicate_partition_disengaged(part);
 		}
 	}
 
@@ -1181,7 +1127,7 @@ xpc_die_deactivate(void)
 	wait_to_print = XPC_DEACTIVATE_PRINTMSG_INTERVAL * 1000 * 5;
 
 	while (1) {
-		any_engaged = xpc_any_partition_engaged();
+		any_engaged = xpc_arch_ops.any_partition_engaged();
 		if (!any_engaged) {
 			dev_info(xpc_part, "all partitions have deactivated\n");
 			break;
@@ -1190,7 +1136,7 @@ xpc_die_deactivate(void)
 		if (!keep_waiting--) {
 			for (partid = 0; partid < xp_max_npartitions;
 			     partid++) {
-				if (xpc_partition_engaged(partid)) {
+				if (xpc_arch_ops.partition_engaged(partid)) {
 					dev_info(xpc_part, "deactivate from "
						 "remote partition %d timed "
						 "out\n", partid);
@@ -1237,7 +1183,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
 		/* fall through */
 	case DIE_MCA_MONARCH_ENTER:
 	case DIE_INIT_MONARCH_ENTER:
-		xpc_offline_heartbeat();
+		xpc_arch_ops.offline_heartbeat();
 		break;
 
 	case DIE_KDEBUG_LEAVE:
@@ -1248,7 +1194,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
 		/* fall through */
 	case DIE_MCA_MONARCH_LEAVE:
 	case DIE_INIT_MONARCH_LEAVE:
-		xpc_online_heartbeat();
+		xpc_arch_ops.online_heartbeat();
 		break;
 	}
 #else
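The net effect of the patch is to collapse XPC's many standalone, per-architecture function-pointer variables into the single struct xpc_arch_operations xpc_arch_ops table that callers now go through. The stand-alone C sketch below only illustrates that ops-table pattern in isolation: the member names echo a few of the hooks visible in the diff, but the real struct definition lives in xpc.h (not part of this diff), and the demo_* backend functions and demo_arch_init() initializer are hypothetical stand-ins rather than kernel code.

/*
 * Minimal sketch of the ops-table pattern this commit introduces: one
 * struct of function pointers replaces a long list of standalone
 * pointer variables.  Member names mirror a few hooks from the diff;
 * the struct layout and the backend initialization are assumptions.
 */
#include <stdio.h>

struct xpc_arch_operations {
	void (*heartbeat_init)(void);
	void (*heartbeat_exit)(void);
	void (*increment_heartbeat)(void);
	int  (*partition_engaged)(short partid);
};

/* hypothetical per-architecture implementations */
static void demo_heartbeat_init(void)      { puts("heartbeat init"); }
static void demo_heartbeat_exit(void)      { puts("heartbeat exit"); }
static void demo_increment_heartbeat(void) { puts("heartbeat++"); }
static int  demo_partition_engaged(short partid) { return partid == 0; }

/* one global instance, filled in by the arch backend at init time */
static struct xpc_arch_operations xpc_arch_ops;

static void demo_arch_init(void)
{
	static const struct xpc_arch_operations demo_ops = {
		.heartbeat_init      = demo_heartbeat_init,
		.heartbeat_exit      = demo_heartbeat_exit,
		.increment_heartbeat = demo_increment_heartbeat,
		.partition_engaged   = demo_partition_engaged,
	};

	xpc_arch_ops = demo_ops;
}

int main(void)
{
	demo_arch_init();

	/* callers indirect through the single ops struct, as in the diff */
	xpc_arch_ops.heartbeat_init();
	xpc_arch_ops.increment_heartbeat();
	printf("partition 0 engaged: %d\n", xpc_arch_ops.partition_engaged(0));
	xpc_arch_ops.heartbeat_exit();
	return 0;
}

Funneling every hook through one named struct keeps the indirection in a single place; the diff's hot paths also cache individual members in local pointers (n_of_deliverable_payloads, indicate_partition_disengaged) before looping, which keeps the call sites short.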