[openib-general] [PATCH] dynamic device init for IP2PR
Libor Michalek
Wed Oct 13 17:41:01 PDT 2004
On Wed, Oct 13, 2004 at 03:59:48PM -0700, Roland Dreier wrote:
>
> For bonus points change ip2pr to use ib_register_client()
> ib_unregister_client() so we can get rid of these warnings
> (and so it works when HCA drivers are loaded after SDP):
No problem, here's the patch. I tested it by unloading and reloading the mthca driver.
-Libor
Index: infiniband/ulp/ipoib/ip2pr_priv.h
===================================================================
--- infiniband/ulp/ipoib/ip2pr_priv.h (revision 988)
+++ infiniband/ulp/ipoib/ip2pr_priv.h (working copy)
@@ -250,7 +250,7 @@
*/
struct ip2pr_gid_pr_element {
struct ib_path_record path_record;
- u32 usage; /* last used time. */
+ u32 usage; /* last used time. */
struct ip2pr_gid_pr_element *next;
struct ip2pr_gid_pr_element **p_next;
};
Index: infiniband/ulp/ipoib/ip2pr_mod.c
===================================================================
--- infiniband/ulp/ipoib/ip2pr_mod.c (revision 988)
+++ infiniband/ulp/ipoib/ip2pr_mod.c (working copy)
@@ -27,14 +27,12 @@
MODULE_DESCRIPTION("IB path record lookup module");
MODULE_LICENSE("Dual BSD/GPL");
-extern s32 ip2pr_link_addr_init(void);
-extern s32 ip2pr_link_addr_cleanup(void);
-extern s32 ip2pr_user_lookup(unsigned long arg);
-extern s32 gid2pr_user_lookup(unsigned long arg);
-extern s32 ip2pr_proc_fs_init(void);
-extern s32 ip2pr_proc_fs_cleanup(void);
-extern s32 ip2pr_src_gid_init(void);
-extern s32 ip2pr_src_gid_cleanup(void);
+extern int ip2pr_link_addr_init(void);
+extern int ip2pr_link_addr_cleanup(void);
+extern int ip2pr_user_lookup(unsigned long arg);
+extern int gid2pr_user_lookup(unsigned long arg);
+extern int ip2pr_proc_fs_init(void);
+extern int ip2pr_proc_fs_cleanup(void);
static int ip2pr_major_number = 240;
static int ip2pr_open(struct inode *inode, struct file *fp);
@@ -93,70 +91,86 @@
/* ip2pr_driver_init_module -- initialize the PathRecord Lookup host module */
int __init ip2pr_driver_init_module(void)
{
- s32 result = 0;
+ int result = 0;
TS_TRACE(MOD_IP2PR, T_VERBOSE, TRACE_FLOW_INOUT,
"INIT: Path Record Lookup module load.");
- result =
- register_chrdev(ip2pr_major_number, IP2PR_DEVNAME, &ip2pr_fops);
+ result = register_chrdev(ip2pr_major_number,
+ IP2PR_DEVNAME,
+ &ip2pr_fops);
if (0 > result) {
- TS_REPORT_FATAL(MOD_IP2PR, "Device registration failed");
- return (result);
+
+ TS_REPORT_FATAL(MOD_IP2PR,
+ "Device registration error <%d>", result);
+ goto error_dev;
}
- if (ip2pr_major_number == 0)
+
+ if (0 == ip2pr_major_number) {
+
ip2pr_major_number = result;
+ }
result = ip2pr_proc_fs_init();
if (0 > result) {
- TS_REPORT_FATAL(MOD_IP2PR, "Init: Error creating proc entries");
- unregister_chrdev(ip2pr_major_number, IP2PR_DEVNAME);
- return (result);
+
+ TS_REPORT_FATAL(MOD_IP2PR,
+ "Error <%d> creating proc entries", result);
+ goto error_fs;
}
result = ip2pr_link_addr_init();
if (0 > result) {
- TS_REPORT_FATAL(MOD_IP2PR, "Device resource allocation failed");
- (void)ip2pr_proc_fs_cleanup();
- unregister_chrdev(ip2pr_major_number, IP2PR_DEVNAME);
- return (result);
- }
- result = ip2pr_src_gid_init();
- if (0 > result) {
- TS_REPORT_FATAL(MOD_IP2PR, "Gid resource allocation failed");
- (void)ip2pr_link_addr_cleanup();
- (void)ip2pr_proc_fs_cleanup();
- unregister_chrdev(ip2pr_major_number, IP2PR_DEVNAME);
- return (result);
+ TS_REPORT_FATAL(MOD_IP2PR,
+ "Device resource allocation error <%d>",
+ result);
+ goto error_lnk;
}
- return (result);
+ return 0;
+error_lnk:
+ (void)ip2pr_proc_fs_cleanup();
+error_fs:
+ unregister_chrdev(ip2pr_major_number, IP2PR_DEVNAME);
+error_dev:
+ return result;
}
static void __exit ip2pr_driver_cleanup_module(void)
{
+ int result;
+
TS_TRACE(MOD_IP2PR, T_VERBOSE, TRACE_FLOW_INOUT,
"INIT: Path Record Lookup module load.");
-
- if (unregister_chrdev(ip2pr_major_number, IP2PR_DEVNAME) != 0) {
- TS_REPORT_WARN(MOD_UDAPL, "Cannot unregister device");
- }
-
/*
- * Src Gid Cleanup
- */
- (void)ip2pr_src_gid_cleanup();
- /*
* link level addressing services.
*/
(void)ip2pr_link_addr_cleanup();
-
/*
* proc tables
*/
(void)ip2pr_proc_fs_cleanup();
+ /*
+ * unregister character device.
+ */
+ result = unregister_chrdev(ip2pr_major_number, IP2PR_DEVNAME);
+ if (result) {
+ TS_REPORT_WARN(MOD_IP2PR, "Cannot unregister device");
+ }
+
+ return;
}
module_init(ip2pr_driver_init_module);
module_exit(ip2pr_driver_cleanup_module);
+
+
+
+
+
+
+
+
+
+
Index: infiniband/ulp/ipoib/ip2pr_link.c
===================================================================
--- infiniband/ulp/ipoib/ip2pr_link.c (revision 988)
+++ infiniband/ulp/ipoib/ip2pr_link.c (working copy)
@@ -27,14 +27,13 @@
static tTS_KERNEL_TIMER_STRUCT _tsIp2prPathTimer;
static tIP2PR_PATH_LOOKUP_ID _tsIp2prPathLookupId = 0;
-static struct ib_event_handler _tsIp2prEventHandle[IP2PR_MAX_HCAS];
static unsigned int ip2pr_total_req = 0;
static unsigned int ip2pr_arp_timeout = 0;
static unsigned int ip2pr_path_timeout = 0;
static unsigned int ip2pr_total_fail = 0;
-static struct ip2pr_link_root _tsIp2prLinkRoot = {
+static struct ip2pr_link_root _link_root = {
wait_list:NULL,
path_list:NULL,
wait_lock:SPIN_LOCK_UNLOCKED,
@@ -54,6 +53,15 @@
((TS_IP2PR_PATH_LOOKUP_INVALID == ++_tsIp2prPathLookupId) ? \
++_tsIp2prPathLookupId : _tsIp2prPathLookupId)
+static void ip2pr_device_init_one(struct ib_device *device);
+static void ip2pr_device_remove_one(struct ib_device *device);
+
+static struct ib_client ip2pr_client = {
+ .name = "ip2pr",
+ .add = ip2pr_device_init_one,
+ .remove = ip2pr_device_remove_one
+};
+
/**
* Path Record lookup caching
*/
@@ -63,7 +71,7 @@
{
struct ip2pr_path_element *path_elmt;
- for (path_elmt = _tsIp2prLinkRoot.path_list;
+ for (path_elmt = _link_root.path_list;
NULL != path_elmt; path_elmt = path_elmt->next)
if (ip_addr == path_elmt->dst_addr)
break;
@@ -72,8 +80,10 @@
}
/* ip2pr_path_element_create -- create an entry for a path record element */
-static s32 ip2pr_path_element_create(u32 dst_addr, u32 src_addr,
- tTS_IB_PORT hw_port, struct ib_device *ca,
+static s32 ip2pr_path_element_create(u32 dst_addr,
+ u32 src_addr,
+ tTS_IB_PORT hw_port,
+ struct ib_device *ca,
struct ib_path_record *path_r,
struct ip2pr_path_element **return_elmt)
{
@@ -82,23 +92,23 @@
TS_CHECK_NULL(path_r, -EINVAL);
TS_CHECK_NULL(return_elmt, -EINVAL);
- TS_CHECK_NULL(_tsIp2prLinkRoot.path_cache, -EINVAL);
+ TS_CHECK_NULL(_link_root.path_cache, -EINVAL);
- path_elmt = kmem_cache_alloc(_tsIp2prLinkRoot.path_cache, SLAB_ATOMIC);
+ path_elmt = kmem_cache_alloc(_link_root.path_cache, SLAB_ATOMIC);
if (NULL == path_elmt)
return -ENOMEM;
memset(path_elmt, 0, sizeof(*path_elmt));
- spin_lock_irqsave(&_tsIp2prLinkRoot.path_lock, flags);
- path_elmt->next = _tsIp2prLinkRoot.path_list;
- _tsIp2prLinkRoot.path_list = path_elmt;
- path_elmt->p_next = &_tsIp2prLinkRoot.path_list;
+ spin_lock_irqsave(&_link_root.path_lock, flags);
+ path_elmt->next = _link_root.path_list;
+ _link_root.path_list = path_elmt;
+ path_elmt->p_next = &_link_root.path_list;
if (NULL != path_elmt->next)
path_elmt->next->p_next = &path_elmt->next;
- spin_unlock_irqrestore(&_tsIp2prLinkRoot.path_lock, flags);
+ spin_unlock_irqrestore(&_link_root.path_lock, flags);
/*
* set values
*/
@@ -120,9 +130,9 @@
unsigned long flags;
TS_CHECK_NULL(path_elmt, -EINVAL);
- TS_CHECK_NULL(_tsIp2prLinkRoot.path_cache, -EINVAL);
+ TS_CHECK_NULL(_link_root.path_cache, -EINVAL);
- spin_lock_irqsave(&_tsIp2prLinkRoot.path_lock, flags);
+ spin_lock_irqsave(&_link_root.path_lock, flags);
if (NULL != path_elmt->p_next) {
if (NULL != path_elmt->next)
path_elmt->next->p_next = path_elmt->p_next;
@@ -132,9 +142,9 @@
path_elmt->p_next = NULL;
path_elmt->next = NULL;
} /* if */
- spin_unlock_irqrestore(&_tsIp2prLinkRoot.path_lock, flags);
+ spin_unlock_irqrestore(&_link_root.path_lock, flags);
- kmem_cache_free(_tsIp2prLinkRoot.path_cache, path_elmt);
+ kmem_cache_free(_link_root.path_cache, path_elmt);
return 0;
}
@@ -176,10 +186,10 @@
unsigned long flags = 0;
TS_CHECK_NULL(ipoib_wait, -EINVAL);
- TS_CHECK_NULL(_tsIp2prLinkRoot.wait_cache, -EINVAL);
+ TS_CHECK_NULL(_link_root.wait_cache, -EINVAL);
if (use_lock)
- spin_lock_irqsave(&_tsIp2prLinkRoot.wait_lock, flags);
+ spin_lock_irqsave(&_link_root.wait_lock, flags);
if (NULL != ipoib_wait->p_next) {
if (NULL != ipoib_wait->next) {
@@ -192,9 +202,9 @@
ipoib_wait->next = NULL;
} /* if */
if (use_lock)
- spin_unlock_irqrestore(&_tsIp2prLinkRoot.wait_lock, flags);
+ spin_unlock_irqrestore(&_link_root.wait_lock, flags);
- kmem_cache_free(_tsIp2prLinkRoot.wait_cache, ipoib_wait);
+ kmem_cache_free(_link_root.wait_cache, ipoib_wait);
return 0;
}
@@ -233,8 +243,8 @@
* rearm the timer (check for neighbour nud status?)
*/
ipoib_wait->prev_timeout = (ipoib_wait->prev_timeout * 2); /* backoff */
- if (ipoib_wait->prev_timeout > _tsIp2prLinkRoot.backoff)
- ipoib_wait->prev_timeout = _tsIp2prLinkRoot.backoff;
+ if (ipoib_wait->prev_timeout > _link_root.backoff)
+ ipoib_wait->prev_timeout = _link_root.backoff;
ipoib_wait->timer.run_time =
jiffies + (ipoib_wait->prev_timeout * HZ) +
(jiffies & 0x0f);
@@ -283,9 +293,9 @@
{
struct ip2pr_ipoib_wait *ipoib_wait;
- TS_CHECK_NULL(_tsIp2prLinkRoot.wait_cache, NULL);
+ TS_CHECK_NULL(_link_root.wait_cache, NULL);
- ipoib_wait = kmem_cache_alloc(_tsIp2prLinkRoot.wait_cache, SLAB_ATOMIC);
+ ipoib_wait = kmem_cache_alloc(_link_root.wait_cache, SLAB_ATOMIC);
if (NULL != ipoib_wait) {
memset(ipoib_wait, 0, sizeof(*ipoib_wait));
@@ -296,7 +306,7 @@
if (LOOKUP_IP2PR == ltype) {
tsKernelTimerInit(&ipoib_wait->timer);
ipoib_wait->timer.run_time = jiffies +
- (_tsIp2prLinkRoot.retry_timeout * HZ);
+ (_link_root.retry_timeout * HZ);
ipoib_wait->timer.function = ip2pr_ipoib_wait_timeout;
ipoib_wait->timer.arg = ipoib_wait;
}
@@ -310,8 +320,8 @@
ipoib_wait->func = (void *) func;
ipoib_wait->plid = plid;
ipoib_wait->dev = 0;
- ipoib_wait->retry = _tsIp2prLinkRoot.max_retries;
- ipoib_wait->prev_timeout = _tsIp2prLinkRoot.retry_timeout;
+ ipoib_wait->retry = _link_root.max_retries;
+ ipoib_wait->prev_timeout = _link_root.retry_timeout;
ipoib_wait->tid = TS_IB_CLIENT_QUERY_TID_INVALID;
ipoib_wait->hw_port = 0;
ipoib_wait->ca = NULL;
@@ -338,17 +348,17 @@
return -EFAULT;
} /* if */
- spin_lock_irqsave(&_tsIp2prLinkRoot.wait_lock, flags);
+ spin_lock_irqsave(&_link_root.wait_lock, flags);
- ipoib_wait->next = _tsIp2prLinkRoot.wait_list;
- _tsIp2prLinkRoot.wait_list = ipoib_wait;
- ipoib_wait->p_next = &_tsIp2prLinkRoot.wait_list;
+ ipoib_wait->next = _link_root.wait_list;
+ _link_root.wait_list = ipoib_wait;
+ ipoib_wait->p_next = &_link_root.wait_list;
if (NULL != ipoib_wait->next) {
ipoib_wait->next->p_next = &ipoib_wait->next;
} /* if */
- spin_unlock_irqrestore(&_tsIp2prLinkRoot.wait_lock, flags);
+ spin_unlock_irqrestore(&_link_root.wait_lock, flags);
/*
* Start timer only for IP 2 PR lookup
@@ -373,8 +383,8 @@
unsigned long flags;
struct ip2pr_ipoib_wait *ipoib_wait;
- spin_lock_irqsave(&_tsIp2prLinkRoot.wait_lock, flags);
- for (ipoib_wait = _tsIp2prLinkRoot.wait_list;
+ spin_lock_irqsave(&_link_root.wait_lock, flags);
+ for (ipoib_wait = _link_root.wait_list;
NULL != ipoib_wait; ipoib_wait = ipoib_wait->next) {
if (plid == ipoib_wait->plid) {
@@ -382,7 +392,7 @@
break;
} /* if */
} /* for */
- spin_unlock_irqrestore(&_tsIp2prLinkRoot.wait_lock, flags);
+ spin_unlock_irqrestore(&_link_root.wait_lock, flags);
return ipoib_wait;
}
@@ -419,8 +429,8 @@
/*
* loop across connections.
*/
- spin_lock_irqsave(&_tsIp2prLinkRoot.path_lock, flags);
- for (path_elmt = _tsIp2prLinkRoot.path_list, counter = 0;
+ spin_lock_irqsave(&_link_root.path_lock, flags);
+ for (path_elmt = _link_root.path_list, counter = 0;
NULL != path_elmt &&
!(TS_IP2PR_PATH_PROC_DUMP_SIZE > (max_size - offset));
path_elmt = path_elmt->next, counter++) {
@@ -448,7 +458,7 @@
path_elmt->hw_port, path_elmt->usage);
} /* if */
} /* for */
- spin_unlock_irqrestore(&_tsIp2prLinkRoot.path_lock, flags);
+ spin_unlock_irqrestore(&_link_root.path_lock, flags);
if (!(start_index > counter)) {
@@ -489,8 +499,8 @@
/*
* loop across connections.
*/
- spin_lock_irqsave(&_tsIp2prLinkRoot.wait_lock, flags);
- for (ipoib_wait = _tsIp2prLinkRoot.wait_list, counter = 0;
+ spin_lock_irqsave(&_link_root.wait_lock, flags);
+ for (ipoib_wait = _link_root.wait_list, counter = 0;
NULL != ipoib_wait &&
!(TS_IP2PR_IPOIB_PROC_DUMP_SIZE > (max_size - offset));
ipoib_wait = ipoib_wait->next, counter++) {
@@ -510,7 +520,7 @@
ipoib_wait->retry, ipoib_wait->flags);
} /* if */
} /* for */
- spin_unlock_irqrestore(&_tsIp2prLinkRoot.wait_lock, flags);
+ spin_unlock_irqrestore(&_link_root.wait_lock, flags);
if (!(start_index > counter)) {
@@ -541,7 +551,7 @@
long *end_index)
{
return (ip2pr_proc_read_int(buffer, max_size, start_index,
- end_index, _tsIp2prLinkRoot.max_retries));
+ end_index, _link_root.max_retries));
}
/* ip2pr_proc-timeout_read -- dump current timeout value */
@@ -549,7 +559,7 @@
long *end_index)
{
return (ip2pr_proc_read_int(buffer, max_size, start_index,
- end_index, _tsIp2prLinkRoot.retry_timeout));
+ end_index, _link_root.retry_timeout));
}
/* ip2pr_proc_backoff_read -- dump current backoff value */
@@ -557,7 +567,7 @@
long *end_index)
{
return (ip2pr_proc_read_int(buffer, max_size, start_index,
- end_index, _tsIp2prLinkRoot.backoff));
+ end_index, _link_root.backoff));
}
/* ip2pr_proc_cache_timeout_read -- dump current cache timeout value */
@@ -565,7 +575,7 @@
long *end_index)
{
return (ip2pr_proc_read_int(buffer, max_size, start_index,
- end_index, _tsIp2prLinkRoot.cache_timeout));
+ end_index, _link_root.cache_timeout));
}
/* ip2pr_proc_total_req -- dump current retry value */
@@ -633,7 +643,7 @@
ret = ip2pr_proc_write_int(file, buffer, count, pos, &val);
if (val <= TS_IP2PR_PATH_MAX_RETRIES)
- _tsIp2prLinkRoot.max_retries = val;
+ _link_root.max_retries = val;
return (ret);
}
@@ -647,7 +657,7 @@
ret = ip2pr_proc_write_int(file, buffer, count, pos, &val);
if (val <= TS_IP2PR_MAX_DEV_PATH_WAIT)
- _tsIp2prLinkRoot.retry_timeout = val;
+ _link_root.retry_timeout = val;
return (ret);
}
@@ -661,7 +671,7 @@
ret = ip2pr_proc_write_int(file, buffer, count, pos, &val);
if (val <= TS_IP2PR_PATH_MAX_BACKOFF)
- _tsIp2prLinkRoot.backoff = val;
+ _link_root.backoff = val;
return (ret);
}
@@ -675,7 +685,7 @@
ret = ip2pr_proc_write_int(file, buffer, count, pos, &val);
if (val <= TS_IP2PR_PATH_MAX_CACHE_TIMEOUT)
- _tsIp2prLinkRoot.cache_timeout = val;
+ _link_root.cache_timeout = val;
return (ret);
}
@@ -717,8 +727,8 @@
TS_TRACE(MOD_IP2PR, T_VERY_VERBOSE, TRACE_FLOW_INOUT,
"POST: Status <%d> path completion:", status);
TS_TRACE(MOD_IP2PR, T_VERY_VERBOSE, TRACE_FLOW_INOUT,
- "POST: <%p:%d:%04x> <%016llx:%016llx> <%016llx:%016llx>",
- ipoib_wait->ca,
+ "POST: <%s:%d:%04x> <%016llx:%016llx> <%016llx:%016llx>",
+ ipoib_wait->ca->name,
ipoib_wait->hw_port,
ipoib_wait->pkey,
be64_to_cpu(ipoib_wait->src_hw.gid.s.high),
@@ -741,9 +751,9 @@
ip2pr_path_timeout++;
ipoib_wait->prev_timeout = (ipoib_wait->prev_timeout * 2);
- if (ipoib_wait->prev_timeout > _tsIp2prLinkRoot.backoff) {
+ if (ipoib_wait->prev_timeout > _link_root.backoff) {
- ipoib_wait->prev_timeout = _tsIp2prLinkRoot.backoff;
+ ipoib_wait->prev_timeout = _link_root.backoff;
}
/*
* reinitiate path record resolution
@@ -862,8 +872,8 @@
/*
* reset retry counter
*/
- ipoib_wait->retry = _tsIp2prLinkRoot.max_retries;
- ipoib_wait->prev_timeout = _tsIp2prLinkRoot.retry_timeout;
+ ipoib_wait->retry = _link_root.max_retries;
+ ipoib_wait->prev_timeout = _link_root.retry_timeout;
/*
* initiate path record resolution
*/
@@ -1022,7 +1032,7 @@
0 == (IFF_LOOPBACK & rt->u.dst.neighbour->dev->flags)) {
TS_TRACE(MOD_IP2PR, T_VERY_VERBOSE, TRACE_FLOW_WARN,
- "FIND: Nneighbour device is not IPoIB. <%s:%08x>",
+ "FIND: Neighbour device is not IPoIB. <%s:%08x>",
rt->u.dst.neighbour->dev->name,
rt->u.dst.neighbour->dev->flags);
@@ -1122,8 +1132,8 @@
}
TS_TRACE(MOD_IP2PR, T_VERY_VERBOSE, TRACE_FLOW_INOUT,
- "FIND: hca <%04x> for port <%02x> gid <%016llx:%016llx>",
- ipoib_wait->ca,
+ "FIND: hca <%s> for port <%02x> gid <%016llx:%016llx>",
+ ipoib_wait->ca->name,
ipoib_wait->hw_port,
be64_to_cpu(ipoib_wait->src_hw.gid.s.high),
be64_to_cpu(ipoib_wait->src_hw.gid.s.low));
@@ -1205,8 +1215,8 @@
TS_TRACE(MOD_IP2PR, T_VERY_VERBOSE, TRACE_FLOW_INOUT,
"RECV: Arp completion for <%08x>.", ip_addr);
- spin_lock_irqsave(&_tsIp2prLinkRoot.wait_lock, flags);
- ipoib_wait = _tsIp2prLinkRoot.wait_list;
+ spin_lock_irqsave(&_link_root.wait_lock, flags);
+ ipoib_wait = _link_root.wait_list;
while (NULL != ipoib_wait) {
next_wait = ipoib_wait->next;
@@ -1244,7 +1254,7 @@
ipoib_wait = next_wait;
}
- spin_unlock_irqrestore(&_tsIp2prLinkRoot.wait_lock, flags);
+ spin_unlock_irqrestore(&_link_root.wait_lock, flags);
return;
}
@@ -1283,9 +1293,9 @@
/*
* determine if anyone is waiting for this ARP response.
*/
- spin_lock_irqsave(&_tsIp2prLinkRoot.wait_lock, flags);
+ spin_lock_irqsave(&_link_root.wait_lock, flags);
- for (counter = 0, ipoib_wait = _tsIp2prLinkRoot.wait_list;
+ for (counter = 0, ipoib_wait = _link_root.wait_list;
NULL != ipoib_wait;
ipoib_wait = ipoib_wait->next) {
@@ -1320,7 +1330,7 @@
tqp = &ipoib_wait->arp_completion;
} /* if */
} /* for */
- spin_unlock_irqrestore(&_tsIp2prLinkRoot.wait_lock, flags);
+ spin_unlock_irqrestore(&_link_root.wait_lock, flags);
/*
* Schedule the ARP completion.
@@ -1355,7 +1365,7 @@
/*
* destroy all cached path record elements.
*/
- while (NULL != (path_elmt = _tsIp2prLinkRoot.path_list)) {
+ while (NULL != (path_elmt = _link_root.path_list)) {
result = ip2pr_path_element_destroy(path_elmt);
TS_EXPECT(MOD_IP2PR, !(0 > result));
@@ -1364,8 +1374,8 @@
/*
* Mark the source gid node based on port state
*/
- spin_lock_irqsave(&_tsIp2prLinkRoot.gid_lock, flags);
- for (sgid_elmt = _tsIp2prLinkRoot.src_gid_list;
+ spin_lock_irqsave(&_link_root.gid_lock, flags);
+ for (sgid_elmt = _link_root.src_gid_list;
NULL != sgid_elmt; sgid_elmt = sgid_elmt->next) {
if ((sgid_elmt->ca == record->device) &&
(sgid_elmt->port == record->element.port_num)) {
@@ -1393,11 +1403,13 @@
break;
}
}
- spin_unlock_irqrestore(&_tsIp2prLinkRoot.gid_lock, flags);
+ spin_unlock_irqrestore(&_link_root.gid_lock, flags);
TS_TRACE(MOD_IP2PR, T_VERBOSE, TRACE_FLOW_WARN,
- "Async Port Event on hca=<%d>,port=<%d>, event=%d",
- record->device, record->element.port_num, record->event);
+ "Async Port Event on hca <%s> port <%d> event <%d>",
+ record->device->name,
+ record->element.port_num,
+ record->event);
return;
}
@@ -1412,14 +1424,14 @@
struct ip2pr_gid_pr_element *prn_elmt, *next_prn;
/* cache_timeout of zero implies static path records. */
- if (_tsIp2prLinkRoot.cache_timeout) {
+ if (_link_root.cache_timeout) {
/*
* arg entry is unused.
*/
- path_elmt = _tsIp2prLinkRoot.path_list;
+ path_elmt = _link_root.path_list;
while (NULL != path_elmt) {
next_elmt = path_elmt->next;
- if (!((_tsIp2prLinkRoot.cache_timeout * HZ) >
+ if (!((_link_root.cache_timeout * HZ) >
(s32) (jiffies - path_elmt->usage))) {
TS_TRACE(MOD_IP2PR, T_VERY_VERBOSE,
@@ -1438,12 +1450,12 @@
/*
* Go thru' the GID List
*/
- sgid_elmt = _tsIp2prLinkRoot.src_gid_list;
+ sgid_elmt = _link_root.src_gid_list;
while (NULL != sgid_elmt) {
prn_elmt = sgid_elmt->pr_list;
while (NULL != prn_elmt) {
next_prn = prn_elmt->next;
- if (!((_tsIp2prLinkRoot.cache_timeout * HZ) >
+ if (!((_link_root.cache_timeout * HZ) >
(s32) (jiffies - prn_elmt->usage))) {
TS_TRACE(MOD_IP2PR, T_VERY_VERBOSE,
@@ -1648,8 +1660,8 @@
unsigned long flags;
*gid_node = NULL;
- spin_lock_irqsave(&_tsIp2prLinkRoot.gid_lock, flags);
- for (sgid_elmt = _tsIp2prLinkRoot.src_gid_list;
+ spin_lock_irqsave(&_link_root.gid_lock, flags);
+ for (sgid_elmt = _link_root.src_gid_list;
NULL != sgid_elmt; sgid_elmt = sgid_elmt->next) {
if (IB_PORT_ACTIVE == sgid_elmt->port_state) {
@@ -1687,7 +1699,7 @@
prn_elmt->usage = jiffies;
spin_unlock_irqrestore
- (&_tsIp2prLinkRoot.gid_lock,
+ (&_link_root.gid_lock,
flags);
return (0);
}
@@ -1695,7 +1707,7 @@
}
}
}
- spin_unlock_irqrestore(&_tsIp2prLinkRoot.gid_lock, flags);
+ spin_unlock_irqrestore(&_link_root.gid_lock, flags);
return (-ENOENT);
}
@@ -1707,17 +1719,17 @@
unsigned long flags;
*gid_node = NULL;
- spin_lock_irqsave(&_tsIp2prLinkRoot.gid_lock, flags);
- for (sgid_elmt = _tsIp2prLinkRoot.src_gid_list;
+ spin_lock_irqsave(&_link_root.gid_lock, flags);
+ for (sgid_elmt = _link_root.src_gid_list;
NULL != sgid_elmt; sgid_elmt = sgid_elmt->next) {
if (0 == memcmp(sgid_elmt->gid, src_gid, sizeof(src_gid))) {
*gid_node = sgid_elmt;
- spin_unlock_irqrestore(&_tsIp2prLinkRoot.gid_lock,
+ spin_unlock_irqrestore(&_link_root.gid_lock,
flags);
return (0);
}
}
- spin_unlock_irqrestore(&_tsIp2prLinkRoot.gid_lock, flags);
+ spin_unlock_irqrestore(&_link_root.gid_lock, flags);
return (-EINVAL);
}
@@ -1733,7 +1745,7 @@
if (ip2pr_src_gid_node_get(ipoib_wait->src_hw.gid.all, &gid_node))
return (-EINVAL);
- prn_elmt = kmem_cache_alloc(_tsIp2prLinkRoot.gid_pr_cache, SLAB_ATOMIC);
+ prn_elmt = kmem_cache_alloc(_link_root.gid_pr_cache, SLAB_ATOMIC);
if (NULL == prn_elmt) {
TS_TRACE(MOD_IP2PR, T_VERBOSE, TRACE_FLOW_WARN,
"PATH: Error Allocating prn memory.");
@@ -1744,7 +1756,7 @@
/*
* Insert into the ccache list
*/
- spin_lock_irqsave(&_tsIp2prLinkRoot.gid_lock, flags);
+ spin_lock_irqsave(&_link_root.gid_lock, flags);
prn_elmt->next = gid_node->pr_list;
gid_node->pr_list = prn_elmt;
prn_elmt->p_next = &gid_node->pr_list;
@@ -1754,7 +1766,7 @@
prn_elmt->next->p_next = &prn_elmt->next;
}
/* if */
- spin_unlock_irqrestore(&_tsIp2prLinkRoot.gid_lock, flags);
+ spin_unlock_irqrestore(&_link_root.gid_lock, flags);
return (0);
}
@@ -1772,44 +1784,11 @@
prn_elmt->p_next = NULL;
prn_elmt->next = NULL;
} /* if */
- kmem_cache_free(_tsIp2prLinkRoot.gid_pr_cache, prn_elmt);
+ kmem_cache_free(_link_root.gid_pr_cache, prn_elmt);
return (0);
}
-/* ip2pr_src_gid_delete -- Cleanup one node in Source GID List. */
-static s32 ip2pr_src_gid_delete(struct ip2pr_sgid_element *sgid_elmt)
-{
- unsigned long flags;
- struct ip2pr_gid_pr_element *prn_elmt;
-
- spin_lock_irqsave(&_tsIp2prLinkRoot.gid_lock, flags);
-
- /*
- * Clear Path Record List for this Source GID node
- */
- while (NULL != (prn_elmt = sgid_elmt->pr_list)) {
- ip2pr_delete(prn_elmt);
- } /* while */
-
- if (NULL != sgid_elmt->p_next) {
-
- if (NULL != sgid_elmt->next) {
- sgid_elmt->next->p_next = sgid_elmt->p_next;
- }
- /* if */
- *(sgid_elmt->p_next) = sgid_elmt->next;
-
- sgid_elmt->p_next = NULL;
- sgid_elmt->next = NULL;
- } /* if */
- spin_unlock_irqrestore(&_tsIp2prLinkRoot.gid_lock, flags);
-
- kmem_cache_free(_tsIp2prLinkRoot.src_gid_cache, sgid_elmt);
-
- return (0);
-}
-
/* ip2pr_src_gid_add -- Add one node to Source GID List. */
s32 ip2pr_src_gid_add(struct ib_device *hca_device,
tTS_IB_PORT port,
@@ -1818,8 +1797,7 @@
struct ip2pr_sgid_element *sgid_elmt;
unsigned long flags;
- sgid_elmt =
- kmem_cache_alloc(_tsIp2prLinkRoot.src_gid_cache, SLAB_ATOMIC);
+ sgid_elmt = kmem_cache_alloc(_link_root.src_gid_cache, SLAB_ATOMIC);
if (NULL == sgid_elmt) {
TS_TRACE(MOD_IP2PR, T_VERBOSE, TRACE_FLOW_WARN,
"PATH: Error Allocating sgidn memory.");
@@ -1829,10 +1807,9 @@
memset(sgid_elmt, 0, sizeof(*sgid_elmt));
if (ib_query_gid(hca_device, port, 0,
(union ib_gid *) sgid_elmt->gid)) {
- kmem_cache_free(_tsIp2prLinkRoot.src_gid_cache, sgid_elmt);
+ kmem_cache_free(_link_root.src_gid_cache, sgid_elmt);
return (-EFAULT);
}
-
/*
* set the fields
*/
@@ -1840,22 +1817,20 @@
sgid_elmt->port = port;
sgid_elmt->port_state = port_state;
sgid_elmt->gid_index = 0;
- sgid_elmt->port_state = port_state;
-
/*
* insert it into the list
*/
- spin_lock_irqsave(&_tsIp2prLinkRoot.gid_lock, flags);
- sgid_elmt->next = _tsIp2prLinkRoot.src_gid_list;
- _tsIp2prLinkRoot.src_gid_list = sgid_elmt;
- sgid_elmt->p_next = &_tsIp2prLinkRoot.src_gid_list;
+ spin_lock_irqsave(&_link_root.gid_lock, flags);
+ sgid_elmt->next = _link_root.src_gid_list;
+ _link_root.src_gid_list = sgid_elmt;
+ sgid_elmt->p_next = &_link_root.src_gid_list;
if (NULL != sgid_elmt->next) {
sgid_elmt->next->p_next = &sgid_elmt->next;
}
- /* if */
- spin_unlock_irqrestore(&_tsIp2prLinkRoot.gid_lock, flags);
+ spin_unlock_irqrestore(&_link_root.gid_lock, flags);
+
return (0);
}
@@ -1921,7 +1896,10 @@
func = (tGID2PR_LOOKUP_FUNC) ipoib_wait->func;
return func(tid,
status,
- ipoib_wait->hw_port, ipoib_wait->ca, path, ipoib_wait->arg);
+ ipoib_wait->hw_port,
+ ipoib_wait->ca,
+ path,
+ ipoib_wait->arg);
return (0);
}
@@ -1949,12 +1927,15 @@
if (0 == ip2pr_gid_cache_lookup(src_gid,
dst_gid, &path_record, &gid_node)) {
func = (tGID2PR_LOOKUP_FUNC) funcptr;
- result =
- func(*plid, 0, gid_node->port, gid_node->ca, &path_record,
- arg);
+ result = func(*plid,
+ 0,
+ gid_node->port,
+ gid_node->ca,
+ &path_record,
+ arg);
if (0 != result) {
TS_TRACE(MOD_IP2PR, T_VERBOSE, TRACE_FLOW_WARN,
- "PATH: Error <%d> Completing Path Record Request.",
+ "PATH: Path Record Request error. <%d>",
result);
}
return (0);
@@ -1973,9 +1954,9 @@
if (NULL == ipoib_wait) {
TS_TRACE(MOD_IP2PR, T_VERBOSE, TRACE_FLOW_WARN,
- "PATH: Error creating address resolution wait object");
+ "PATH: Error creating wait object");
return (-ENOMEM);
- } /* if */
+ }
ipoib_wait->ca = gid_node->ca;
ipoib_wait->hw_port = gid_node->port;
ipoib_wait->pkey = pkey;
@@ -2010,102 +1991,152 @@
}
EXPORT_SYMBOL(gid2pr_lookup);
-/* ip2pr_src_gid_cleanup -- Cleanup the Source GID List. */
-s32 ip2pr_src_gid_cleanup(void)
+/* ip2pr_device_remove_one -- remove one device */
+static void ip2pr_device_remove_one(struct ib_device *device)
{
+ struct ip2pr_gid_pr_element *prn_elmt;
struct ip2pr_sgid_element *sgid_elmt;
- s32 result;
+ struct ip2pr_sgid_element *next_elmt;
+ struct ib_event_handler *handler;
+ unsigned long flags;
- while (NULL != (sgid_elmt = _tsIp2prLinkRoot.src_gid_list)) {
+ TS_TRACE(MOD_IP2PR, T_VERY_VERBOSE, TRACE_FLOW_INOUT,
+ "INIT: removing device. <%s>", device->name);
- result = ip2pr_src_gid_delete(sgid_elmt);
- TS_EXPECT(MOD_IP2PR, !(0 > result));
- } /* while */
+ spin_lock_irqsave(&_link_root.gid_lock, flags);
- kmem_cache_destroy(_tsIp2prLinkRoot.src_gid_cache);
- kmem_cache_destroy(_tsIp2prLinkRoot.gid_pr_cache);
+ sgid_elmt = _link_root.src_gid_list;
+ while (NULL != sgid_elmt) {
- return (0);
+ if (device != sgid_elmt->ca) {
+
+ sgid_elmt = sgid_elmt->next;
+ continue;
+ }
+ /*
+ * Clear Path Record List for this Source GID node
+ */
+ while (NULL != (prn_elmt = sgid_elmt->pr_list)) {
+
+ ip2pr_delete(prn_elmt);
+ }
+
+ next_elmt = sgid_elmt->next;
+
+ if (NULL != sgid_elmt->next) {
+ sgid_elmt->next->p_next = sgid_elmt->p_next;
+ }
+
+ *(sgid_elmt->p_next) = sgid_elmt->next;
+
+ sgid_elmt->p_next = NULL;
+ sgid_elmt->next = NULL;
+
+ kmem_cache_free(_link_root.src_gid_cache, sgid_elmt);
+
+ sgid_elmt = next_elmt;
+ }
+
+ spin_unlock_irqrestore(&_link_root.gid_lock, flags);
+ /*
+ * clean up async handler
+ */
+ handler = ib_get_client_data(device, &ip2pr_client);
+ if (NULL == handler) {
+
+ TS_TRACE(MOD_IP2PR, T_VERBOSE, TRACE_FLOW_WARN,
+ "INIT: async handler lookup failure. <%s>",
+ device->name);
+ }
+ else {
+
+ ib_unregister_event_handler(handler);
+ kfree(handler);
+ }
+
+ return;
}
-/* ip2pr_src_gid_init -- initialize the Source GID List. */
-s32 ip2pr_src_gid_init(void)
+/* ip2pr_device_init_one -- initialize one device */
+static void ip2pr_device_init_one(struct ib_device *device)
{
- s32 result = 0;
- int i, j;
- struct ib_device *hca_device;
struct ib_device_attr dev_prop;
struct ib_port_attr port_prop;
+ struct ib_event_handler *handler;
+ int counter;
+ int result;
- _tsIp2prLinkRoot.src_gid_cache = kmem_cache_create("Ip2prSrcGidList",
- sizeof
- (struct ip2pr_sgid_element),
- 0,
- SLAB_HWCACHE_ALIGN,
- NULL, NULL);
- if (NULL == _tsIp2prLinkRoot.src_gid_cache) {
+ TS_TRACE(MOD_IP2PR, T_VERY_VERBOSE, TRACE_FLOW_INOUT,
+ "INIT: adding new device. <%s>", device->name);
- TS_TRACE(MOD_IP2PR, T_VERBOSE, TRACE_FLOW_WARN,
- "INIT: Failed to create src gid cache.");
- return (-ENOMEM);
- }
- /* if */
- _tsIp2prLinkRoot.gid_pr_cache = kmem_cache_create("Ip2prGidPrList",
- sizeof
- (struct ip2pr_gid_pr_element),
- 0, SLAB_HWCACHE_ALIGN,
- NULL, NULL);
- if (NULL == _tsIp2prLinkRoot.gid_pr_cache) {
+ result = ib_query_device(device, &dev_prop);
+ if (result) {
- TS_TRACE(MOD_IP2PR, T_VERBOSE, TRACE_FLOW_WARN,
- "INIT: Failed to create gid to pr list cache.");
- kmem_cache_destroy(_tsIp2prLinkRoot.src_gid_cache);
- return (-ENOMEM);
+ TS_TRACE(MOD_IP2PR, T_VERBOSE, TRACE_FLOW_INOUT,
+ "INIT: Error <%d> querying device. <%s>",
+ result, device->name);
+
+ return;
}
-
- /* if */
/*
- * Create SGID list for each port on hca
+ * query ports.
*/
- for (i = 0; ((hca_device = ib_device_get_by_index(i)) != NULL); ++i) {
- if (ib_query_device(hca_device, &dev_prop)) {
- TS_REPORT_FATAL(MOD_IB_NET,
- "ib_device_properties_get() failed");
- return -EINVAL;
+ for (counter = 0; counter < dev_prop.phys_port_cnt; counter++) {
+
+ result = ib_query_port(device, (counter + 1), &port_prop);
+ if (result) {
+
+ TS_TRACE(MOD_IP2PR, T_VERBOSE, TRACE_FLOW_INOUT,
+ "INIT: Error <%d> querying port. <%s:%d:%d>",
+ result, device->name, counter + 1,
+ dev_prop.phys_port_cnt);
+ continue;
}
- for (j = 1; j <= dev_prop.phys_port_cnt; j++) {
- if (ib_query_port(hca_device, j, &port_prop)) {
- continue;
- }
+ result = ip2pr_src_gid_add(device,
+ (counter + 1),
+ port_prop.state);
+ if (0 > result) {
+
+ TS_TRACE(MOD_IP2PR, T_VERBOSE, TRACE_FLOW_INOUT,
+ "INIT: Error <%d> saving GID. <%s:%d:%d>",
+ result, device->name, counter + 1,
+ dev_prop.phys_port_cnt);
+ }
+ }
+ /*
+ * allocate and set async event handler.
+ */
+ handler = kmalloc(sizeof(*handler), GFP_KERNEL);
- result = ip2pr_src_gid_add(hca_device, j,
- port_prop.state);
- if (0 > result) {
- goto port_err;
- }
- } /* for */
- } /* for */
- return (0);
+ INIT_IB_EVENT_HANDLER(handler, device, ip2pr_event_func);
- port_err:
- kmem_cache_destroy(_tsIp2prLinkRoot.src_gid_cache);
- kmem_cache_destroy(_tsIp2prLinkRoot.gid_pr_cache);
+ result = ib_register_event_handler(handler);
+ if (result) {
- return (result);
+ TS_TRACE(MOD_IP2PR, T_VERBOSE, TRACE_FLOW_WARN,
+ "INIT: Error <%d> registering event handler.",
+ result);
+
+ kfree(handler);
+ }
+ else {
+
+ ib_set_client_data(device, &ip2pr_client, handler);
+ }
+
+ return;
}
/* ip2pr_link_addr_init -- initialize the advertisment caches. */
-s32 ip2pr_link_addr_init(void)
+int ip2pr_link_addr_init(void)
{
- s32 result = 0;
- int i;
- struct ib_device *hca_device;
+ int result = 0;
TS_TRACE(MOD_IP2PR, T_VERBOSE, TRACE_FLOW_INOUT,
"INIT: Link level services initialization.");
- if (NULL != _tsIp2prLinkRoot.wait_cache) {
+ if (NULL != _link_root.wait_cache) {
TS_TRACE(MOD_IP2PR, T_VERBOSE, TRACE_FLOW_WARN,
"INIT: Wait cache is already initialized!");
@@ -2113,67 +2144,87 @@
result = -EINVAL;
goto error;
}
- /* if */
/*
* create cache
*/
- _tsIp2prLinkRoot.wait_cache = kmem_cache_create("Ip2prIpoibWait",
- sizeof
- (struct ip2pr_ipoib_wait),
- 0, SLAB_HWCACHE_ALIGN,
- NULL, NULL);
- if (NULL == _tsIp2prLinkRoot.wait_cache) {
+ _link_root.wait_cache = kmem_cache_create("ip2pr_wait",
+ sizeof
+ (struct ip2pr_ipoib_wait),
+ 0, SLAB_HWCACHE_ALIGN,
+ NULL, NULL);
+ if (NULL == _link_root.wait_cache) {
TS_TRACE(MOD_IP2PR, T_VERBOSE, TRACE_FLOW_WARN,
"INIT: Failed to create wait cache.");
-
+
result = -ENOMEM;
goto error_wait;
}
- /* if */
- _tsIp2prLinkRoot.path_cache = kmem_cache_create("Ip2prPathLookup",
- sizeof
- (struct ip2pr_path_element),
- 0, SLAB_HWCACHE_ALIGN,
- NULL, NULL);
- if (NULL == _tsIp2prLinkRoot.path_cache) {
+ _link_root.path_cache = kmem_cache_create("ip2pr_path",
+ sizeof
+ (struct ip2pr_path_element),
+ 0, SLAB_HWCACHE_ALIGN,
+ NULL, NULL);
+ if (NULL == _link_root.path_cache) {
+
TS_TRACE(MOD_IP2PR, T_VERBOSE, TRACE_FLOW_WARN,
"INIT: Failed to create path lookup cache.");
result = -ENOMEM;
goto error_path;
}
- /* if */
- _tsIp2prLinkRoot.user_req = kmem_cache_create("Ip2prUserReq",
- sizeof
- (struct ip2pr_user_req),
- 0, SLAB_HWCACHE_ALIGN,
- NULL, NULL);
- if (NULL == _tsIp2prLinkRoot.user_req) {
+ _link_root.user_req = kmem_cache_create("ip2pr_user",
+ sizeof(struct ip2pr_user_req),
+ 0, SLAB_HWCACHE_ALIGN,
+ NULL, NULL);
+ if (NULL == _link_root.user_req) {
+
TS_TRACE(MOD_IP2PR, T_VERBOSE, TRACE_FLOW_WARN,
"INIT: Failed to create user request cache.");
result = -ENOMEM;
goto error_user;
}
+
+ _link_root.src_gid_cache = kmem_cache_create("ip2pr_src_gid",
+ sizeof
+ (struct ip2pr_sgid_element),
+ 0,
+ SLAB_HWCACHE_ALIGN,
+ NULL, NULL);
+ if (NULL == _link_root.src_gid_cache) {
+
+ TS_TRACE(MOD_IP2PR, T_VERBOSE, TRACE_FLOW_WARN,
+ "INIT: Failed to create src gid cache.");
+ result = -ENOMEM;
+ goto error_gid;
+ }
+
+ _link_root.gid_pr_cache = kmem_cache_create("ip2pr_gid_pr",
+ sizeof
+ (struct ip2pr_gid_pr_element),
+ 0, SLAB_HWCACHE_ALIGN,
+ NULL, NULL);
+ if (NULL == _link_root.gid_pr_cache) {
+
+ TS_TRACE(MOD_IP2PR, T_VERBOSE, TRACE_FLOW_WARN,
+ "INIT: Failed to create gid to pr list cache.");
+
+ result = -ENOMEM;
+ goto error_pre;
+ }
/*
- * Install async event handler, to clear cache on port down
+ * register for device events.
*/
+ result = ib_register_client(&ip2pr_client);
+ if (0 > result) {
- for (i = 0; ((hca_device = ib_device_get_by_index(i)) != NULL); ++i) {
- INIT_IB_EVENT_HANDLER(&_tsIp2prEventHandle[i],
- hca_device, ip2pr_event_func);
- result = ib_register_event_handler(&_tsIp2prEventHandle[i]);
- if (result) {
- TS_TRACE(MOD_IP2PR, T_VERBOSE, TRACE_FLOW_WARN,
- "INIT: Error <%d> registering event handler.",
- result);
- goto error_async;
- }
+ TS_TRACE(MOD_IP2PR, T_TERSE, TRACE_FLOW_FATAL,
+ "INIT: Error <%d> registering client.", result);
+ goto error_hca;
}
-
/*
* create timer for pruning path record cache.
*/
@@ -2189,41 +2240,43 @@
*/
dev_add_pack(&_sdp_arp_type);
- _tsIp2prLinkRoot.backoff = TS_IP2PR_PATH_BACKOFF;
- _tsIp2prLinkRoot.max_retries = TS_IP2PR_PATH_RETRIES;
- _tsIp2prLinkRoot.retry_timeout = TS_IP2PR_DEV_PATH_WAIT;
- _tsIp2prLinkRoot.cache_timeout = TS_IP2PR_PATH_REAPING_AGE;
+ _link_root.backoff = TS_IP2PR_PATH_BACKOFF;
+ _link_root.max_retries = TS_IP2PR_PATH_RETRIES;
+ _link_root.retry_timeout = TS_IP2PR_DEV_PATH_WAIT;
+ _link_root.cache_timeout = TS_IP2PR_PATH_REAPING_AGE;
return 0;
- error_async:
-
- for (i = 0; i < IP2PR_MAX_HCAS; i++)
- if (_tsIp2prEventHandle[i].device)
- ib_unregister_event_handler(&_tsIp2prEventHandle[i]);
-
- kmem_cache_destroy(_tsIp2prLinkRoot.user_req);
- error_user:
- kmem_cache_destroy(_tsIp2prLinkRoot.path_cache);
- error_path:
- kmem_cache_destroy(_tsIp2prLinkRoot.wait_cache);
- error_wait:
- error:
+error_hca:
+ kmem_cache_destroy(_link_root.gid_pr_cache);
+error_pre:
+ kmem_cache_destroy(_link_root.src_gid_cache);
+error_gid:
+ kmem_cache_destroy(_link_root.user_req);
+error_user:
+ kmem_cache_destroy(_link_root.path_cache);
+error_path:
+ kmem_cache_destroy(_link_root.wait_cache);
+error_wait:
+error:
return result;
}
/* ip2pr_link_addr_cleanup -- cleanup the advertisment caches. */
-s32 ip2pr_link_addr_cleanup(void)
+int ip2pr_link_addr_cleanup(void)
{
struct ip2pr_path_element *path_elmt;
struct ip2pr_ipoib_wait *ipoib_wait;
- u32 result;
- int i;
+ int result;
- TS_CHECK_NULL(_tsIp2prLinkRoot.wait_cache, -EINVAL);
+ TS_CHECK_NULL(_link_root.wait_cache, -EINVAL);
TS_TRACE(MOD_IP2PR, T_VERBOSE, TRACE_FLOW_INOUT,
"INIT: Link level services cleanup.");
/*
+ * delete list of HCAs/PORTs
+ */
+ ib_unregister_client(&ip2pr_client);
+ /*
* stop cache pruning timer
*/
tsKernelTimerRemove(&_tsIp2prPathTimer);
@@ -2231,25 +2284,17 @@
* remove ARP packet processing.
*/
dev_remove_pack(&_sdp_arp_type);
-
/*
- * release async event handler(s)
- */
- for (i = 0; i < IP2PR_MAX_HCAS; i++)
- if (_tsIp2prEventHandle[i].device)
- ib_unregister_event_handler(&_tsIp2prEventHandle[i]);
-
- /*
* clear wait list
*/
- while (NULL != (ipoib_wait = _tsIp2prLinkRoot.wait_list)) {
+ while (NULL != (ipoib_wait = _link_root.wait_list)) {
result = ip2pr_ipoib_wait_destroy(ipoib_wait,
IP2PR_LOCK_NOT_HELD);
TS_EXPECT(MOD_IP2PR, !(0 > result));
} /* while */
- while (NULL != (path_elmt = _tsIp2prLinkRoot.path_list)) {
+ while (NULL != (path_elmt = _link_root.path_list)) {
result = ip2pr_path_element_destroy(path_elmt);
TS_EXPECT(MOD_IP2PR, !(0 > result));
@@ -2257,10 +2302,13 @@
/*
* delete cache
*/
- kmem_cache_destroy(_tsIp2prLinkRoot.wait_cache);
- kmem_cache_destroy(_tsIp2prLinkRoot.path_cache);
- kmem_cache_destroy(_tsIp2prLinkRoot.user_req);
+ kmem_cache_destroy(_link_root.gid_pr_cache);
+ kmem_cache_destroy(_link_root.src_gid_cache);
+ kmem_cache_destroy(_link_root.wait_cache);
+ kmem_cache_destroy(_link_root.path_cache);
+ kmem_cache_destroy(_link_root.user_req);
+
return 0;
}
@@ -2330,7 +2378,7 @@
return (-EINVAL);
}
- ureq = kmem_cache_alloc(_tsIp2prLinkRoot.user_req, SLAB_ATOMIC);
+ ureq = kmem_cache_alloc(_link_root.user_req, SLAB_ATOMIC);
if (NULL == ureq) {
return (-ENOMEM);
}
@@ -2340,25 +2388,25 @@
status = ip2pr_path_record_lookup(param.dst_addr, 0, 0, 0,
ip2pr_cb_internal, ureq, &plid);
if (status < 0) {
- kmem_cache_free(_tsIp2prLinkRoot.user_req, ureq);
+ kmem_cache_free(_link_root.user_req, ureq);
return (-EFAULT);
}
status = down_interruptible(&ureq->sem);
if (status) {
ip2pr_path_record_cancel(plid);
- kmem_cache_free(_tsIp2prLinkRoot.user_req, ureq);
+ kmem_cache_free(_link_root.user_req, ureq);
return (-EINTR);
}
if (ureq->status) {
- kmem_cache_free(_tsIp2prLinkRoot.user_req, ureq);
+ kmem_cache_free(_link_root.user_req, ureq);
return (-EHOSTUNREACH);
}
copy_to_user(param.path_record, &ureq->path_record,
sizeof(*param.path_record));
- kmem_cache_free(_tsIp2prLinkRoot.user_req, ureq);
+ kmem_cache_free(_link_root.user_req, ureq);
return (0);
}
@@ -2383,7 +2431,7 @@
if (NULL == param.path_record) {
return (-EINVAL);
}
- ureq = kmem_cache_alloc(_tsIp2prLinkRoot.user_req, SLAB_ATOMIC);
+ ureq = kmem_cache_alloc(_link_root.user_req, SLAB_ATOMIC);
if (NULL == ureq) {
return (-ENOMEM);
}
@@ -2393,19 +2441,19 @@
status = gid2pr_lookup(param.src_gid, param.dst_gid, param.pkey,
gid2pr_cb_internal, (void *) ureq, &plid);
if (status < 0) {
- kmem_cache_free(_tsIp2prLinkRoot.user_req, ureq);
+ kmem_cache_free(_link_root.user_req, ureq);
return (-EFAULT);
}
status = down_interruptible(&ureq->sem);
if (status) {
gid2pr_cancel(plid);
- kmem_cache_free(_tsIp2prLinkRoot.user_req, ureq);
+ kmem_cache_free(_link_root.user_req, ureq);
return (-EINTR);
}
if (ureq->status) {
- kmem_cache_free(_tsIp2prLinkRoot.user_req, ureq);
+ kmem_cache_free(_link_root.user_req, ureq);
return (-EHOSTUNREACH);
}
@@ -2414,7 +2462,7 @@
copy_to_user(&upa->port, &ureq->port, sizeof(upa->port));
copy_to_user(param.path_record, &ureq->path_record,
sizeof(*param.path_record));
- kmem_cache_free(_tsIp2prLinkRoot.user_req, ureq);
+ kmem_cache_free(_link_root.user_req, ureq);
return (0);
}
More information about the openib-general mailing list