From b1d9fd5574763abe5c763e32e3547a4adee9bd88 Mon Sep 17 00:00:00 2001
From: Joe Eykholt
Date: Wed, 29 Jul 2009 17:04:22 -0700
Subject: [SCSI] libfc: rename lport NONE state to DISABLED

The state NONE was meant to be invalid, but has been used as the initial
state. Rename it to DISABLED, which is more descriptive. Further patches
will make it like the RESET state, except that it won't transition to
FLOGI until fc_lport_fabric_login() is called.

Signed-off-by: Joe Eykholt
Signed-off-by: Robert Love
Signed-off-by: James Bottomley
Signed-off-by: James Bottomley
---
 drivers/scsi/libfc/fc_exch.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'drivers/scsi/libfc/fc_exch.c')

diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 145ab9ba55ea..e6d82d780acd 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -1875,7 +1875,7 @@ void fc_exch_recv(struct fc_lport *lp, struct fc_exch_mgr *mp,
 	u32 f_ctl;
 
 	/* lport lock ? */
-	if (!lp || !mp || (lp->state == LPORT_ST_NONE)) {
+	if (!lp || !mp || lp->state == LPORT_ST_DISABLED) {
 		FC_LPORT_DBG(lp, "Receiving frames for an lport that "
 			     "has not been initialized correctly\n");
 		fc_frame_free(fp);
-- cgit v1.2.3

From 84b05445b9f0b1ac2192f32260c916426d902d79 Mon Sep 17 00:00:00 2001
From: Joe Eykholt
Date: Wed, 29 Jul 2009 17:04:38 -0700
Subject: [SCSI] libfc: fix WARNING from fc_seq_start_next on closed exchanges

We saw periodic messages like:
WARNING: at drivers/scsi/libfc/fc_exch.c:825 fc_seq_start_next+0x30/0x4b

This was due to trying to allocate a sequence in a request handler when
the exchange had been reset. Delete the WARN_ON.

Signed-off-by: Joe Eykholt
Signed-off-by: Robert Love
Signed-off-by: James Bottomley
Signed-off-by: James Bottomley
---
 drivers/scsi/libfc/fc_exch.c | 1 -
 1 file changed, 1 deletion(-)
(limited to 'drivers/scsi/libfc/fc_exch.c')

diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index e6d82d780acd..cab54996375c 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -822,7 +822,6 @@ struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
 	struct fc_exch *ep = fc_seq_exch(sp);
 
 	spin_lock_bh(&ep->ex_lock);
-	WARN_ON((ep->esb_stat & ESB_ST_COMPLETE) != 0);
 	sp = fc_seq_start_next_locked(sp);
 	spin_unlock_bh(&ep->ex_lock);
 
-- cgit v1.2.3

From 96316099ac3cb259eac2d6891f3c75b38b29d26e Mon Sep 17 00:00:00 2001
From: Vasu Dev
Date: Wed, 29 Jul 2009 17:05:00 -0700
Subject: [SCSI] fcoe, libfc: adds exchange manager(EM) anchor list per lport and related APIs

Adds an EM list using an anchor struct fc_exch_mgr_anchor. The anchor is
used to allow the same EM instance to be shared across more than one lport
on an eth device; this implementation follows the design discussed at
http://www.open-fcoe.org/pipermail/devel/2009-June/002566.html. A shared
EM is required for multiple lports on an eth device when using multiple
VLANs or NPIV.

Adds the fc_exch_mgr_add API to add an EM to an lport and the
fc_exch_mgr_del API to delete a previously added EM. Also adds the
function fc_exch_mgr_destroy() to destroy an allocated EM. A kref is added
to the EM to keep track of its usage count; the EM is destroyed when it is
no longer in use, i.e. when the kref reaches zero.

The caller can specify a match function to fc_exch_mgr_add, which is used
to decide whether an exchange is allocated from that EM.

Moved the call to fcoe_em_config below the call to fcoe_libfc_config, so
that the list head lp->ema_list is initialized before the EM is configured.
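As a rough usage illustration (a hypothetical sketch, not part of this
change; "shared_em" and "my_match" are made-up names), a driver that has
already allocated an EM for one lport could attach it to a second lport on
the same eth device with fc_exch_mgr_add() and later detach it with
fc_exch_mgr_del():

	/*
	 * Hypothetical sketch: attach an existing EM ("shared_em") to a
	 * second lport on the same eth device; "my_match" is an assumed
	 * match callback of type bool (*)(struct fc_frame *).
	 */
	struct fc_exch_mgr_anchor *ema;

	ema = fc_exch_mgr_add(lport2, shared_em, my_match);
	if (!ema)
		return -ENOMEM;		/* anchor allocation failed */

	/* ... when lport2 goes away, drop its anchor (and the EM kref) */
	fc_exch_mgr_del(ema);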
Signed-off-by: Vasu Dev Signed-off-by: Robert Love Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_exch.c | 48 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) (limited to 'drivers/scsi/libfc/fc_exch.c') diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index cab54996375c..f1fa2b196e98 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -55,6 +55,7 @@ static struct kmem_cache *fc_em_cachep; /* cache for exchanges */ */ struct fc_exch_mgr { enum fc_class class; /* default class for sequences */ + struct kref kref; /* exchange mgr reference count */ spinlock_t em_lock; /* exchange manager lock, must be taken before ex_lock */ u16 last_xid; /* last allocated exchange ID */ @@ -84,6 +85,12 @@ struct fc_exch_mgr { }; #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq) +struct fc_exch_mgr_anchor { + struct list_head ema_list; + struct fc_exch_mgr *mp; + bool (*match)(struct fc_frame *); +}; + static void fc_exch_rrq(struct fc_exch *); static void fc_seq_ls_acc(struct fc_seq *); static void fc_seq_ls_rjt(struct fc_seq *, enum fc_els_rjt_reason, @@ -1729,6 +1736,47 @@ reject: fc_frame_free(fp); } +struct fc_exch_mgr_anchor *fc_exch_mgr_add(struct fc_lport *lport, + struct fc_exch_mgr *mp, + bool (*match)(struct fc_frame *)) +{ + struct fc_exch_mgr_anchor *ema; + + ema = kmalloc(sizeof(*ema), GFP_ATOMIC); + if (!ema) + return ema; + + ema->mp = mp; + ema->match = match; + /* add EM anchor to EM anchors list */ + list_add_tail(&ema->ema_list, &lport->ema_list); + kref_get(&mp->kref); + return ema; +} +EXPORT_SYMBOL(fc_exch_mgr_add); + +static void fc_exch_mgr_destroy(struct kref *kref) +{ + struct fc_exch_mgr *mp = container_of(kref, struct fc_exch_mgr, kref); + + /* + * The total exch count must be zero + * before freeing exchange manager. + */ + WARN_ON(mp->total_exches != 0); + mempool_destroy(mp->ep_pool); + kfree(mp); +} + +void fc_exch_mgr_del(struct fc_exch_mgr_anchor *ema) +{ + /* remove EM anchor from EM anchors list */ + list_del(&ema->ema_list); + kref_put(&ema->mp->kref, fc_exch_mgr_destroy); + kfree(ema); +} +EXPORT_SYMBOL(fc_exch_mgr_del); + struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp, enum fc_class class, u16 min_xid, u16 max_xid) -- cgit v1.2.3 From d459b7ea1b4c7aa3dacfeee174d02b2f7a95850d Mon Sep 17 00:00:00 2001 From: Robert Love Date: Wed, 29 Jul 2009 17:05:05 -0700 Subject: [SCSI] libfc: Remove the FC_EM_DBG macro Currently there is a 1:1 relationship between the lport and exchange manager. This macro takes an EM as an argument and determines the lport from it. However, later patches will use an EM list per lport, so we will no longer have this 1:1 relationship- this macro must change. The FC_EM_DBG macro is rarely used. There are four callers, two can use FC_LPORT_DBG instead and two can be removed since they're not necessary. This patch makes those changes and removes the macro. 
Signed-off-by: Robert Love Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_exch.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) (limited to 'drivers/scsi/libfc/fc_exch.c') diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index f1fa2b196e98..3ad7f88e7ae3 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -1129,7 +1129,7 @@ static void fc_exch_recv_req(struct fc_lport *lp, struct fc_exch_mgr *mp, lp->tt.lport_recv(lp, sp, fp); fc_exch_release(ep); /* release from lookup */ } else { - FC_EM_DBG(mp, "exch/seq lookup failed: reject %x\n", reject); + FC_LPORT_DBG(lp, "exch/seq lookup failed: reject %x\n", reject); fc_frame_free(fp); } } @@ -1235,13 +1235,12 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) struct fc_seq *sp; sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */ - if (!sp) { + + if (!sp) atomic_inc(&mp->stats.xid_not_found); - FC_EM_DBG(mp, "seq lookup failed\n"); - } else { + else atomic_inc(&mp->stats.non_bls_resp); - FC_EM_DBG(mp, "non-BLS response to sequence"); - } + fc_frame_free(fp); } @@ -1950,7 +1949,7 @@ void fc_exch_recv(struct fc_lport *lp, struct fc_exch_mgr *mp, fc_exch_recv_req(lp, mp, fp); break; default: - FC_EM_DBG(mp, "dropping invalid frame (eof %x)", fr_eof(fp)); + FC_LPORT_DBG(lp, "dropping invalid frame (eof %x)", fr_eof(fp)); fc_frame_free(fp); break; } -- cgit v1.2.3 From 52ff878c912215210f53c0a080552dd6ba3055a2 Mon Sep 17 00:00:00 2001 From: Vasu Dev Date: Wed, 29 Jul 2009 17:05:10 -0700 Subject: [SCSI] fcoe, fnic, libfc: modifies current code paths to use EM anchor list Modifies current code to use EM anchor list in EM allocation, EM free, EM reset, exch allocation and exch lookup code paths. 1. Modifies fc_exch_mgr_alloc to accept EM match function and then have allocated EM added to the lport using fc_exch_mgr_add API while also updating EM kref for newly added EM. 2. Updates fc_exch_mgr_free API to accept only lport pointer instead EM and then have this API free all EMs of the lport from EM anchor list. 3. Removes single lport pointer link from the EM, which was used in associating lport pointer in newly allocated exchange. Instead have lport pointer passed along new exchange allocation call path and then store passed lport pointer in newly allocated exchange, this will allow a single EM instance to be used across more than one lport and used in EM reset to reset only lport specific exchanges. 4. Modifies fc_exch_mgr_reset to reset all EMs from the EM anchor list of the lport, adds additional exch lport pointer (ep->lp) check for shared EM case to reset exchange specific to a lport requested reset. 5. Updates exch allocation API fc_exch_alloc to use EM anchor list and its anchor match func pointer. The fc_exch_alloc will walk the list of EMs until it finds a match, a match will be either null match func pointer or call to match function returning true value. 6. Updates fc_exch_recv to accept incoming frame on local port using only lport pointer and frame pointer without specifying EM instance of incoming frame. Instead modified fc_exch_recv to locate EM for the incoming frame by matching xid of incoming frame against a EM xid range. This change was required to use EM list in libfc Rx path and after this change the lport fc_exch_mgr pointer emp is not needed anymore, so removed emp pointer. 7. 
Updates fnic for removed lport emp pointer and above modified libfc APIs fc_exch_recv, fc_exch_mgr_alloc and fc_exch_mgr_free. 8. Removes exch_get and exch_put from libfc_function_template as these are no longer needed with EM anchor list and its match function use. Also removes its default function fc_exch_get. A defect this patch introduced regarding the libfc initialization order in the fnic driver was fixed by Joe Eykholt . Signed-off-by: Vasu Dev Signed-off-by: Robert Love Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_exch.c | 191 ++++++++++++++++++++++++++----------------- 1 file changed, 117 insertions(+), 74 deletions(-) (limited to 'drivers/scsi/libfc/fc_exch.c') diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index 3ad7f88e7ae3..324589a5cc03 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -65,7 +65,6 @@ struct fc_exch_mgr { u16 last_read; /* last xid allocated for read */ u32 total_exches; /* total allocated exchanges */ struct list_head ex_list; /* allocated exchanges list */ - struct fc_lport *lp; /* fc device instance */ mempool_t *ep_pool; /* reserve ep's */ /* @@ -275,8 +274,6 @@ static void fc_exch_release(struct fc_exch *ep) mp = ep->em; if (ep->destructor) ep->destructor(&ep->seq, ep->arg); - if (ep->lp->tt.exch_put) - ep->lp->tt.exch_put(ep->lp, mp, ep->xid); WARN_ON(!(ep->esb_stat & ESB_ST_COMPLETE)); mempool_free(ep, mp->ep_pool); } @@ -513,17 +510,20 @@ static u16 fc_em_alloc_xid(struct fc_exch_mgr *mp, const struct fc_frame *fp) return xid; } -/* - * fc_exch_alloc - allocate an exchange. - * @mp : ptr to the exchange manager - * @xid: input xid +/** + * fc_exch_em_alloc() - allocate an exchange from a specified EM. + * @lport: ptr to the local port + * @mp: ptr to the exchange manager + * @fp: ptr to the FC frame + * @xid: input xid * * if xid is supplied zero then assign next free exchange ID * from exchange manager, otherwise use supplied xid. * Returns with exch lock held. */ -struct fc_exch *fc_exch_alloc(struct fc_exch_mgr *mp, - struct fc_frame *fp, u16 xid) +static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport, + struct fc_exch_mgr *mp, + struct fc_frame *fp, u16 xid) { struct fc_exch *ep; @@ -566,7 +566,7 @@ struct fc_exch *fc_exch_alloc(struct fc_exch_mgr *mp, */ ep->oxid = ep->xid = xid; ep->em = mp; - ep->lp = mp->lp; + ep->lp = lport; ep->f_ctl = FC_FC_FIRST_SEQ; /* next seq is first seq */ ep->rxid = FC_XID_UNKNOWN; ep->class = mp->class; @@ -579,6 +579,31 @@ err: mempool_free(ep, mp->ep_pool); return NULL; } + +/** + * fc_exch_alloc() - allocate an exchange. + * @lport: ptr to the local port + * @fp: ptr to the FC frame + * + * This function walks the list of the exchange manager(EM) + * anchors to select a EM for new exchange allocation. The + * EM is selected having either a NULL match function pointer + * or call to match function returning true. + */ +struct fc_exch *fc_exch_alloc(struct fc_lport *lport, struct fc_frame *fp) +{ + struct fc_exch_mgr_anchor *ema; + struct fc_exch *ep; + + list_for_each_entry(ema, &lport->ema_list, ema_list) { + if (!ema->match || ema->match(fp)) { + ep = fc_exch_em_alloc(lport, ema->mp, fp, 0); + if (ep) + return ep; + } + } + return NULL; +} EXPORT_SYMBOL(fc_exch_alloc); /* @@ -617,12 +642,14 @@ EXPORT_SYMBOL(fc_exch_done); * Allocate a new exchange as responder. * Sets the responder ID in the frame header. 
*/ -static struct fc_exch *fc_exch_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) +static struct fc_exch *fc_exch_resp(struct fc_lport *lport, + struct fc_exch_mgr *mp, + struct fc_frame *fp) { struct fc_exch *ep; struct fc_frame_header *fh; - ep = mp->lp->tt.exch_get(mp->lp, fp); + ep = fc_exch_alloc(lport, fp); if (ep) { ep->class = fc_frame_class(fp); @@ -648,7 +675,7 @@ static struct fc_exch *fc_exch_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) ep->esb_stat &= ~ESB_ST_SEQ_INIT; fc_exch_hold(ep); /* hold for caller */ - spin_unlock_bh(&ep->ex_lock); /* lock from exch_get */ + spin_unlock_bh(&ep->ex_lock); /* lock from fc_exch_alloc */ } return ep; } @@ -658,7 +685,8 @@ static struct fc_exch *fc_exch_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) * If fc_pf_rjt_reason is FC_RJT_NONE then this function will have a hold * on the ep that should be released by the caller. */ -static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_exch_mgr *mp, +static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport, + struct fc_exch_mgr *mp, struct fc_frame *fp) { struct fc_frame_header *fh = fc_frame_header_get(fp); @@ -712,7 +740,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_exch_mgr *mp, reject = FC_RJT_RX_ID; goto rel; } - ep = fc_exch_resp(mp, fp); + ep = fc_exch_resp(lport, mp, fp); if (!ep) { reject = FC_RJT_EXCH_EST; /* XXX */ goto out; @@ -1103,7 +1131,7 @@ static void fc_exch_recv_req(struct fc_lport *lp, struct fc_exch_mgr *mp, enum fc_pf_rjt_reason reject; fr_seq(fp) = NULL; - reject = fc_seq_lookup_recip(mp, fp); + reject = fc_seq_lookup_recip(lp, mp, fp); if (reject == FC_RJT_NONE) { sp = fr_seq(fp); /* sequence will be held */ ep = fc_seq_exch(sp); @@ -1467,29 +1495,34 @@ void fc_exch_mgr_reset(struct fc_lport *lp, u32 sid, u32 did) { struct fc_exch *ep; struct fc_exch *next; - struct fc_exch_mgr *mp = lp->emp; + struct fc_exch_mgr *mp; + struct fc_exch_mgr_anchor *ema; - spin_lock_bh(&mp->em_lock); + list_for_each_entry(ema, &lp->ema_list, ema_list) { + mp = ema->mp; + spin_lock_bh(&mp->em_lock); restart: - list_for_each_entry_safe(ep, next, &mp->ex_list, ex_list) { - if ((sid == 0 || sid == ep->sid) && - (did == 0 || did == ep->did)) { - fc_exch_hold(ep); - spin_unlock_bh(&mp->em_lock); - - fc_exch_reset(ep); - - fc_exch_release(ep); - spin_lock_bh(&mp->em_lock); - - /* - * must restart loop incase while lock was down - * multiple eps were released. - */ - goto restart; + list_for_each_entry_safe(ep, next, &mp->ex_list, ex_list) { + if ((lp == ep->lp) && + (sid == 0 || sid == ep->sid) && + (did == 0 || did == ep->did)) { + fc_exch_hold(ep); + spin_unlock_bh(&mp->em_lock); + + fc_exch_reset(ep); + + fc_exch_release(ep); + spin_lock_bh(&mp->em_lock); + + /* + * must restart loop incase while lock + * was down multiple eps were released. 
+ */ + goto restart; + } } + spin_unlock_bh(&mp->em_lock); } - spin_unlock_bh(&mp->em_lock); } EXPORT_SYMBOL(fc_exch_mgr_reset); @@ -1778,7 +1811,8 @@ EXPORT_SYMBOL(fc_exch_mgr_del); struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp, enum fc_class class, - u16 min_xid, u16 max_xid) + u16 min_xid, u16 max_xid, + bool (*match)(struct fc_frame *)) { struct fc_exch_mgr *mp; size_t len; @@ -1803,7 +1837,6 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp, mp->class = class; mp->total_exches = 0; mp->exches = (struct fc_exch **)(mp + 1); - mp->lp = lp; /* adjust em exch xid range for offload */ mp->min_xid = min_xid; mp->max_xid = max_xid; @@ -1826,6 +1859,18 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp, if (!mp->ep_pool) goto free_mp; + kref_init(&mp->kref); + if (!fc_exch_mgr_add(lp, mp, match)) { + mempool_destroy(mp->ep_pool); + goto free_mp; + } + + /* + * Above kref_init() sets mp->kref to 1 and then + * call to fc_exch_mgr_add incremented mp->kref again, + * so adjust that extra increment. + */ + kref_put(&mp->kref, fc_exch_mgr_destroy); return mp; free_mp: @@ -1834,27 +1879,15 @@ free_mp: } EXPORT_SYMBOL(fc_exch_mgr_alloc); -void fc_exch_mgr_free(struct fc_exch_mgr *mp) +void fc_exch_mgr_free(struct fc_lport *lport) { - WARN_ON(!mp); - /* - * The total exch count must be zero - * before freeing exchange manager. - */ - WARN_ON(mp->total_exches != 0); - mempool_destroy(mp->ep_pool); - kfree(mp); + struct fc_exch_mgr_anchor *ema, *next; + + list_for_each_entry_safe(ema, next, &lport->ema_list, ema_list) + fc_exch_mgr_del(ema); } EXPORT_SYMBOL(fc_exch_mgr_free); -struct fc_exch *fc_exch_get(struct fc_lport *lp, struct fc_frame *fp) -{ - if (!lp || !lp->emp) - return NULL; - - return fc_exch_alloc(lp->emp, fp, 0); -} -EXPORT_SYMBOL(fc_exch_get); struct fc_seq *fc_exch_seq_send(struct fc_lport *lp, struct fc_frame *fp, @@ -1869,7 +1902,7 @@ struct fc_seq *fc_exch_seq_send(struct fc_lport *lp, struct fc_frame_header *fh; int rc = 1; - ep = lp->tt.exch_get(lp, fp); + ep = fc_exch_alloc(lp, fp); if (!ep) { fc_frame_free(fp); return NULL; @@ -1914,24 +1947,44 @@ EXPORT_SYMBOL(fc_exch_seq_send); /* * Receive a frame */ -void fc_exch_recv(struct fc_lport *lp, struct fc_exch_mgr *mp, - struct fc_frame *fp) +void fc_exch_recv(struct fc_lport *lp, struct fc_frame *fp) { struct fc_frame_header *fh = fc_frame_header_get(fp); - u32 f_ctl; + struct fc_exch_mgr_anchor *ema; + u32 f_ctl, found = 0; + u16 oxid; /* lport lock ? */ - if (!lp || !mp || lp->state == LPORT_ST_DISABLED) { + if (!lp || lp->state == LPORT_ST_DISABLED) { FC_LPORT_DBG(lp, "Receiving frames for an lport that " "has not been initialized correctly\n"); fc_frame_free(fp); return; } + f_ctl = ntoh24(fh->fh_f_ctl); + oxid = ntohs(fh->fh_ox_id); + if (f_ctl & FC_FC_EX_CTX) { + list_for_each_entry(ema, &lp->ema_list, ema_list) { + if ((oxid >= ema->mp->min_xid) && + (oxid <= ema->mp->max_xid)) { + found = 1; + break; + } + } + + if (!found) { + FC_LPORT_DBG(lp, "Received response for out " + "of range oxid:%hx\n", oxid); + fc_frame_free(fp); + return; + } + } else + ema = list_entry(lp->ema_list.prev, typeof(*ema), ema_list); + /* * If frame is marked invalid, just drop it. 
*/ - f_ctl = ntoh24(fh->fh_f_ctl); switch (fr_eof(fp)) { case FC_EOF_T: if (f_ctl & FC_FC_END_SEQ) @@ -1939,34 +1992,24 @@ void fc_exch_recv(struct fc_lport *lp, struct fc_exch_mgr *mp, /* fall through */ case FC_EOF_N: if (fh->fh_type == FC_TYPE_BLS) - fc_exch_recv_bls(mp, fp); + fc_exch_recv_bls(ema->mp, fp); else if ((f_ctl & (FC_FC_EX_CTX | FC_FC_SEQ_CTX)) == FC_FC_EX_CTX) - fc_exch_recv_seq_resp(mp, fp); + fc_exch_recv_seq_resp(ema->mp, fp); else if (f_ctl & FC_FC_SEQ_CTX) - fc_exch_recv_resp(mp, fp); + fc_exch_recv_resp(ema->mp, fp); else - fc_exch_recv_req(lp, mp, fp); + fc_exch_recv_req(lp, ema->mp, fp); break; default: FC_LPORT_DBG(lp, "dropping invalid frame (eof %x)", fr_eof(fp)); fc_frame_free(fp); - break; } } EXPORT_SYMBOL(fc_exch_recv); int fc_exch_init(struct fc_lport *lp) { - if (!lp->tt.exch_get) { - /* - * exch_put() should be NULL if - * exch_get() is NULL - */ - WARN_ON(lp->tt.exch_put); - lp->tt.exch_get = fc_exch_get; - } - if (!lp->tt.seq_start_next) lp->tt.seq_start_next = fc_seq_start_next; -- cgit v1.2.3 From d7179680d04f1e196b7a5f70e7f93bb1850407c6 Mon Sep 17 00:00:00 2001 From: Vasu Dev Date: Wed, 29 Jul 2009 17:05:21 -0700 Subject: [SCSI] fcoe, libfc: adds offload EM per eth device with only single xid range per EM Updates fcoe_em_config to allocate a single instance of sharable offload EM for supported lp->lro_xid per eth device, and then share this EM for subsequently more lports creation on same eth device (e.g when using VLAN). Adds tiny fcoe_oem_match function for offload EM to return true for read types IO to have read IO exchanges allocated from offload shared EM. Removes fc_em_alloc_xid function completely which was needed to manage two xid ranges within a EM, this is not needed any more with allocation of separate sharable offload EM per eth device. Instead this patch adds simple xid allocation logic to manage single xid range. Adds fc_exch_em_alloc with mp->next_xid as cursor to allocate new xid from single xid range of EM, uses mp->next_xid instead removed mp->last_xid which slightly increase probability of finding empty xid on exch allocation. Removes restriction of not allowing use of xid zero along with changing two xid range change to single xid range. Makes fc_fcp_ddp_setup calling conditional to only xid allocated from shared offload EM. 
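For illustration only, a read-type match callback of the kind described
above could look like the following sketch (the actual fcoe_oem_match is
added in fcoe.c, outside this diff, and may differ in detail):

	/*
	 * Sketch of an offload-EM match callback: return true for read
	 * I/O so read exchanges are allocated from the shared offload EM.
	 */
	static bool fcoe_oem_match(struct fc_frame *fp)
	{
		return fc_fcp_is_read(fr_fsp(fp));
	}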
Signed-off-by: Vasu Dev Signed-off-by: Robert Love Signed-off-by: James Bottomley Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_exch.c | 94 +++++++++----------------------------------- 1 file changed, 18 insertions(+), 76 deletions(-) (limited to 'drivers/scsi/libfc/fc_exch.c') diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index 324589a5cc03..11ddd115efb6 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -58,7 +58,7 @@ struct fc_exch_mgr { struct kref kref; /* exchange mgr reference count */ spinlock_t em_lock; /* exchange manager lock, must be taken before ex_lock */ - u16 last_xid; /* last allocated exchange ID */ + u16 next_xid; /* next possible free exchange ID */ u16 min_xid; /* min exchange ID */ u16 max_xid; /* max exchange ID */ u16 max_read; /* max exchange ID for read */ @@ -464,68 +464,21 @@ static struct fc_seq *fc_seq_alloc(struct fc_exch *ep, u8 seq_id) return sp; } -/* - * fc_em_alloc_xid - returns an xid based on request type - * @lp : ptr to associated lport - * @fp : ptr to the assocated frame - * - * check the associated fc_fsp_pkt to get scsi command type and - * command direction to decide from which range this exch id - * will be allocated from. - * - * Returns : 0 or an valid xid - */ -static u16 fc_em_alloc_xid(struct fc_exch_mgr *mp, const struct fc_frame *fp) -{ - u16 xid, min, max; - u16 *plast; - struct fc_exch *ep = NULL; - - if (mp->max_read) { - if (fc_fcp_is_read(fr_fsp(fp))) { - min = mp->min_xid; - max = mp->max_read; - plast = &mp->last_read; - } else { - min = mp->max_read + 1; - max = mp->max_xid; - plast = &mp->last_xid; - } - } else { - min = mp->min_xid; - max = mp->max_xid; - plast = &mp->last_xid; - } - xid = *plast; - do { - xid = (xid == max) ? min : xid + 1; - ep = mp->exches[xid - mp->min_xid]; - } while ((ep != NULL) && (xid != *plast)); - - if (unlikely(ep)) - xid = 0; - else - *plast = xid; - - return xid; -} - /** * fc_exch_em_alloc() - allocate an exchange from a specified EM. * @lport: ptr to the local port * @mp: ptr to the exchange manager - * @fp: ptr to the FC frame - * @xid: input xid * - * if xid is supplied zero then assign next free exchange ID - * from exchange manager, otherwise use supplied xid. - * Returns with exch lock held. + * Returns pointer to allocated fc_exch with exch lock held. */ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport, - struct fc_exch_mgr *mp, - struct fc_frame *fp, u16 xid) + struct fc_exch_mgr *mp) { struct fc_exch *ep; + u16 min, max, xid; + + min = mp->min_xid; + max = mp->max_xid; /* allocate memory for exchange */ ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC); @@ -536,15 +489,14 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport, memset(ep, 0, sizeof(*ep)); spin_lock_bh(&mp->em_lock); - /* alloc xid if input xid 0 */ - if (!xid) { - /* alloc a new xid */ - xid = fc_em_alloc_xid(mp, fp); - if (!xid) { - printk(KERN_WARNING "libfc: Failed to allocate an exhange\n"); + xid = mp->next_xid; + /* alloc a new xid */ + while (mp->exches[xid - min]) { + xid = (xid == max) ? min : xid + 1; + if (xid == mp->next_xid) goto err; - } } + mp->next_xid = (xid == max) ? 
min : xid + 1; fc_exch_hold(ep); /* hold for exch in mp */ spin_lock_init(&ep->ex_lock); @@ -597,7 +549,7 @@ struct fc_exch *fc_exch_alloc(struct fc_lport *lport, struct fc_frame *fp) list_for_each_entry(ema, &lport->ema_list, ema_list) { if (!ema->match || ema->match(fp)) { - ep = fc_exch_em_alloc(lport, ema->mp, fp, 0); + ep = fc_exch_em_alloc(lport, ema->mp); if (ep) return ep; } @@ -1817,7 +1769,7 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp, struct fc_exch_mgr *mp; size_t len; - if (max_xid <= min_xid || min_xid == 0 || max_xid == FC_XID_UNKNOWN) { + if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) { FC_LPORT_DBG(lp, "Invalid min_xid 0x:%x and max_xid 0x:%x\n", min_xid, max_xid); return NULL; @@ -1826,7 +1778,6 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp, /* * Memory need for EM */ -#define xid_ok(i, m1, m2) (((i) >= (m1)) && ((i) <= (m2))) len = (max_xid - min_xid + 1) * (sizeof(struct fc_exch *)); len += sizeof(struct fc_exch_mgr); @@ -1840,17 +1791,7 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp, /* adjust em exch xid range for offload */ mp->min_xid = min_xid; mp->max_xid = max_xid; - mp->last_xid = min_xid - 1; - mp->max_read = 0; - mp->last_read = 0; - if (lp->lro_enabled && xid_ok(lp->lro_xid, min_xid, max_xid)) { - mp->max_read = lp->lro_xid; - mp->last_read = min_xid - 1; - mp->last_xid = mp->max_read; - } else { - /* disable lro if no xid control over read */ - lp->lro_enabled = 0; - } + mp->next_xid = min_xid; INIT_LIST_HEAD(&mp->ex_list); spin_lock_init(&mp->em_lock); @@ -1922,7 +1863,8 @@ struct fc_seq *fc_exch_seq_send(struct fc_lport *lp, fc_exch_setup_hdr(ep, fp, ep->f_ctl); sp->cnt++; - fc_fcp_ddp_setup(fr_fsp(fp), ep->xid); + if (ep->xid <= lp->lro_xid) + fc_fcp_ddp_setup(fr_fsp(fp), ep->xid); if (unlikely(lp->tt.frame_send(lp, fp))) goto err; -- cgit v1.2.3 From cd305ce41be1615dfc72892e0642c6b880f58d95 Mon Sep 17 00:00:00 2001 From: Robert Love Date: Tue, 25 Aug 2009 13:58:37 -0700 Subject: [SCSI] libfc: Fix misleading debug statement The statement reads, "Exchange timed out, notifying the upper layer", however, this statement is printed whenever the timer is armed. This is confusing to someone debugging the code because every time an exchange is initialized, there is an incorrect statement stating that the timer has already timed out. This patch changes the statement to read, "Exchange timer armed" which is more accurate. This patch also adds a debug statement in the timeout handler to properly indicate that the exchange has timed out. 
Signed-off-by: Robert Love
Signed-off-by: James Bottomley
---
 drivers/scsi/libfc/fc_exch.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
(limited to 'drivers/scsi/libfc/fc_exch.c')

diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 11ddd115efb6..40c34274bd81 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -326,7 +326,7 @@ static inline void fc_exch_timer_set_locked(struct fc_exch *ep,
 	if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
 		return;
 
-	FC_EXCH_DBG(ep, "Exchange timed out, notifying the upper layer\n");
+	FC_EXCH_DBG(ep, "Exchange timer armed\n");
 
 	if (schedule_delayed_work(&ep->timeout_work,
 				  msecs_to_jiffies(timer_msec)))
@@ -412,6 +412,8 @@ static void fc_exch_timeout(struct work_struct *work)
 	u32 e_stat;
 	int rc = 1;
 
+	FC_EXCH_DBG(ep, "Exchange timed out\n");
+
 	spin_lock_bh(&ep->ex_lock);
 	if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
 		goto unlock;
-- cgit v1.2.3

From e4bc50bedf0dd6c63f20a7bc0a2b46667664fba1 Mon Sep 17 00:00:00 2001
From: Vasu Dev
Date: Tue, 25 Aug 2009 13:58:47 -0700
Subject: [SCSI] fcoe, libfc: adds per cpu exch pool within exchange manager(EM)

Adds a per cpu exch pool for these reasons:

1. Currently an EM instance is shared across all cpus to manage all exches
for all cpus. This requires taking em_lock across all cpus for every exch
alloc, free, lookup and reset on each frame, which makes em_lock expensive;
a per cpu exch pool with its own per cpu pool lock will likely reduce
locking contention in the fast path for exch alloc, free and lookup.

2. A per cpu exch pool will likely improve the cache hit ratio, since all
frames of an exch will be processed on the same cpu on which the exch
originated.

This patch is only prep work to keep the complexity of the next patch low,
so it only sets up the per cpu exch pool and related helper funcs to be
used by the next patch. The next patch fully makes use of the per cpu exch
pool in all code paths, i.e. tx, rx and reset.

Divides the per EM exch id range equally across all cpus to set up the per
cpu exch pools. The division is such that the lower bits of an exch id
carry the number of the cpu on which the exch originated; later, a simple
bitwise AND of an incoming frame's exch id with fc_cpu_mask retrieves that
cpu number, so all frames are directed to the same cpu on which the exch
originated. This requires global fc_cpu_mask and fc_cpu_order values,
initialized from the maximum possible cpu count nr_cpu_ids rounded up to a
power of 2; these are used to map between an exch id and an exch ptr array
index in a pool during the exch allocation, find and reset code paths.

Adds a check in fc_exch_mgr_alloc() to ensure the specified min_xid's
lower bits are zero, since these bits are used to carry the cpu info.

Adds and initializes struct fc_exch_pool with all fields required to
manage the exches in a pool. Allocates a per cpu struct fc_exch_pool
together with memory for the exches array covering the range of exches per
pool; the exches array memory directly follows struct fc_exch_pool.

Adds fc_exch_ptr_get/set() helper functions to get/set an exch ptr in a
pool's exches array at a specified index.

Increases the default FCOE_MAX_XID from 0x07EF to 0x0FFF, so that more
exches are available per cpu after the above exch id range division across
the per cpu pools.
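To make the exch id layout concrete, a small illustrative sketch (the
numbers are made up; assume nr_cpu_ids rounds up to 8, so fc_cpu_order = 3
and fc_cpu_mask = 0x7, and min_xid has its low 3 bits clear as enforced by
the new check):

	/* encode: low fc_cpu_order bits of the xid carry the originating cpu */
	u16 cpu = 5;				/* originating cpu           */
	u16 index = 0x12;			/* slot within that cpu pool */
	u16 xid = (index << fc_cpu_order | cpu) + mp->min_xid;
						/* (0x12 << 3) | 5 = 0x95    */

	/* decode on receive: recover cpu and pool index from the xid */
	u16 rx_cpu = xid & fc_cpu_mask;				/* == 5    */
	u16 rx_index = (xid - mp->min_xid) >> fc_cpu_order;	/* == 0x12 */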
Signed-off-by: Vasu Dev Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_exch.c | 89 ++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 86 insertions(+), 3 deletions(-) (limited to 'drivers/scsi/libfc/fc_exch.c') diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index 40c34274bd81..9cbe8d66eb25 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -32,6 +32,9 @@ #include #include +u16 fc_cpu_mask; /* cpu mask for possible cpus */ +EXPORT_SYMBOL(fc_cpu_mask); +static u16 fc_cpu_order; /* 2's power to represent total possible cpus */ static struct kmem_cache *fc_em_cachep; /* cache for exchanges */ /* @@ -47,6 +50,20 @@ static struct kmem_cache *fc_em_cachep; /* cache for exchanges */ * fc_seq holds the state for an individual sequence. */ +/* + * Per cpu exchange pool + * + * This structure manages per cpu exchanges in array of exchange pointers. + * This array is allocated followed by struct fc_exch_pool memory for + * assigned range of exchanges to per cpu pool. + */ +struct fc_exch_pool { + u16 next_index; /* next possible free exchange index */ + u16 total_exches; /* total allocated exchanges */ + spinlock_t lock; /* exch pool lock */ + struct list_head ex_list; /* allocated exchanges list */ +}; + /* * Exchange manager. * @@ -66,6 +83,8 @@ struct fc_exch_mgr { u32 total_exches; /* total allocated exchanges */ struct list_head ex_list; /* allocated exchanges list */ mempool_t *ep_pool; /* reserve ep's */ + u16 pool_max_index; /* max exch array index in exch pool */ + struct fc_exch_pool *pool; /* per cpu exch pool */ /* * currently exchange mgr stats are updated but not used. @@ -303,6 +322,19 @@ static int fc_exch_done_locked(struct fc_exch *ep) return rc; } +static inline struct fc_exch *fc_exch_ptr_get(struct fc_exch_pool *pool, + u16 index) +{ + struct fc_exch **exches = (struct fc_exch **)(pool + 1); + return exches[index]; +} + +static inline void fc_exch_ptr_set(struct fc_exch_pool *pool, u16 index, + struct fc_exch *ep) +{ + ((struct fc_exch **)(pool + 1))[index] = ep; +} + static void fc_exch_mgr_delete_ep(struct fc_exch *ep) { struct fc_exch_mgr *mp; @@ -1751,6 +1783,7 @@ static void fc_exch_mgr_destroy(struct kref *kref) */ WARN_ON(mp->total_exches != 0); mempool_destroy(mp->ep_pool); + free_percpu(mp->pool); kfree(mp); } @@ -1770,8 +1803,13 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp, { struct fc_exch_mgr *mp; size_t len; + u16 pool_exch_range; + size_t pool_size; + unsigned int cpu; + struct fc_exch_pool *pool; - if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) { + if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN || + (min_xid & fc_cpu_mask) != 0) { FC_LPORT_DBG(lp, "Invalid min_xid 0x:%x and max_xid 0x:%x\n", min_xid, max_xid); return NULL; @@ -1802,10 +1840,31 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp, if (!mp->ep_pool) goto free_mp; + /* + * Setup per cpu exch pool with entire exchange id range equally + * divided across all cpus. The exch pointers array memory is + * allocated for exch range per pool. 
+ */ + pool_exch_range = (mp->max_xid - mp->min_xid + 1) / (fc_cpu_mask + 1); + mp->pool_max_index = pool_exch_range - 1; + + /* + * Allocate and initialize per cpu exch pool + */ + pool_size = sizeof(*pool) + pool_exch_range * sizeof(struct fc_exch *); + mp->pool = __alloc_percpu(pool_size, __alignof__(struct fc_exch_pool)); + if (!mp->pool) + goto free_mempool; + for_each_possible_cpu(cpu) { + pool = per_cpu_ptr(mp->pool, cpu); + spin_lock_init(&pool->lock); + INIT_LIST_HEAD(&pool->ex_list); + } + kref_init(&mp->kref); if (!fc_exch_mgr_add(lp, mp, match)) { - mempool_destroy(mp->ep_pool); - goto free_mp; + free_percpu(mp->pool); + goto free_mempool; } /* @@ -1816,6 +1875,8 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp, kref_put(&mp->kref, fc_exch_mgr_destroy); return mp; +free_mempool: + mempool_destroy(mp->ep_pool); free_mp: kfree(mp); return NULL; @@ -1975,6 +2036,28 @@ int fc_exch_init(struct fc_lport *lp) if (!lp->tt.seq_exch_abort) lp->tt.seq_exch_abort = fc_seq_exch_abort; + /* + * Initialize fc_cpu_mask and fc_cpu_order. The + * fc_cpu_mask is set for nr_cpu_ids rounded up + * to order of 2's * power and order is stored + * in fc_cpu_order as this is later required in + * mapping between an exch id and exch array index + * in per cpu exch pool. + * + * This round up is required to align fc_cpu_mask + * to exchange id's lower bits such that all incoming + * frames of an exchange gets delivered to the same + * cpu on which exchange originated by simple bitwise + * AND operation between fc_cpu_mask and exchange id. + */ + fc_cpu_mask = 1; + fc_cpu_order = 0; + while (fc_cpu_mask < nr_cpu_ids) { + fc_cpu_mask <<= 1; + fc_cpu_order++; + } + fc_cpu_mask--; + return 0; } EXPORT_SYMBOL(fc_exch_init); -- cgit v1.2.3 From b2f0091fbf8b475fa09b5e1712e0ab84cb3e1ca4 Mon Sep 17 00:00:00 2001 From: Vasu Dev Date: Tue, 25 Aug 2009 13:58:53 -0700 Subject: [SCSI] fcoe, libfc: fully makes use of per cpu exch pool and then removes em_lock 1. Updates fcoe_rcv() to queue incoming frames to the fcoe per cpu thread on which this frame's exch was originated and simply use current cpu for request exch not originated by initiator. It is redundant to add this code under CONFIG_SMP, so removes CONFIG_SMP uses around this code. 2. Updates fc_exch_em_alloc, fc_exch_delete, fc_exch_find to use per cpu exch pools, here fc_exch_delete is rename of older fc_exch_mgr_delete_ep since ep/exch are now deleted in pools of EM and so brief new name is sufficient and better name. Updates these functions to map exch id to their index into exch pool using fc_cpu_mask, fc_cpu_order and EM min_xid. This mapping is as per detailed explanation about this in last patch and basically this is just as lower fc_cpu_mask bits of exch id as cpu number and upper bit sum of EM min_xid and exch index in pool. Uses pool next_index to keep track of exch allocation from pool along with pool_max_index as upper bound of exches array in pool. 3. Adds exch pool ptr to fc_exch to free exch to its pool in fc_exch_delete. 4. Updates fc_exch_mgr_reset to reset all exch pools of an EM, this required adding fc_exch_pool_reset func to reset exches in pool and then have fc_exch_mgr_reset call fc_exch_pool_reset for each pool within each EM for a lport. 5. Removes no longer needed exches array, em_lock, next_xid, and total_exches from struct fc_exch_mgr, these are not needed after use of per cpu exch pool, also removes not used max_read, last_read from struct fc_exch_mgr. 6. 
Updates locking notes for exch pool lock with fc_exch lock and uses pool lock in exch allocation, lookup and reset. Signed-off-by: Vasu Dev Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_exch.c | 189 +++++++++++++++++++++++-------------------- 1 file changed, 100 insertions(+), 89 deletions(-) (limited to 'drivers/scsi/libfc/fc_exch.c') diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index 9cbe8d66eb25..b51db15a3876 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -73,14 +73,8 @@ struct fc_exch_pool { struct fc_exch_mgr { enum fc_class class; /* default class for sequences */ struct kref kref; /* exchange mgr reference count */ - spinlock_t em_lock; /* exchange manager lock, - must be taken before ex_lock */ - u16 next_xid; /* next possible free exchange ID */ u16 min_xid; /* min exchange ID */ u16 max_xid; /* max exchange ID */ - u16 max_read; /* max exchange ID for read */ - u16 last_read; /* last xid allocated for read */ - u32 total_exches; /* total allocated exchanges */ struct list_head ex_list; /* allocated exchanges list */ mempool_t *ep_pool; /* reserve ep's */ u16 pool_max_index; /* max exch array index in exch pool */ @@ -99,7 +93,6 @@ struct fc_exch_mgr { atomic_t seq_not_found; atomic_t non_bls_resp; } stats; - struct fc_exch **exches; /* for exch pointers indexed by xid */ }; #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq) @@ -192,8 +185,8 @@ static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp); * sequence allocation and deallocation must be locked. * - exchange refcnt can be done atomicly without locks. * - sequence allocation must be locked by exch lock. - * - If the em_lock and ex_lock must be taken at the same time, then the - * em_lock must be taken before the ex_lock. + * - If the EM pool lock and ex_lock must be taken at the same time, then the + * EM pool lock must be taken before the ex_lock. 
*/ /* @@ -335,17 +328,18 @@ static inline void fc_exch_ptr_set(struct fc_exch_pool *pool, u16 index, ((struct fc_exch **)(pool + 1))[index] = ep; } -static void fc_exch_mgr_delete_ep(struct fc_exch *ep) +static void fc_exch_delete(struct fc_exch *ep) { - struct fc_exch_mgr *mp; + struct fc_exch_pool *pool; - mp = ep->em; - spin_lock_bh(&mp->em_lock); - WARN_ON(mp->total_exches <= 0); - mp->total_exches--; - mp->exches[ep->xid - mp->min_xid] = NULL; + pool = ep->pool; + spin_lock_bh(&pool->lock); + WARN_ON(pool->total_exches <= 0); + pool->total_exches--; + fc_exch_ptr_set(pool, (ep->xid - ep->em->min_xid) >> fc_cpu_order, + NULL); list_del(&ep->ex_list); - spin_unlock_bh(&mp->em_lock); + spin_unlock_bh(&pool->lock); fc_exch_release(ep); /* drop hold for exch in mp */ } @@ -465,7 +459,7 @@ static void fc_exch_timeout(struct work_struct *work) rc = fc_exch_done_locked(ep); spin_unlock_bh(&ep->ex_lock); if (!rc) - fc_exch_mgr_delete_ep(ep); + fc_exch_delete(ep); if (resp) resp(sp, ERR_PTR(-FC_EX_TIMEOUT), arg); fc_seq_exch_abort(sp, 2 * ep->r_a_tov); @@ -509,10 +503,9 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport, struct fc_exch_mgr *mp) { struct fc_exch *ep; - u16 min, max, xid; - - min = mp->min_xid; - max = mp->max_xid; + unsigned int cpu; + u16 index; + struct fc_exch_pool *pool; /* allocate memory for exchange */ ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC); @@ -522,15 +515,17 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport, } memset(ep, 0, sizeof(*ep)); - spin_lock_bh(&mp->em_lock); - xid = mp->next_xid; - /* alloc a new xid */ - while (mp->exches[xid - min]) { - xid = (xid == max) ? min : xid + 1; - if (xid == mp->next_xid) + cpu = smp_processor_id(); + pool = per_cpu_ptr(mp->pool, cpu); + spin_lock_bh(&pool->lock); + index = pool->next_index; + /* allocate new exch from pool */ + while (fc_exch_ptr_get(pool, index)) { + index = index == mp->pool_max_index ? 0 : index + 1; + if (index == pool->next_index) goto err; } - mp->next_xid = (xid == max) ? min : xid + 1; + pool->next_index = index == mp->pool_max_index ? 
0 : index + 1; fc_exch_hold(ep); /* hold for exch in mp */ spin_lock_init(&ep->ex_lock); @@ -541,17 +536,18 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport, */ spin_lock_bh(&ep->ex_lock); - mp->exches[xid - mp->min_xid] = ep; - list_add_tail(&ep->ex_list, &mp->ex_list); + fc_exch_ptr_set(pool, index, ep); + list_add_tail(&ep->ex_list, &pool->ex_list); fc_seq_alloc(ep, ep->seq_id++); - mp->total_exches++; - spin_unlock_bh(&mp->em_lock); + pool->total_exches++; + spin_unlock_bh(&pool->lock); /* * update exchange */ - ep->oxid = ep->xid = xid; + ep->oxid = ep->xid = (index << fc_cpu_order | cpu) + mp->min_xid; ep->em = mp; + ep->pool = pool; ep->lp = lport; ep->f_ctl = FC_FC_FIRST_SEQ; /* next seq is first seq */ ep->rxid = FC_XID_UNKNOWN; @@ -560,7 +556,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport, out: return ep; err: - spin_unlock_bh(&mp->em_lock); + spin_unlock_bh(&pool->lock); atomic_inc(&mp->stats.no_free_exch_xid); mempool_free(ep, mp->ep_pool); return NULL; @@ -597,16 +593,18 @@ EXPORT_SYMBOL(fc_exch_alloc); */ static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid) { + struct fc_exch_pool *pool; struct fc_exch *ep = NULL; if ((xid >= mp->min_xid) && (xid <= mp->max_xid)) { - spin_lock_bh(&mp->em_lock); - ep = mp->exches[xid - mp->min_xid]; + pool = per_cpu_ptr(mp->pool, xid & fc_cpu_mask); + spin_lock_bh(&pool->lock); + ep = fc_exch_ptr_get(pool, (xid - mp->min_xid) >> fc_cpu_order); if (ep) { fc_exch_hold(ep); WARN_ON(ep->xid != xid); } - spin_unlock_bh(&mp->em_lock); + spin_unlock_bh(&pool->lock); } return ep; } @@ -620,7 +618,7 @@ void fc_exch_done(struct fc_seq *sp) rc = fc_exch_done_locked(ep); spin_unlock_bh(&ep->ex_lock); if (!rc) - fc_exch_mgr_delete_ep(ep); + fc_exch_delete(ep); } EXPORT_SYMBOL(fc_exch_done); @@ -1213,7 +1211,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) WARN_ON(fc_seq_exch(sp) != ep); spin_unlock_bh(&ep->ex_lock); if (!rc) - fc_exch_mgr_delete_ep(ep); + fc_exch_delete(ep); } /* @@ -1323,7 +1321,7 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp) rc = fc_exch_done_locked(ep); spin_unlock_bh(&ep->ex_lock); if (!rc) - fc_exch_mgr_delete_ep(ep); + fc_exch_delete(ep); if (resp) resp(sp, fp, ex_resp_arg); @@ -1466,48 +1464,76 @@ static void fc_exch_reset(struct fc_exch *ep) rc = fc_exch_done_locked(ep); spin_unlock_bh(&ep->ex_lock); if (!rc) - fc_exch_mgr_delete_ep(ep); + fc_exch_delete(ep); if (resp) resp(sp, ERR_PTR(-FC_EX_CLOSED), arg); } -/* - * Reset an exchange manager, releasing all sequences and exchanges. - * If sid is non-zero, reset only exchanges we source from that FID. - * If did is non-zero, reset only exchanges destined to that FID. +/** + * fc_exch_pool_reset() - Resets an per cpu exches pool. + * @lport: ptr to the local port + * @pool: ptr to the per cpu exches pool + * @sid: source FC ID + * @did: destination FC ID + * + * Resets an per cpu exches pool, releasing its all sequences + * and exchanges. If sid is non-zero, then reset only exchanges + * we sourced from that FID. If did is non-zero, reset only + * exchanges destined to that FID. 
*/ -void fc_exch_mgr_reset(struct fc_lport *lp, u32 sid, u32 did) +static void fc_exch_pool_reset(struct fc_lport *lport, + struct fc_exch_pool *pool, + u32 sid, u32 did) { struct fc_exch *ep; struct fc_exch *next; - struct fc_exch_mgr *mp; - struct fc_exch_mgr_anchor *ema; - list_for_each_entry(ema, &lp->ema_list, ema_list) { - mp = ema->mp; - spin_lock_bh(&mp->em_lock); + spin_lock_bh(&pool->lock); restart: - list_for_each_entry_safe(ep, next, &mp->ex_list, ex_list) { - if ((lp == ep->lp) && - (sid == 0 || sid == ep->sid) && - (did == 0 || did == ep->did)) { - fc_exch_hold(ep); - spin_unlock_bh(&mp->em_lock); - - fc_exch_reset(ep); - - fc_exch_release(ep); - spin_lock_bh(&mp->em_lock); - - /* - * must restart loop incase while lock - * was down multiple eps were released. - */ - goto restart; - } + list_for_each_entry_safe(ep, next, &pool->ex_list, ex_list) { + if ((lport == ep->lp) && + (sid == 0 || sid == ep->sid) && + (did == 0 || did == ep->did)) { + fc_exch_hold(ep); + spin_unlock_bh(&pool->lock); + + fc_exch_reset(ep); + + fc_exch_release(ep); + spin_lock_bh(&pool->lock); + + /* + * must restart loop incase while lock + * was down multiple eps were released. + */ + goto restart; } - spin_unlock_bh(&mp->em_lock); + } + spin_unlock_bh(&pool->lock); +} + +/** + * fc_exch_mgr_reset() - Resets all EMs of a lport + * @lport: ptr to the local port + * @sid: source FC ID + * @did: destination FC ID + * + * Reset all EMs of a lport, releasing its all sequences and + * exchanges. If sid is non-zero, then reset only exchanges + * we sourced from that FID. If did is non-zero, reset only + * exchanges destined to that FID. + */ +void fc_exch_mgr_reset(struct fc_lport *lport, u32 sid, u32 did) +{ + struct fc_exch_mgr_anchor *ema; + unsigned int cpu; + + list_for_each_entry(ema, &lport->ema_list, ema_list) { + for_each_possible_cpu(cpu) + fc_exch_pool_reset(lport, + per_cpu_ptr(ema->mp->pool, cpu), + sid, did); } } EXPORT_SYMBOL(fc_exch_mgr_reset); @@ -1777,11 +1803,6 @@ static void fc_exch_mgr_destroy(struct kref *kref) { struct fc_exch_mgr *mp = container_of(kref, struct fc_exch_mgr, kref); - /* - * The total exch count must be zero - * before freeing exchange manager. 
- */ - WARN_ON(mp->total_exches != 0); mempool_destroy(mp->ep_pool); free_percpu(mp->pool); kfree(mp); @@ -1802,7 +1823,6 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp, bool (*match)(struct fc_frame *)) { struct fc_exch_mgr *mp; - size_t len; u16 pool_exch_range; size_t pool_size; unsigned int cpu; @@ -1816,25 +1836,16 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp, } /* - * Memory need for EM + * allocate memory for EM */ - len = (max_xid - min_xid + 1) * (sizeof(struct fc_exch *)); - len += sizeof(struct fc_exch_mgr); - - mp = kzalloc(len, GFP_ATOMIC); + mp = kzalloc(sizeof(struct fc_exch_mgr), GFP_ATOMIC); if (!mp) return NULL; mp->class = class; - mp->total_exches = 0; - mp->exches = (struct fc_exch **)(mp + 1); /* adjust em exch xid range for offload */ mp->min_xid = min_xid; mp->max_xid = max_xid; - mp->next_xid = min_xid; - - INIT_LIST_HEAD(&mp->ex_list); - spin_lock_init(&mp->em_lock); mp->ep_pool = mempool_create_slab_pool(2, fc_em_cachep); if (!mp->ep_pool) @@ -1944,7 +1955,7 @@ err: rc = fc_exch_done_locked(ep); spin_unlock_bh(&ep->ex_lock); if (!rc) - fc_exch_mgr_delete_ep(ep); + fc_exch_delete(ep); return NULL; } EXPORT_SYMBOL(fc_exch_seq_send); -- cgit v1.2.3 From 1d490ce33ee8b93638d09e471a3bc66ae33b6606 Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 25 Aug 2009 14:04:03 -0700 Subject: [SCSI] libfc: don't swap OX_ID and RX_ID when sending BA_RJT I saw an lport debug message from the exchange manager saying: "lport 70500: Received response for out of range oxid:ffff" A trace showed this was a BA_RJT sent due to an incoming ABTS which arrived on an unknown exchange. So, the sender of the BA_RJT was in error, but in this case, both the initiator and responder were the same machine. The OX_ID and RX_ID should not have been reversed in this case. Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_exch.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/scsi/libfc/fc_exch.c') diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index b51db15a3876..c1c15748220c 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -1017,8 +1017,8 @@ static void fc_exch_send_ba_rjt(struct fc_frame *rx_fp, */ memcpy(fh->fh_s_id, rx_fh->fh_d_id, 3); memcpy(fh->fh_d_id, rx_fh->fh_s_id, 3); - fh->fh_ox_id = rx_fh->fh_rx_id; - fh->fh_rx_id = rx_fh->fh_ox_id; + fh->fh_ox_id = rx_fh->fh_ox_id; + fh->fh_rx_id = rx_fh->fh_rx_id; fh->fh_seq_cnt = rx_fh->fh_seq_cnt; fh->fh_r_ctl = FC_RCTL_BA_RJT; fh->fh_type = FC_TYPE_BLS; -- cgit v1.2.3