
Merge branch 'ehci-omap-clock' into omap-fixes

Tony Lindgren, 14 years ago
parent commit 274353674d
100 files changed, 3724 additions and 4997 deletions
  1. Documentation/filesystems/Locking (+16 -13)
  2. Documentation/filesystems/dentry-locking.txt (+0 -174)
  3. Documentation/filesystems/path-lookup.txt (+382 -0)
  4. Documentation/filesystems/porting (+68 -1)
  5. Documentation/filesystems/vfs.txt (+57 -17)
  6. Documentation/scsi/ChangeLog.megaraid_sas (+22 -0)
  7. Documentation/usb/power-management.txt (+58 -55)
  8. arch/arm/mach-davinci/usb.c (+5 -1)
  9. arch/arm/mach-omap2/Kconfig (+1 -0)
  10. arch/arm/mach-omap2/Makefile (+4 -2)
  11. arch/arm/mach-omap2/board-4430sdp.c (+29 -6)
  12. arch/arm/mach-omap2/board-n8x0.c (+2 -3)
  13. arch/arm/mach-omap2/board-omap4panda.c (+10 -4)
  14. arch/arm/mach-omap2/clock2420_data.c (+1 -1)
  15. arch/arm/mach-omap2/clock2430_data.c (+1 -1)
  16. arch/arm/mach-omap2/clock3xxx_data.c (+9 -4)
  17. arch/arm/mach-omap2/clock44xx_data.c (+6 -1)
  18. arch/arm/mach-omap2/omap_phy_internal.c (+149 -0)
  19. arch/arm/mach-omap2/usb-ehci.c (+136 -8)
  20. arch/arm/mach-omap2/usb-musb.c (+102 -2)
  21. arch/arm/mach-omap2/usb-tusb6010.c (+1 -1)
  22. arch/arm/plat-omap/include/plat/omap44xx.h (+5 -0)
  23. arch/arm/plat-omap/include/plat/usb.h (+10 -0)
  24. arch/blackfin/mach-bf527/boards/ad7160eval.c (+1 -1)
  25. arch/blackfin/mach-bf527/boards/cm_bf527.c (+3 -1)
  26. arch/blackfin/mach-bf527/boards/ezbrd.c (+3 -1)
  27. arch/blackfin/mach-bf527/boards/ezkit.c (+3 -1)
  28. arch/blackfin/mach-bf527/boards/tll6527m.c (+1 -1)
  29. arch/blackfin/mach-bf548/boards/cm_bf548.c (+3 -1)
  30. arch/blackfin/mach-bf548/boards/ezkit.c (+3 -1)
  31. arch/ia64/kernel/perfmon.c (+3 -3)
  32. arch/powerpc/platforms/cell/spufs/inode.c (+12 -6)
  33. arch/sh/Kconfig (+5 -0)
  34. arch/sh/kernel/cpu/sh4a/setup-sh7786.c (+31 -4)
  35. drivers/hid/hid-core.c (+1 -0)
  36. drivers/hid/hid-ids.h (+2 -0)
  37. drivers/infiniband/hw/ipath/ipath_fs.c (+2 -6)
  38. drivers/infiniband/hw/qib/qib_fs.c (+1 -4)
  39. drivers/input/keyboard/tc3589x-keypad.c (+1 -1)
  40. drivers/media/video/tlg2300/pd-main.c (+2 -1)
  41. drivers/mfd/twl-core.c (+39 -5)
  42. drivers/mfd/twl6030-irq.c (+8 -1)
  43. drivers/mtd/mtdchar.c (+1 -1)
  44. drivers/net/wimax/i2400m/usb.c (+1 -1)
  45. drivers/s390/scsi/zfcp_aux.c (+4 -5)
  46. drivers/s390/scsi/zfcp_ccw.c (+7 -7)
  47. drivers/s390/scsi/zfcp_cfdc.c (+4 -4)
  48. drivers/s390/scsi/zfcp_dbf.c (+269 -857)
  49. drivers/s390/scsi/zfcp_dbf.h (+258 -239)
  50. drivers/s390/scsi/zfcp_erp.c (+59 -82)
  51. drivers/s390/scsi/zfcp_ext.h (+20 -32)
  52. drivers/s390/scsi/zfcp_fc.c (+10 -10)
  53. drivers/s390/scsi/zfcp_fsf.c (+58 -55)
  54. drivers/s390/scsi/zfcp_qdio.c (+5 -5)
  55. drivers/s390/scsi/zfcp_scsi.c (+23 -20)
  56. drivers/s390/scsi/zfcp_sysfs.c (+4 -5)
  57. drivers/scsi/arcmsr/arcmsr_hba.c (+1 -2)
  58. drivers/scsi/be2iscsi/be_main.c (+3 -2)
  59. drivers/scsi/bfa/Makefile (+1 -3)
  60. drivers/scsi/bfa/bfa.h (+3 -33)
  61. drivers/scsi/bfa/bfa_cb_ioim.h (+0 -169)
  62. drivers/scsi/bfa/bfa_core.c (+104 -269)
  63. drivers/scsi/bfa/bfa_cs.h (+49 -47)
  64. drivers/scsi/bfa/bfa_defs.h (+3 -3)
  65. drivers/scsi/bfa/bfa_defs_svc.h (+4 -4)
  66. drivers/scsi/bfa/bfa_drv.c (+0 -107)
  67. drivers/scsi/bfa/bfa_fc.h (+151 -457)
  68. drivers/scsi/bfa/bfa_fcbuild.c (+52 -57)
  69. drivers/scsi/bfa/bfa_fcbuild.h (+15 -15)
  70. drivers/scsi/bfa/bfa_fcpim.c (+120 -422)
  71. drivers/scsi/bfa/bfa_fcpim.h (+71 -117)
  72. drivers/scsi/bfa/bfa_fcs.c (+34 -172)
  73. drivers/scsi/bfa/bfa_fcs.h (+81 -43)
  74. drivers/scsi/bfa/bfa_fcs_fcpim.c (+15 -15)
  75. drivers/scsi/bfa/bfa_fcs_lport.c (+177 -213)
  76. drivers/scsi/bfa/bfa_fcs_rport.c (+17 -213)
  77. drivers/scsi/bfa/bfa_hw_cb.c (+2 -1)
  78. drivers/scsi/bfa/bfa_hw_ct.c (+3 -2)
  79. drivers/scsi/bfa/bfa_ioc.c (+269 -232)
  80. drivers/scsi/bfa/bfa_ioc.h (+21 -23)
  81. drivers/scsi/bfa/bfa_ioc_cb.c (+92 -5)
  82. drivers/scsi/bfa/bfa_ioc_ct.c (+108 -12)
  83. drivers/scsi/bfa/bfa_modules.h (+0 -3)
  84. drivers/scsi/bfa/bfa_os_inc.h (+0 -143)
  85. drivers/scsi/bfa/bfa_plog.h (+0 -4)
  86. drivers/scsi/bfa/bfa_port.c (+12 -25)
  87. drivers/scsi/bfa/bfa_port.h (+0 -1)
  88. drivers/scsi/bfa/bfa_svc.c (+180 -339)
  89. drivers/scsi/bfa/bfa_svc.h (+14 -49)
  90. drivers/scsi/bfa/bfad.c (+18 -20)
  91. drivers/scsi/bfa/bfad_attr.c (+9 -9)
  92. drivers/scsi/bfa/bfad_debugfs.c (+39 -7)
  93. drivers/scsi/bfa/bfad_drv.h (+27 -18)
  94. drivers/scsi/bfa/bfad_im.c (+30 -29)
  95. drivers/scsi/bfa/bfad_im.h (+8 -8)
  96. drivers/scsi/bfa/bfi.h (+4 -4)
  97. drivers/scsi/bfa/bfi_cbreg.h (+1 -0)
  98. drivers/scsi/bfa/bfi_ctreg.h (+25 -16)
  99. drivers/scsi/bfa/bfi_ms.h (+38 -28)
  100. drivers/scsi/bnx2i/57xx_iscsi_constants.h (+2 -1)

+ 16 - 13
Documentation/filesystems/Locking

@@ -9,22 +9,25 @@ be able to use diff(1).
 
 --------------------------- dentry_operations --------------------------
 prototypes:
-	int (*d_revalidate)(struct dentry *, int);
-	int (*d_hash) (struct dentry *, struct qstr *);
-	int (*d_compare) (struct dentry *, struct qstr *, struct qstr *);
+	int (*d_revalidate)(struct dentry *, struct nameidata *);
+	int (*d_hash)(const struct dentry *, const struct inode *,
+			struct qstr *);
+	int (*d_compare)(const struct dentry *, const struct inode *,
+			const struct dentry *, const struct inode *,
+			unsigned int, const char *, const struct qstr *);
 	int (*d_delete)(struct dentry *);
 	void (*d_release)(struct dentry *);
 	void (*d_iput)(struct dentry *, struct inode *);
 	char *(*d_dname)((struct dentry *dentry, char *buffer, int buflen);
 
 locking rules:
-		dcache_lock	rename_lock	->d_lock	may block
-d_revalidate:	no		no		no		yes
-d_hash		no		no		no		yes
-d_compare:	no		yes		no		no 
-d_delete:	yes		no		yes		no
-d_release:	no		no		no		yes
-d_iput:		no		no		no		yes
+		rename_lock	->d_lock	may block	rcu-walk
+d_revalidate:	no		no		yes (ref-walk)	maybe
+d_hash		no		no		no		maybe
+d_compare:	yes		no		no		maybe
+d_delete:	no		yes		no		no
+d_release:	no		no		yes		no
+d_iput:		no		no		yes		no
 d_dname:	no		no		no		no
 
 --------------------------- inode_operations --------------------------- 
@@ -44,8 +47,8 @@ ata *);
 	void * (*follow_link) (struct dentry *, struct nameidata *);
 	void (*put_link) (struct dentry *, struct nameidata *, void *);
 	void (*truncate) (struct inode *);
-	int (*permission) (struct inode *, int, struct nameidata *);
-	int (*check_acl)(struct inode *, int);
+	int (*permission) (struct inode *, int, unsigned int);
+	int (*check_acl)(struct inode *, int, unsigned int);
 	int (*setattr) (struct dentry *, struct iattr *);
 	int (*getattr) (struct vfsmount *, struct dentry *, struct kstat *);
 	int (*setxattr) (struct dentry *, const char *,const void *,size_t,int);
@@ -73,7 +76,7 @@ follow_link:	no
 put_link:	no
 truncate:	yes		(see below)
 setattr:	yes
-permission:	no
+permission:	no (may not block if called in rcu-walk mode)
 check_acl:	no
 getattr:	no
 setxattr:	yes
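
As a rough illustration of the rcu-walk rule added above ("may not block if called in rcu-walk mode"), here is a minimal sketch of what an rcu-walk aware ->permission() method could look like, using the IPERM_RCU flag named in the documentation changes; the examplefs_* names and the examplefs_do_check() helper are illustrative assumptions, not code from this commit:

	static int examplefs_permission(struct inode *inode, int mask,
					unsigned int flags)
	{
		/* rcu-walk mode: must not block or store to the inode */
		if (flags & IPERM_RCU)
			return -ECHILD;	/* ask the VFS to retry in ref-walk */

		/* ref-walk mode: a normal, possibly blocking check */
		return examplefs_do_check(inode, mask);	/* hypothetical helper */
	}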

+ 0 - 174
Documentation/filesystems/dentry-locking.txt

@@ -1,174 +0,0 @@
-RCU-based dcache locking model
-==============================
-
-On many workloads, the most common operation on dcache is to look up a
-dentry, given a parent dentry and the name of the child. Typically,
-for every open(), stat() etc., the dentry corresponding to the
-pathname will be looked up by walking the tree starting with the first
-component of the pathname and using that dentry along with the next
-component to look up the next level and so on. Since it is a frequent
-operation for workloads like multiuser environments and web servers,
-it is important to optimize this path.
-
-Prior to 2.5.10, dcache_lock was acquired in d_lookup and thus in
-every component during path look-up. Since 2.5.10 onwards, fast-walk
-algorithm changed this by holding the dcache_lock at the beginning and
-walking as many cached path component dentries as possible. This
-significantly decreases the number of acquisition of
-dcache_lock. However it also increases the lock hold time
-significantly and affects performance in large SMP machines. Since
-2.5.62 kernel, dcache has been using a new locking model that uses RCU
-to make dcache look-up lock-free.
-
-The current dcache locking model is not very different from the
-existing dcache locking model. Prior to 2.5.62 kernel, dcache_lock
-protected the hash chain, d_child, d_alias, d_lru lists as well as
-d_inode and several other things like mount look-up. RCU-based changes
-affect only the way the hash chain is protected. For everything else
-the dcache_lock must be taken for both traversing as well as
-updating. The hash chain updates too take the dcache_lock.  The
-significant change is the way d_lookup traverses the hash chain, it
-doesn't acquire the dcache_lock for this and rely on RCU to ensure
-that the dentry has not been *freed*.
-
-
-Dcache locking details
-======================
-
-For many multi-user workloads, open() and stat() on files are very
-frequently occurring operations. Both involve walking of path names to
-find the dentry corresponding to the concerned file. In 2.4 kernel,
-dcache_lock was held during look-up of each path component. Contention
-and cache-line bouncing of this global lock caused significant
-scalability problems. With the introduction of RCU in Linux kernel,
-this was worked around by making the look-up of path components during
-path walking lock-free.
-
-
-Safe lock-free look-up of dcache hash table
-===========================================
-
-Dcache is a complex data structure with the hash table entries also
-linked together in other lists. In 2.4 kernel, dcache_lock protected
-all the lists. We applied RCU only on hash chain walking. The rest of
-the lists are still protected by dcache_lock.  Some of the important
-changes are :
-
-1. The deletion from hash chain is done using hlist_del_rcu() macro
-   which doesn't initialize next pointer of the deleted dentry and
-   this allows us to walk safely lock-free while a deletion is
-   happening.
-
-2. Insertion of a dentry into the hash table is done using
-   hlist_add_head_rcu() which take care of ordering the writes - the
-   writes to the dentry must be visible before the dentry is
-   inserted. This works in conjunction with hlist_for_each_rcu(),
-   which has since been replaced by hlist_for_each_entry_rcu(), while
-   walking the hash chain. The only requirement is that all
-   initialization to the dentry must be done before
-   hlist_add_head_rcu() since we don't have dcache_lock protection
-   while traversing the hash chain. This isn't different from the
-   existing code.
-
-3. The dentry looked up without holding dcache_lock by cannot be
-   returned for walking if it is unhashed. It then may have a NULL
-   d_inode or other bogosity since RCU doesn't protect the other
-   fields in the dentry. We therefore use a flag DCACHE_UNHASHED to
-   indicate unhashed dentries and use this in conjunction with a
-   per-dentry lock (d_lock). Once looked up without the dcache_lock,
-   we acquire the per-dentry lock (d_lock) and check if the dentry is
-   unhashed. If so, the look-up is failed. If not, the reference count
-   of the dentry is increased and the dentry is returned.
-
-4. Once a dentry is looked up, it must be ensured during the path walk
-   for that component it doesn't go away. In pre-2.5.10 code, this was
-   done holding a reference to the dentry. dcache_rcu does the same.
-   In some sense, dcache_rcu path walking looks like the pre-2.5.10
-   version.
-
-5. All dentry hash chain updates must take the dcache_lock as well as
-   the per-dentry lock in that order. dput() does this to ensure that
-   a dentry that has just been looked up in another CPU doesn't get
-   deleted before dget() can be done on it.
-
-6. There are several ways to do reference counting of RCU protected
-   objects. One such example is in ipv4 route cache where deferred
-   freeing (using call_rcu()) is done as soon as the reference count
-   goes to zero. This cannot be done in the case of dentries because
-   tearing down of dentries require blocking (dentry_iput()) which
-   isn't supported from RCU callbacks. Instead, tearing down of
-   dentries happen synchronously in dput(), but actual freeing happens
-   later when RCU grace period is over. This allows safe lock-free
-   walking of the hash chains, but a matched dentry may have been
-   partially torn down. The checking of DCACHE_UNHASHED flag with
-   d_lock held detects such dentries and prevents them from being
-   returned from look-up.
-
-
-Maintaining POSIX rename semantics
-==================================
-
-Since look-up of dentries is lock-free, it can race against a
-concurrent rename operation. For example, during rename of file A to
-B, look-up of either A or B must succeed.  So, if look-up of B happens
-after A has been removed from the hash chain but not added to the new
-hash chain, it may fail.  Also, a comparison while the name is being
-written concurrently by a rename may result in false positive matches
-violating rename semantics.  Issues related to race with rename are
-handled as described below :
-
-1. Look-up can be done in two ways - d_lookup() which is safe from
-   simultaneous renames and __d_lookup() which is not.  If
-   __d_lookup() fails, it must be followed up by a d_lookup() to
-   correctly determine whether a dentry is in the hash table or
-   not. d_lookup() protects look-ups using a sequence lock
-   (rename_lock).
-
-2. The name associated with a dentry (d_name) may be changed if a
-   rename is allowed to happen simultaneously. To avoid memcmp() in
-   __d_lookup() go out of bounds due to a rename and false positive
-   comparison, the name comparison is done while holding the
-   per-dentry lock. This prevents concurrent renames during this
-   operation.
-
-3. Hash table walking during look-up may move to a different bucket as
-   the current dentry is moved to a different bucket due to rename.
-   But we use hlists in dcache hash table and they are
-   null-terminated.  So, even if a dentry moves to a different bucket,
-   hash chain walk will terminate. [with a list_head list, it may not
-   since termination is when the list_head in the original bucket is
-   reached].  Since we redo the d_parent check and compare name while
-   holding d_lock, lock-free look-up will not race against d_move().
-
-4. There can be a theoretical race when a dentry keeps coming back to
-   original bucket due to double moves. Due to this look-up may
-   consider that it has never moved and can end up in a infinite loop.
-   But this is not any worse that theoretical livelocks we already
-   have in the kernel.
-
-
-Important guidelines for filesystem developers related to dcache_rcu
-====================================================================
-
-1. Existing dcache interfaces (pre-2.5.62) exported to filesystem
-   don't change. Only dcache internal implementation changes. However
-   filesystems *must not* delete from the dentry hash chains directly
-   using the list macros like allowed earlier. They must use dcache
-   APIs like d_drop() or __d_drop() depending on the situation.
-
-2. d_flags is now protected by a per-dentry lock (d_lock). All access
-   to d_flags must be protected by it.
-
-3. For a hashed dentry, checking of d_count needs to be protected by
-   d_lock.
-
-
-Papers and other documentation on dcache locking
-================================================
-
-1. Scaling dcache with RCU (http://linuxjournal.com/article.php?sid=7124).
-
-2. http://lse.sourceforge.net/locking/dcache/dcache.html
-
-
-

+ 382 - 0
Documentation/filesystems/path-lookup.txt

@@ -0,0 +1,382 @@
+Path walking and name lookup locking
+====================================
+
+Path resolution is the finding a dentry corresponding to a path name string, by
+performing a path walk. Typically, for every open(), stat() etc., the path name
+will be resolved. Paths are resolved by walking the namespace tree, starting
+with the first component of the pathname (eg. root or cwd) with a known dentry,
+then finding the child of that dentry, which is named the next component in the
+path string. Then repeating the lookup from the child dentry and finding its
+child with the next element, and so on.
+
+Since it is a frequent operation for workloads like multiuser environments and
+web servers, it is important to optimize this code.
+
+Path walking synchronisation history:
+Prior to 2.5.10, dcache_lock was acquired in d_lookup (dcache hash lookup) and
+thus in every component during path look-up. Since 2.5.10 onwards, fast-walk
+algorithm changed this by holding the dcache_lock at the beginning and walking
+as many cached path component dentries as possible. This significantly
+decreases the number of acquisition of dcache_lock. However it also increases
+the lock hold time significantly and affects performance in large SMP machines.
+Since 2.5.62 kernel, dcache has been using a new locking model that uses RCU to
+make dcache look-up lock-free.
+
+All the above algorithms required taking a lock and reference count on the
+dentry that was looked up, so that may be used as the basis for walking the
+next path element. This is inefficient and unscalable. It is inefficient
+because of the locks and atomic operations required for every dentry element
+slows things down. It is not scalable because many parallel applications that
+are path-walk intensive tend to do path lookups starting from a common dentry
+(usually, the root "/" or current working directory). So contention on these
+common path elements causes lock and cacheline queueing.
+
+Since 2.6.38, RCU is used to make a significant part of the entire path walk
+(including dcache look-up) completely "store-free" (so, no locks, atomics, or
+even stores into cachelines of common dentries). This is known as "rcu-walk"
+path walking.
+
+Path walking overview
+=====================
+
+A name string specifies a start (root directory, cwd, fd-relative) and a
+sequence of elements (directory entry names), which together refer to a path in
+the namespace. A path is represented as a (dentry, vfsmount) tuple. The name
+elements are sub-strings, seperated by '/'.
+
+Name lookups will want to find a particular path that a name string refers to
+(usually the final element, or parent of final element). This is done by taking
+the path given by the name's starting point (which we know in advance -- eg.
+current->fs->cwd or current->fs->root) as the first parent of the lookup. Then
+iteratively for each subsequent name element, look up the child of the current
+parent with the given name and if it is not the desired entry, make it the
+parent for the next lookup.
+
+A parent, of course, must be a directory, and we must have appropriate
+permissions on the parent inode to be able to walk into it.
+
+Turning the child into a parent for the next lookup requires more checks and
+procedures. Symlinks essentially substitute the symlink name for the target
+name in the name string, and require some recursive path walking.  Mount points
+must be followed into (thus changing the vfsmount that subsequent path elements
+refer to), switching from the mount point path to the root of the particular
+mounted vfsmount. These behaviours are variously modified depending on the
+exact path walking flags.
+
+Path walking then must, broadly, do several particular things:
+- find the start point of the walk;
+- perform permissions and validity checks on inodes;
+- perform dcache hash name lookups on (parent, name element) tuples;
+- traverse mount points;
+- traverse symlinks;
+- lookup and create missing parts of the path on demand.
+
+Safe store-free look-up of dcache hash table
+============================================
+
+Dcache name lookup
+------------------
+In order to lookup a dcache (parent, name) tuple, we take a hash on the tuple
+and use that to select a bucket in the dcache-hash table. The list of entries
+in that bucket is then walked, and we do a full comparison of each entry
+against our (parent, name) tuple.
+
+The hash lists are RCU protected, so list walking is not serialised with
+concurrent updates (insertion, deletion from the hash). This is a standard RCU
+list application with the exception of renames, which will be covered below.
+
+Parent and name members of a dentry, as well as its membership in the dcache
+hash, and its inode are protected by the per-dentry d_lock spinlock. A
+reference is taken on the dentry (while the fields are verified under d_lock),
+and this stabilises its d_inode pointer and actual inode. This gives a stable
+point to perform the next step of our path walk against.
+
+These members are also protected by d_seq seqlock, although this offers
+read-only protection and no durability of results, so care must be taken when
+using d_seq for synchronisation (see seqcount based lookups, below).
+
+Renames
+-------
+Back to the rename case. In usual RCU protected lists, the only operations that
+will happen to an object is insertion, and then eventually removal from the
+list. The object will not be reused until an RCU grace period is complete.
+This ensures the RCU list traversal primitives can run over the object without
+problems (see RCU documentation for how this works).
+
+However when a dentry is renamed, its hash value can change, requiring it to be
+moved to a new hash list. Allocating and inserting a new alias would be
+expensive and also problematic for directory dentries. Latency would be far to
+high to wait for a grace period after removing the dentry and before inserting
+it in the new hash bucket. So what is done is to insert the dentry into the
+new list immediately.
+
+However, when the dentry's list pointers are updated to point to objects in the
+new list before waiting for a grace period, this can result in a concurrent RCU
+lookup of the old list veering off into the new (incorrect) list and missing
+the remaining dentries on the list.
+
+There is no fundamental problem with walking down the wrong list, because the
+dentry comparisons will never match. However it is fatal to miss a matching
+dentry. So a seqlock is used to detect when a rename has occurred, and so the
+lookup can be retried.
+
+         1      2      3
+        +---+  +---+  +---+
+hlist-->| N-+->| N-+->| N-+->
+head <--+-P |<-+-P |<-+-P |
+        +---+  +---+  +---+
+
+Rename of dentry 2 may require it deleted from the above list, and inserted
+into a new list. Deleting 2 gives the following list.
+
+         1             3
+        +---+         +---+     (don't worry, the longer pointers do not
+hlist-->| N-+-------->| N-+->    impose a measurable performance overhead
+head <--+-P |<--------+-P |      on modern CPUs)
+        +---+         +---+
+          ^      2      ^
+          |    +---+    |
+          |    | N-+----+
+          +----+-P |
+               +---+
+
+This is a standard RCU-list deletion, which leaves the deleted object's
+pointers intact, so a concurrent list walker that is currently looking at
+object 2 will correctly continue to object 3 when it is time to traverse the
+next object.
+
+However, when inserting object 2 onto a new list, we end up with this:
+
+         1             3
+        +---+         +---+
+hlist-->| N-+-------->| N-+->
+head <--+-P |<--------+-P |
+        +---+         +---+
+                 2
+               +---+
+               | N-+---->
+          <----+-P |
+               +---+
+
+Because we didn't wait for a grace period, there may be a concurrent lookup
+still at 2. Now when it follows 2's 'next' pointer, it will walk off into
+another list without ever having checked object 3.
+
+A related, but distinctly different, issue is that of rename atomicity versus
+lookup operations. If a file is renamed from 'A' to 'B', a lookup must only
+find either 'A' or 'B'. So if a lookup of 'A' returns NULL, a subsequent lookup
+of 'B' must succeed (note the reverse is not true).
+
+Between deleting the dentry from the old hash list, and inserting it on the new
+hash list, a lookup may find neither 'A' nor 'B' matching the dentry. The same
+rename seqlock is also used to cover this race in much the same way, by
+retrying a negative lookup result if a rename was in progress.
+
+Seqcount based lookups
+----------------------
+In refcount based dcache lookups, d_lock is used to serialise access to
+the dentry, stabilising it while comparing its name and parent and then
+taking a reference count (the reference count then gives a stable place to
+start the next part of the path walk from).
+
+As explained above, we would like to do path walking without taking locks or
+reference counts on intermediate dentries along the path. To do this, a per
+dentry seqlock (d_seq) is used to take a "coherent snapshot" of what the dentry
+looks like (its name, parent, and inode). That snapshot is then used to start
+the next part of the path walk. When loading the coherent snapshot under d_seq,
+care must be taken to load the members up-front, and use those pointers rather
+than reloading from the dentry later on (otherwise we'd have interesting things
+like d_inode going NULL underneath us, if the name was unlinked).
+
+Also important is to avoid performing any destructive operations (pretty much:
+no non-atomic stores to shared data), and to recheck the seqcount when we are
+"done" with the operation. Retry or abort if the seqcount does not match.
+Avoiding destructive or changing operations means we can easily unwind from
+failure.
+
+What this means is that a caller, provided they are holding RCU lock to
+protect the dentry object from disappearing, can perform a seqcount based
+lookup which does not increment the refcount on the dentry or write to
+it in any way. This returned dentry can be used for subsequent operations,
+provided that d_seq is rechecked after that operation is complete.
+
+Inodes are also rcu freed, so the seqcount lookup dentry's inode may also be
+queried for permissions.
+
+With this two parts of the puzzle, we can do path lookups without taking
+locks or refcounts on dentry elements.
+
+RCU-walk path walking design
+============================
+
+Path walking code now has two distinct modes, ref-walk and rcu-walk. ref-walk
+is the traditional[*] way of performing dcache lookups using d_lock to
+serialise concurrent modifications to the dentry and take a reference count on
+it. ref-walk is simple and obvious, and may sleep, take locks, etc while path
+walking is operating on each dentry. rcu-walk uses seqcount based dentry
+lookups, and can perform lookup of intermediate elements without any stores to
+shared data in the dentry or inode. rcu-walk can not be applied to all cases,
+eg. if the filesystem must sleep or perform non trivial operations, rcu-walk
+must be switched to ref-walk mode.
+
+[*] RCU is still used for the dentry hash lookup in ref-walk, but not the full
+    path walk.
+
+Where ref-walk uses a stable, refcounted ``parent'' to walk the remaining
+path string, rcu-walk uses a d_seq protected snapshot. When looking up a
+child of this parent snapshot, we open d_seq critical section on the child
+before closing d_seq critical section on the parent. This gives an interlocking
+ladder of snapshots to walk down.
+
+
+     proc 101
+      /----------------\
+     / comm:    "vi"    \
+    /  fs.root: dentry0  \
+    \  fs.cwd:  dentry2  /
+     \                  /
+      \----------------/
+
+So when vi wants to open("/home/npiggin/test.c", O_RDWR), then it will
+start from current->fs->root, which is a pinned dentry. Alternatively,
+"./test.c" would start from cwd; both names refer to the same path in
+the context of proc101.
+
+     dentry 0
+    +---------------------+   rcu-walk begins here, we note d_seq, check the
+    | name:    "/"        |   inode's permission, and then look up the next
+    | inode:   10         |   path element which is "home"...
+    | children:"home", ...|
+    +---------------------+
+              |
+     dentry 1 V
+    +---------------------+   ... which brings us here. We find dentry1 via
+    | name:    "home"     |   hash lookup, then note d_seq and compare name
+    | inode:   678        |   string and parent pointer. When we have a match,
+    | children:"npiggin"  |   we now recheck the d_seq of dentry0. Then we
+    +---------------------+   check inode and look up the next element.
+              |
+     dentry2  V
+    +---------------------+   Note: if dentry0 is now modified, lookup is
+    | name:    "npiggin"  |   not necessarily invalid, so we need only keep a
+    | inode:   543        |   parent for d_seq verification, and grandparents
+    | children:"a.c", ... |   can be forgotten.
+    +---------------------+
+              |
+     dentry3  V
+    +---------------------+   At this point we have our destination dentry.
+    | name:    "a.c"      |   We now take its d_lock, verify d_seq of this
+    | inode:   14221      |   dentry. If that checks out, we can increment
+    | children:NULL       |   its refcount because we're holding d_lock.
+    +---------------------+
+
+Taking a refcount on a dentry from rcu-walk mode, by taking its d_lock,
+re-checking its d_seq, and then incrementing its refcount is called
+"dropping rcu" or dropping from rcu-walk into ref-walk mode.
+
+It is, in some sense, a bit of a house of cards. If the seqcount check of the
+parent snapshot fails, the house comes down, because we had closed the d_seq
+section on the grandparent, so we have nothing left to stand on. In that case,
+the path walk must be fully restarted (which we do in ref-walk mode, to avoid
+live locks). It is costly to have a full restart, but fortunately they are
+quite rare.
+
+When we reach a point where sleeping is required, or a filesystem callout
+requires ref-walk, then instead of restarting the walk, we attempt to drop rcu
+at the last known good dentry we have. Avoiding a full restart in ref-walk in
+these cases is fundamental for performance and scalability because blocking
+operations such as creates and unlinks are not uncommon.
+
+The detailed design for rcu-walk is like this:
+* LOOKUP_RCU is set in nd->flags, which distinguishes rcu-walk from ref-walk.
+* Take the RCU lock for the entire path walk, starting with the acquiring
+  of the starting path (eg. root/cwd/fd-path). So now dentry refcounts are
+  not required for dentry persistence.
+* synchronize_rcu is called when unregistering a filesystem, so we can
+  access d_ops and i_ops during rcu-walk.
+* Similarly take the vfsmount lock for the entire path walk. So now mnt
+  refcounts are not required for persistence. Also we are free to perform mount
+  lookups, and to assume dentry mount points and mount roots are stable up and
+  down the path.
+* Have a per-dentry seqlock to protect the dentry name, parent, and inode,
+  so we can load this tuple atomically, and also check whether any of its
+  members have changed.
+* Dentry lookups (based on parent, candidate string tuple) recheck the parent
+  sequence after the child is found in case anything changed in the parent
+  during the path walk.
+* inode is also RCU protected so we can load d_inode and use the inode for
+  limited things.
+* i_mode, i_uid, i_gid can be tested for exec permissions during path walk.
+* i_op can be loaded.
+* When the destination dentry is reached, drop rcu there (ie. take d_lock,
+  verify d_seq, increment refcount).
+* If seqlock verification fails anywhere along the path, do a full restart
+  of the path lookup in ref-walk mode. -ECHILD tends to be used (for want of
+  a better errno) to signal an rcu-walk failure.
+
+The cases where rcu-walk cannot continue are:
+* NULL dentry (ie. any uncached path element)
+* Following links
+
+It may be possible eventually to make following links rcu-walk aware.
+
+Uncached path elements will always require dropping to ref-walk mode, at the
+very least because i_mutex needs to be grabbed, and objects allocated.
+
+Final note:
+"store-free" path walking is not strictly store free. We take vfsmount lock
+and refcounts (both of which can be made per-cpu), and we also store to the
+stack (which is essentially CPU-local), and we also have to take locks and
+refcount on final dentry.
+
+The point is that shared data, where practically possible, is not locked
+or stored into. The result is massive improvements in performance and
+scalability of path resolution.
+
+
+Interesting statistics
+======================
+
+The following table gives rcu lookup statistics for a few simple workloads
+(2s12c24t Westmere, debian non-graphical system). Ungraceful are attempts to
+drop rcu that fail due to d_seq failure and requiring the entire path lookup
+again. Other cases are successful rcu-drops that are required before the final
+element, nodentry for missing dentry, revalidate for filesystem revalidate
+routine requiring rcu drop, permission for permission check requiring drop,
+and link for symlink traversal requiring drop.
+
+     rcu-lookups     restart  nodentry          link  revalidate  permission
+bootup     47121           0      4624          1010       10283        7852
+dbench  25386793           0   6778659(26.7%)     55         549        1156
+kbuild   2696672          10     64442(2.3%)  108764(4.0%)     1        1590
+git diff   39605           0        28             2           0         106
+vfstest 24185492        4945    708725(2.9%) 1076136(4.4%)     0        2651
+
+What this shows is that failed rcu-walk lookups, ie. ones that are restarted
+entirely with ref-walk, are quite rare. Even the "vfstest" case which
+specifically has concurrent renames/mkdir/rmdir/ creat/unlink/etc to excercise
+such races is not showing a huge amount of restarts.
+
+Dropping from rcu-walk to ref-walk mean that we have encountered a dentry where
+the reference count needs to be taken for some reason. This is either because
+we have reached the target of the path walk, or because we have encountered a
+condition that can't be resolved in rcu-walk mode.  Ideally, we drop rcu-walk
+only when we have reached the target dentry, so the other statistics show where
+this does not happen.
+
+Note that a graceful drop from rcu-walk mode due to something such as the
+dentry not existing (which can be common) is not necessarily a failure of
+rcu-walk scheme, because some elements of the path may have been walked in
+rcu-walk mode. The further we get from common path elements (such as cwd or
+root), the less contended the dentry is likely to be. The closer we are to
+common path elements, the more likely they will exist in dentry cache.
+
+
+Papers and other documentation on dcache locking
+================================================
+
+1. Scaling dcache with RCU (http://linuxjournal.com/article.php?sid=7124).
+
+2. http://lse.sourceforge.net/locking/dcache/dcache.html
+
+
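
To make the seqcount-based lookup pattern described above concrete, here is a minimal sketch (not part of this commit) of loading a dentry's members under d_seq and rechecking the snapshot afterwards; sample_use_dentry() is a hypothetical name, and the caller is assumed to hold rcu_read_lock() and to have found the dentry via the RCU hash lookup:

	static int sample_use_dentry(struct dentry *dentry)
	{
		struct inode *inode;
		unsigned seq;

		seq = read_seqcount_begin(&dentry->d_seq);
		inode = dentry->d_inode;	/* load members up-front */
		if (!inode)
			return -ECHILD;		/* name was unlinked under us */
		/* ... use 'inode' here, with no stores to shared data ... */
		if (read_seqcount_retry(&dentry->d_seq, seq))
			return -ECHILD;		/* snapshot stale: retry or drop rcu */
		return 0;
	}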

+ 68 - 1
Documentation/filesystems/porting

@@ -216,7 +216,6 @@ had ->revalidate()) add calls in ->follow_link()/->readlink().
 ->d_parent changes are not protected by BKL anymore.  Read access is safe
 if at least one of the following is true:
 	* filesystem has no cross-directory rename()
-	* dcache_lock is held
 	* we know that parent had been locked (e.g. we are looking at
 ->d_parent of ->lookup() argument).
 	* we are called from ->rename().
@@ -318,3 +317,71 @@ if it's zero is not *and* *never* *had* *been* enough.  Final unlink() and iput(
 may happen while the inode is in the middle of ->write_inode(); e.g. if you blindly
 free the on-disk inode, you may end up doing that while ->write_inode() is writing
 to it.
+
+---
+[mandatory]
+
+	.d_delete() now only advises the dcache as to whether or not to cache
+unreferenced dentries, and is now only called when the dentry refcount goes to
+0. Even on 0 refcount transition, it must be able to tolerate being called 0,
+1, or more times (eg. constant, idempotent).
+
+---
+[mandatory]
+
+	.d_compare() calling convention and locking rules are significantly
+changed. Read updated documentation in Documentation/filesystems/vfs.txt (and
+look at examples of other filesystems) for guidance.
+
+---
+[mandatory]
+
+	.d_hash() calling convention and locking rules are significantly
+changed. Read updated documentation in Documentation/filesystems/vfs.txt (and
+look at examples of other filesystems) for guidance.
+
+---
+[mandatory]
+	dcache_lock is gone, replaced by fine grained locks. See fs/dcache.c
+for details of what locks to replace dcache_lock with in order to protect
+particular things. Most of the time, a filesystem only needs ->d_lock, which
+protects *all* the dcache state of a given dentry.
+
+--
+[mandatory]
+
+	Filesystems must RCU-free their inodes, if they can have been accessed
+via rcu-walk path walk (basically, if the file can have had a path name in the
+vfs namespace).
+
+	i_dentry and i_rcu share storage in a union, and the vfs expects
+i_dentry to be reinitialized before it is freed, so an:
+
+  INIT_LIST_HEAD(&inode->i_dentry);
+
+must be done in the RCU callback.
+
+--
+[recommended]
+	vfs now tries to do path walking in "rcu-walk mode", which avoids
+atomic operations and scalability hazards on dentries and inodes (see
+Documentation/filesystems/path-walk.txt). d_hash and d_compare changes (above)
+are examples of the changes required to support this. For more complex
+filesystem callbacks, the vfs drops out of rcu-walk mode before the fs call, so
+no changes are required to the filesystem. However, this is costly and loses
+the benefits of rcu-walk mode. We will begin to add filesystem callbacks that
+are rcu-walk aware, shown below. Filesystems should take advantage of this
+where possible.
+
+--
+[mandatory]
+	d_revalidate is a callback that is made on every path element (if
+the filesystem provides it), which requires dropping out of rcu-walk mode. This
+may now be called in rcu-walk mode (nd->flags & LOOKUP_RCU). -ECHILD should be
+returned if the filesystem cannot handle rcu-walk. See
+Documentation/filesystems/vfs.txt for more details.
+
+	permission and check_acl are inode permission checks that are called
+on many or all directory inodes on the way down a path walk (to check for
+exec permission). These must now be rcu-walk aware (flags & IPERM_RCU). See
+Documentation/filesystems/vfs.txt for more details.
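
A minimal sketch of the RCU-freeing rule quoted above, assuming a filesystem with its own inode cache; the examplefs_* names, EXAMPLEFS_I() and examplefs_inode_cachep are illustrative, only the INIT_LIST_HEAD() requirement comes from the text:

	static void examplefs_i_callback(struct rcu_head *head)
	{
		struct inode *inode = container_of(head, struct inode, i_rcu);

		INIT_LIST_HEAD(&inode->i_dentry);	/* reinitialize before freeing */
		kmem_cache_free(examplefs_inode_cachep, EXAMPLEFS_I(inode));
	}

	static void examplefs_destroy_inode(struct inode *inode)
	{
		call_rcu(&inode->i_rcu, examplefs_i_callback);
	}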

+ 57 - 17
Documentation/filesystems/vfs.txt

@@ -325,7 +325,8 @@ struct inode_operations {
         void * (*follow_link) (struct dentry *, struct nameidata *);
         void (*put_link) (struct dentry *, struct nameidata *, void *);
 	void (*truncate) (struct inode *);
-	int (*permission) (struct inode *, int, struct nameidata *);
+	int (*permission) (struct inode *, int, unsigned int);
+	int (*check_acl)(struct inode *, int, unsigned int);
 	int (*setattr) (struct dentry *, struct iattr *);
 	int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *);
 	int (*setxattr) (struct dentry *, const char *,const void *,size_t,int);
@@ -414,6 +415,13 @@ otherwise noted.
   permission: called by the VFS to check for access rights on a POSIX-like
   	filesystem.
 
+	May be called in rcu-walk mode (flags & IPERM_RCU). If in rcu-walk
+	mode, the filesystem must check the permission without blocking or
+	storing to the inode.
+
+	If a situation is encountered that rcu-walk cannot handle, return
+	-ECHILD and it will be called again in ref-walk mode.
+
   setattr: called by the VFS to set attributes for a file. This method
   	is called by chmod(2) and related system calls.
 
@@ -847,9 +855,12 @@ defined:
 
 struct dentry_operations {
 	int (*d_revalidate)(struct dentry *, struct nameidata *);
-	int (*d_hash) (struct dentry *, struct qstr *);
-	int (*d_compare) (struct dentry *, struct qstr *, struct qstr *);
-	int (*d_delete)(struct dentry *);
+	int (*d_hash)(const struct dentry *, const struct inode *,
+			struct qstr *);
+	int (*d_compare)(const struct dentry *, const struct inode *,
+			const struct dentry *, const struct inode *,
+			unsigned int, const char *, const struct qstr *);
+	int (*d_delete)(const struct dentry *);
 	void (*d_release)(struct dentry *);
 	void (*d_iput)(struct dentry *, struct inode *);
 	char *(*d_dname)(struct dentry *, char *, int);
@@ -860,13 +871,45 @@ struct dentry_operations {
 	dcache. Most filesystems leave this as NULL, because all their
 	dentries in the dcache are valid
 
-  d_hash: called when the VFS adds a dentry to the hash table
+	d_revalidate may be called in rcu-walk mode (nd->flags & LOOKUP_RCU).
+	If in rcu-walk mode, the filesystem must revalidate the dentry without
+	blocking or storing to the dentry, d_parent and d_inode should not be
+	used without care (because they can go NULL), instead nd->inode should
+	be used.
+
+	If a situation is encountered that rcu-walk cannot handle, return
+	-ECHILD and it will be called again in ref-walk mode.
+
+  d_hash: called when the VFS adds a dentry to the hash table. The first
+	dentry passed to d_hash is the parent directory that the name is
+	to be hashed into. The inode is the dentry's inode.
+
+	Same locking and synchronisation rules as d_compare regarding
+	what is safe to dereference etc.
+
+  d_compare: called to compare a dentry name with a given name. The first
+	dentry is the parent of the dentry to be compared, the second is
+	the parent's inode, then the dentry and inode (may be NULL) of the
+	child dentry. len and name string are properties of the dentry to be
+	compared. qstr is the name to compare it with.
+
+	Must be constant and idempotent, and should not take locks if
+	possible, and should not or store into the dentry or inodes.
+	Should not dereference pointers outside the dentry or inodes without
+	lots of care (eg.  d_parent, d_inode, d_name should not be used).
+
+	However, our vfsmount is pinned, and RCU held, so the dentries and
+	inodes won't disappear, neither will our sb or filesystem module.
+	->i_sb and ->d_sb may be used.
 
-  d_compare: called when a dentry should be compared with another
+	It is a tricky calling convention because it needs to be called under
+	"rcu-walk", ie. without any locks or references on things.
 
-  d_delete: called when the last reference to a dentry is
-	deleted. This means no-one is using the dentry, however it is
-	still valid and in the dcache
+  d_delete: called when the last reference to a dentry is dropped and the
+	dcache is deciding whether or not to cache it. Return 1 to delete
+	immediately, or 0 to cache the dentry. Default is NULL which means to
+	always cache a reachable dentry. d_delete must be constant and
+	idempotent.
 
   d_release: called when a dentry is really deallocated
 
@@ -910,14 +953,11 @@ manipulate dentries:
 	the usage count)
 
   dput: close a handle for a dentry (decrements the usage count). If
-	the usage count drops to 0, the "d_delete" method is called
-	and the dentry is placed on the unused list if the dentry is
-	still in its parents hash list. Putting the dentry on the
-	unused list just means that if the system needs some RAM, it
-	goes through the unused list of dentries and deallocates them.
-	If the dentry has already been unhashed and the usage count
-	drops to 0, in this case the dentry is deallocated after the
-	"d_delete" method is called
+	the usage count drops to 0, and the dentry is still in its
+	parent's hash, the "d_delete" method is called to check whether
+	it should be cached. If it should not be cached, or if the dentry
+	is not hashed, it is deleted. Otherwise cached dentries are put
+	into an LRU list to be reclaimed on memory shortage.
 
   d_drop: this unhashes a dentry from its parents hash list. A
 	subsequent call to dput() will deallocate the dentry if its
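
For illustration of the new d_compare calling convention documented above, here is a minimal exact-match sketch that touches only its arguments and so remains safe in rcu-walk; examplefs_d_compare is a hypothetical name, and 0 is returned on a match:

	static int examplefs_d_compare(const struct dentry *parent,
				       const struct inode *pinode,
				       const struct dentry *dentry,
				       const struct inode *inode,
				       unsigned int len, const char *str,
				       const struct qstr *name)
	{
		/* compare the candidate's (len, str) against the wanted name */
		if (len != name->len)
			return 1;			/* not a match */
		return memcmp(str, name->name, len);	/* 0 means a match */
	}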

+ 22 - 0
Documentation/scsi/ChangeLog.megaraid_sas

@@ -1,3 +1,25 @@
+Release Date    : Tues.  Dec 14, 2010 17:00:00 PST 2010 -
+			(emaild-id:megaraidlinux@lsi.com)
+			Adam Radford
+Current Version : 00.00.05.29-rc1
+Old Version     : 00.00.04.31-rc1
+    1. Rename megaraid_sas.c to megaraid_sas_base.c.
+    2. Update GPL headers.
+    3. Add MSI-X support and 'msix_disable' module parameter.
+    4. Use lowest memory bar (for SR-IOV VF support).
+    5. Add struct megasas_instance_temlate changes, and change all code to use
+       new instance entries:
+
+       irqreturn_t (*service_isr )(int irq, void *devp);
+       void (*tasklet)(unsigned long);
+       u32 (*init_adapter)(struct megasas_instance *);
+       u32 (*build_and_issue_cmd) (struct megasas_instance *,
+       struct scsi_cmnd *);
+       void (*issue_dcmd) (struct megasas_instance *instance,
+                              struct megasas_cmd *cmd);
+
+   6. Add code to support MegaRAID 9265/9285 controllers device id (0x5b).
+-------------------------------------------------------------------------------
 1 Release Date    : Thur.  May 03, 2010 09:12:45 PST 2009 -
 			(emaild-id:megaraidlinux@lsi.com)
 			Bo Yang

+ 58 - 55
Documentation/usb/power-management.txt

@@ -2,7 +2,7 @@
 
 		 Alan Stern <stern@rowland.harvard.edu>
 
-			    December 11, 2009
+			    October 28, 2010
 
 
 
@@ -107,9 +107,14 @@ allowed to issue dynamic suspends.
 The user interface for controlling dynamic PM is located in the power/
 subdirectory of each USB device's sysfs directory, that is, in
 /sys/bus/usb/devices/.../power/ where "..." is the device's ID.  The
-relevant attribute files are: wakeup, control, and autosuspend.
-(There may also be a file named "level"; this file was deprecated
-as of the 2.6.35 kernel and replaced by the "control" file.)
+relevant attribute files are: wakeup, control, and
+autosuspend_delay_ms.  (There may also be a file named "level"; this
+file was deprecated as of the 2.6.35 kernel and replaced by the
+"control" file.  In 2.6.38 the "autosuspend" file will be deprecated
+and replaced by the "autosuspend_delay_ms" file.  The only difference
+is that the newer file expresses the delay in milliseconds whereas the
+older file uses seconds.  Confusingly, both files are present in 2.6.37
+but only "autosuspend" works.)
 
 	power/wakeup
 
@@ -140,33 +145,36 @@ as of the 2.6.35 kernel and replaced by the "control" file.)
 		suspended and autoresume was not allowed.  This
 		setting is no longer supported.)
 
-	power/autosuspend
+	power/autosuspend_delay_ms
 
 		This file contains an integer value, which is the
-		number of seconds the device should remain idle before
-		the kernel will autosuspend it (the idle-delay time).
-		The default is 2.  0 means to autosuspend as soon as
-		the device becomes idle, and negative values mean
-		never to autosuspend.  You can write a number to the
-		file to change the autosuspend idle-delay time.
-
-Writing "-1" to power/autosuspend and writing "on" to power/control do
-essentially the same thing -- they both prevent the device from being
-autosuspended.  Yes, this is a redundancy in the API.
+		number of milliseconds the device should remain idle
+		before the kernel will autosuspend it (the idle-delay
+		time).  The default is 2000.  0 means to autosuspend
+		as soon as the device becomes idle, and negative
+		values mean never to autosuspend.  You can write a
+		number to the file to change the autosuspend
+		idle-delay time.
+
+Writing "-1" to power/autosuspend_delay_ms and writing "on" to
+power/control do essentially the same thing -- they both prevent the
+device from being autosuspended.  Yes, this is a redundancy in the
+API.
 
 (In 2.6.21 writing "0" to power/autosuspend would prevent the device
 from being autosuspended; the behavior was changed in 2.6.22.  The
 power/autosuspend attribute did not exist prior to 2.6.21, and the
 power/level attribute did not exist prior to 2.6.22.  power/control
-was added in 2.6.34.)
+was added in 2.6.34, and power/autosuspend_delay_ms was added in
+2.6.37 but did not become functional until 2.6.38.)
 
 
 	Changing the default idle-delay time
 	------------------------------------
 
-The default autosuspend idle-delay time is controlled by a module
-parameter in usbcore.  You can specify the value when usbcore is
-loaded.  For example, to set it to 5 seconds instead of 2 you would
+The default autosuspend idle-delay time (in seconds) is controlled by
+a module parameter in usbcore.  You can specify the value when usbcore
+is loaded.  For example, to set it to 5 seconds instead of 2 you would
 do:
 
 	modprobe usbcore autosuspend=5
@@ -234,25 +242,23 @@ every device.
 
 If a driver knows that its device has proper suspend/resume support,
 it can enable autosuspend all by itself.  For example, the video
-driver for a laptop's webcam might do this, since these devices are
-rarely used and so should normally be autosuspended.
+driver for a laptop's webcam might do this (in recent kernels they
+do), since these devices are rarely used and so should normally be
+autosuspended.
 
 Sometimes it turns out that even when a device does work okay with
-autosuspend there are still problems.  For example, there are
-experimental patches adding autosuspend support to the usbhid driver,
-which manages keyboards and mice, among other things.  Tests with a
-number of keyboards showed that typing on a suspended keyboard, while
-causing the keyboard to do a remote wakeup all right, would
-nonetheless frequently result in lost keystrokes.  Tests with mice
-showed that some of them would issue a remote-wakeup request in
-response to button presses but not to motion, and some in response to
-neither.
+autosuspend there are still problems.  For example, the usbhid driver,
+which manages keyboards and mice, has autosuspend support.  Tests with
+a number of keyboards show that typing on a suspended keyboard, while
+causing the keyboard to do a remote wakeup all right, will nonetheless
+frequently result in lost keystrokes.  Tests with mice show that some
+of them will issue a remote-wakeup request in response to button
+presses but not to motion, and some in response to neither.
 
 The kernel will not prevent you from enabling autosuspend on devices
 that can't handle it.  It is even possible in theory to damage a
-device by suspending it at the wrong time -- for example, suspending a
-USB hard disk might cause it to spin down without parking the heads.
-(Highly unlikely, but possible.)  Take care.
+device by suspending it at the wrong time.  (Highly unlikely, but
+possible.)  Take care.
 
 
 	The driver interface for Power Management
@@ -336,10 +342,6 @@ autosuspend the interface's device.  When the usage counter is = 0
 then the interface is considered to be idle, and the kernel may
 autosuspend the device.
 
-(There is a similar usage counter field in struct usb_device,
-associated with the device itself rather than any of its interfaces.
-This counter is used only by the USB core.)
-
 Drivers need not be concerned about balancing changes to the usage
 counter; the USB core will undo any remaining "get"s when a driver
 is unbound from its interface.  As a corollary, drivers must not call
@@ -409,11 +411,11 @@ during autosuspend.  For example, there's not much point
 autosuspending a keyboard if the user can't cause the keyboard to do a
 remote wakeup by typing on it.  If the driver sets
 intf->needs_remote_wakeup to 1, the kernel won't autosuspend the
-device if remote wakeup isn't available or has been disabled through
-the power/wakeup attribute.  (If the device is already autosuspended,
-though, setting this flag won't cause the kernel to autoresume it.
-Normally a driver would set this flag in its probe method, at which
-time the device is guaranteed not to be autosuspended.)
+device if remote wakeup isn't available.  (If the device is already
+autosuspended, though, setting this flag won't cause the kernel to
+autoresume it.  Normally a driver would set this flag in its probe
+method, at which time the device is guaranteed not to be
+autosuspended.)
 
 If a driver does its I/O asynchronously in interrupt context, it
 should call usb_autopm_get_interface_async() before starting output and
@@ -422,20 +424,19 @@ it receives an input event, it should call
 
 	usb_mark_last_busy(struct usb_device *udev);
 
-in the event handler.  This sets udev->last_busy to the current time.
-udev->last_busy is the field used for idle-delay calculations;
-updating it will cause any pending autosuspend to be moved back.  Most
-of the usb_autopm_* routines will also set the last_busy field to the
-current time.
+in the event handler.  This tells the PM core that the device was just
+busy and therefore the next autosuspend idle-delay expiration should
+be pushed back.  Many of the usb_autopm_* routines also make this call,
+so drivers need to worry only when interrupt-driven input arrives.
 
 Asynchronous operation is always subject to races.  For example, a
-driver may call one of the usb_autopm_*_interface_async() routines at
-a time when the core has just finished deciding the device has been
-idle for long enough but not yet gotten around to calling the driver's
-suspend method.  The suspend method must be responsible for
-synchronizing with the output request routine and the URB completion
-handler; it should cause autosuspends to fail with -EBUSY if the
-driver needs to use the device.
+driver may call the usb_autopm_get_interface_async() routine at a time
+when the core has just finished deciding the device has been idle for
+long enough but not yet gotten around to calling the driver's suspend
+method.  The suspend method must be responsible for synchronizing with
+the I/O request routine and the URB completion handler; it should
+cause autosuspends to fail with -EBUSY if the driver needs to use the
+device.
 
 External suspend calls should never be allowed to fail in this way,
 only autosuspend calls.  The driver can tell them apart by checking
@@ -472,7 +473,9 @@ Firstly, a device may already be autosuspended when a system suspend
 occurs.  Since system suspends are supposed to be as transparent as
 possible, the device should remain suspended following the system
 resume.  But this theory may not work out well in practice; over time
-the kernel's behavior in this regard has changed.
+the kernel's behavior in this regard has changed.  As of 2.6.37 the
+policy is to resume all devices during a system resume and let them
+handle their own runtime suspends afterward.
 
 Secondly, a dynamic power-management event may occur as a system
 suspend is underway.  The window for this is short, since system
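
A hedged sketch of the asynchronous autosuspend calls described above, for a hypothetical driver (the sample_* names and structure are assumptions, not from this commit): mark the device busy when input arrives, and take the asynchronous PM reference before starting output:

	struct sample_dev {			/* hypothetical driver state */
		struct usb_device *udev;
		struct usb_interface *intf;
	};

	static void sample_irq_rx(struct urb *urb)	/* URB completion handler */
	{
		struct sample_dev *dev = urb->context;

		usb_mark_last_busy(dev->udev);	/* push back the idle-delay expiry */
		/* ... process the input event ... */
	}

	static int sample_start_output(struct sample_dev *dev)
	{
		int ret = usb_autopm_get_interface_async(dev->intf);

		if (ret < 0)
			return ret;	/* resume not possible or not allowed */
		/* ... submit output URBs; drop the reference with
		 * usb_autopm_put_interface_async() when the I/O completes ... */
		return 0;
	}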

+ 5 - 1
arch/arm/mach-davinci/usb.c

@@ -64,17 +64,19 @@ static struct resource usb_resources[] = {
 	{
 		.start          = IRQ_USBINT,
 		.flags          = IORESOURCE_IRQ,
+		.name		= "mc"
 	},
 	{
 		/* placeholder for the dedicated CPPI IRQ */
 		.flags          = IORESOURCE_IRQ,
+		.name		= "dma"
 	},
 };
 
 static u64 usb_dmamask = DMA_BIT_MASK(32);
 
 static struct platform_device usb_dev = {
-	.name           = "musb_hdrc",
+	.name           = "musb-davinci",
 	.id             = -1,
 	.dev = {
 		.platform_data		= &usb_data,
@@ -110,6 +112,7 @@ static struct resource da8xx_usb20_resources[] = {
 	{
 		.start		= IRQ_DA8XX_USB_INT,
 		.flags		= IORESOURCE_IRQ,
+		.name		= "mc",
 	},
 };
 
@@ -121,6 +124,7 @@ int __init da8xx_register_usb20(unsigned mA, unsigned potpgt)
 
 	usb_dev.resource = da8xx_usb20_resources;
 	usb_dev.num_resources = ARRAY_SIZE(da8xx_usb20_resources);
+	usb_dev.name = "musb-da8xx";
 
 	return platform_device_register(&usb_dev);
 }

+ 1 - 0
arch/arm/mach-omap2/Kconfig

@@ -48,6 +48,7 @@ config ARCH_OMAP4
 	select ARM_ERRATA_720789
 	select ARCH_HAS_OPP
 	select PM_OPP if PM
+	select USB_ARCH_HAS_EHCI
 
 comment "OMAP Core Type"
 	depends on ARCH_OMAP2

+ 4 - 2
arch/arm/mach-omap2/Makefile

@@ -209,9 +209,11 @@ obj-$(CONFIG_MACH_IGEP0030)		+= board-igep0030.o \
 obj-$(CONFIG_MACH_OMAP3_TOUCHBOOK)	+= board-omap3touchbook.o \
 					   hsmmc.o
 obj-$(CONFIG_MACH_OMAP_4430SDP)		+= board-4430sdp.o \
-					   hsmmc.o
+					   hsmmc.o \
+					   omap_phy_internal.o
 obj-$(CONFIG_MACH_OMAP4_PANDA)		+= board-omap4panda.o \
-					   hsmmc.o
+					   hsmmc.o \
+					   omap_phy_internal.o
 
 obj-$(CONFIG_MACH_OMAP3517EVM)		+= board-am3517evm.o
 

+ 29 - 6
arch/arm/mach-omap2/board-4430sdp.c

@@ -44,6 +44,7 @@
 #define ETH_KS8851_IRQ			34
 #define ETH_KS8851_POWER_ON		48
 #define ETH_KS8851_QUART		138
+#define OMAP4SDP_MDM_PWR_EN_GPIO	157
 #define OMAP4_SFH7741_SENSOR_OUTPUT_GPIO	184
 #define OMAP4_SFH7741_ENABLE_GPIO		188
 
@@ -250,12 +251,29 @@ static void __init omap_4430sdp_init_irq(void)
 	gic_init_irq();
 }
 
+static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = {
+	.port_mode[0]	= EHCI_HCD_OMAP_MODE_PHY,
+	.port_mode[1]	= EHCI_HCD_OMAP_MODE_UNKNOWN,
+	.port_mode[2]	= EHCI_HCD_OMAP_MODE_UNKNOWN,
+	.phy_reset	= false,
+	.reset_gpio_port[0]  = -EINVAL,
+	.reset_gpio_port[1]  = -EINVAL,
+	.reset_gpio_port[2]  = -EINVAL,
+};
+
 static struct omap_musb_board_data musb_board_data = {
 	.interface_type		= MUSB_INTERFACE_UTMI,
-	.mode			= MUSB_PERIPHERAL,
+	.mode			= MUSB_OTG,
 	.power			= 100,
 };
 
+static struct twl4030_usb_data omap4_usbphy_data = {
+	.phy_init	= omap4430_phy_init,
+	.phy_exit	= omap4430_phy_exit,
+	.phy_power	= omap4430_phy_power,
+	.phy_set_clock	= omap4430_phy_set_clk,
+};
+
 static struct omap2_hsmmc_info mmc[] = {
 	{
 		.mmc		= 1,
@@ -475,6 +493,7 @@ static struct twl4030_platform_data sdp4430_twldata = {
 	.vaux1		= &sdp4430_vaux1,
 	.vaux2		= &sdp4430_vaux2,
 	.vaux3		= &sdp4430_vaux3,
+	.usb		= &omap4_usbphy_data
 };
 
 static struct i2c_board_info __initdata sdp4430_i2c_boardinfo[] = {
@@ -555,11 +574,15 @@ static void __init omap_4430sdp_init(void)
 	platform_add_devices(sdp4430_devices, ARRAY_SIZE(sdp4430_devices));
 	omap_serial_init();
 	omap4_twl6030_hsmmc_init(mmc);
-	/* OMAP4 SDP uses internal transceiver so register nop transceiver */
-	usb_nop_xceiv_register();
-	/* FIXME: allow multi-omap to boot until musb is updated for omap4 */
-	if (!cpu_is_omap44xx())
-		usb_musb_init(&musb_board_data);
+
+	/* Power on the ULPI PHY */
+	if (gpio_is_valid(OMAP4SDP_MDM_PWR_EN_GPIO)) {
+		/* FIXME: Assumes pad is already muxed for GPIO mode */
+		gpio_request(OMAP4SDP_MDM_PWR_EN_GPIO, "USBB1 PHY VMDM_3V3");
+		gpio_direction_output(OMAP4SDP_MDM_PWR_EN_GPIO, 1);
+	}
+	usb_ehci_init(&ehci_pdata);
+	usb_musb_init(&musb_board_data);
 
 	status = omap_ethernet_init();
 	if (status) {

+ 2 - 3
arch/arm/mach-omap2/board-n8x0.c

@@ -46,8 +46,7 @@ static struct device *mmc_device;
 #define TUSB6010_GPIO_ENABLE	0
 #define TUSB6010_DMACHAN	0x3f
 
-#if defined(CONFIG_USB_TUSB6010) || \
-	defined(CONFIG_USB_TUSB6010_MODULE)
+#ifdef CONFIG_USB_MUSB_TUSB6010
 /*
  * Enable or disable power to TUSB6010. When enabling, turn on 3.3 V and
  * 1.5 V voltage regulators of PM companion chip. Companion chip will then
@@ -134,7 +133,7 @@ err:
 
 static void __init n8x0_usb_init(void) {}
 
-#endif /*CONFIG_USB_TUSB6010 */
+#endif /*CONFIG_USB_MUSB_TUSB6010 */
 
 
 static struct omap2_mcspi_device_config p54spi_mcspi_config = {

+ 10 - 4
arch/arm/mach-omap2/board-omap4panda.c

@@ -144,10 +144,17 @@ error1:
 
 static struct omap_musb_board_data musb_board_data = {
 	.interface_type		= MUSB_INTERFACE_UTMI,
-	.mode			= MUSB_PERIPHERAL,
+	.mode			= MUSB_OTG,
 	.power			= 100,
 };
 
+static struct twl4030_usb_data omap4_usbphy_data = {
+	.phy_init	= omap4430_phy_init,
+	.phy_exit	= omap4430_phy_exit,
+	.phy_power	= omap4430_phy_power,
+	.phy_set_clock	= omap4430_phy_set_clk,
+};
+
 static struct omap2_hsmmc_info mmc[] = {
 	{
 		.mmc		= 1,
@@ -357,6 +364,7 @@ static struct twl4030_platform_data omap4_panda_twldata = {
 	.vaux1		= &omap4_panda_vaux1,
 	.vaux2		= &omap4_panda_vaux2,
 	.vaux3		= &omap4_panda_vaux3,
+	.usb		= &omap4_usbphy_data,
 };
 
 static struct i2c_board_info __initdata omap4_panda_i2c_boardinfo[] = {
@@ -404,9 +412,7 @@ static void __init omap4_panda_init(void)
 	/* OMAP4 Panda uses internal transceiver so register nop transceiver */
 	usb_nop_xceiv_register();
 	omap4_ehci_init();
-	/* FIXME: allow multi-omap to boot until musb is updated for omap4 */
-	if (!cpu_is_omap44xx())
-		usb_musb_init(&musb_board_data);
+	usb_musb_init(&musb_board_data);
 }
 
 static void __init omap4_panda_map_io(void)

+ 1 - 1
arch/arm/mach-omap2/clock2420_data.c

@@ -1877,7 +1877,7 @@ static struct omap_clk omap2420_clks[] = {
 	CLK("omap-aes",	"ick",	&aes_ick,	CK_242X),
 	CLK(NULL,	"pka_ick",	&pka_ick,	CK_242X),
 	CLK(NULL,	"usb_fck",	&usb_fck,	CK_242X),
-	CLK("musb_hdrc",	"fck",	&osc_ck,	CK_242X),
+	CLK("musb-hdrc",	"fck",	&osc_ck,	CK_242X),
 };
 
 /*

+ 1 - 1
arch/arm/mach-omap2/clock2430_data.c

@@ -1983,7 +1983,7 @@ static struct omap_clk omap2430_clks[] = {
 	CLK("omap-aes",	"ick",	&aes_ick,	CK_243X),
 	CLK(NULL,	"pka_ick",	&pka_ick,	CK_243X),
 	CLK(NULL,	"usb_fck",	&usb_fck,	CK_243X),
-	CLK("musb_hdrc",	"ick",	&usbhs_ick,	CK_243X),
+	CLK("musb-omap2430",	"ick",	&usbhs_ick,	CK_243X),
 	CLK("mmci-omap-hs.0", "ick",	&mmchs1_ick,	CK_243X),
 	CLK("mmci-omap-hs.0", "fck",	&mmchs1_fck,	CK_243X),
 	CLK("mmci-omap-hs.1", "ick",	&mmchs2_ick,	CK_243X),

+ 9 - 4
arch/arm/mach-omap2/clock3xxx_data.c

@@ -3286,6 +3286,7 @@ static struct omap_clk omap3xxx_clks[] = {
 	CLK(NULL,	"cpefuse_fck",	&cpefuse_fck,	CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
 	CLK(NULL,	"ts_fck",	&ts_fck,	CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
 	CLK(NULL,	"usbtll_fck",	&usbtll_fck,	CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+	CLK("ehci-omap.0",	"usbtll_fck",	&usbtll_fck,	CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
 	CLK("omap-mcbsp.1",	"prcm_fck",	&core_96m_fck,	CK_3XXX),
 	CLK("omap-mcbsp.5",	"prcm_fck",	&core_96m_fck,	CK_3XXX),
 	CLK(NULL,	"core_96m_fck",	&core_96m_fck,	CK_3XXX),
@@ -3313,14 +3314,15 @@ static struct omap_clk omap3xxx_clks[] = {
 	CLK(NULL,	"ssi_sst_fck",	&ssi_sst_fck_3430es1,	CK_3430ES1),
 	CLK(NULL,	"ssi_sst_fck",	&ssi_sst_fck_3430es2,	CK_3430ES2PLUS | CK_36XX),
 	CLK(NULL,	"core_l3_ick",	&core_l3_ick,	CK_3XXX),
-	CLK("musb_hdrc",	"ick",	&hsotgusb_ick_3430es1,	CK_3430ES1),
-	CLK("musb_hdrc",	"ick",	&hsotgusb_ick_3430es2,	CK_3430ES2PLUS | CK_36XX),
+	CLK("musb-omap2430",	"ick",	&hsotgusb_ick_3430es1,	CK_3430ES1),
+	CLK("musb-omap2430",	"ick",	&hsotgusb_ick_3430es2,	CK_3430ES2PLUS | CK_36XX),
 	CLK(NULL,	"sdrc_ick",	&sdrc_ick,	CK_3XXX),
 	CLK(NULL,	"gpmc_fck",	&gpmc_fck,	CK_3XXX),
 	CLK(NULL,	"security_l3_ick", &security_l3_ick, CK_34XX | CK_36XX),
 	CLK(NULL,	"pka_ick",	&pka_ick,	CK_34XX | CK_36XX),
 	CLK(NULL,	"core_l4_ick",	&core_l4_ick,	CK_3XXX),
 	CLK(NULL,	"usbtll_ick",	&usbtll_ick,	CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+	CLK("ehci-omap.0",	"usbtll_ick",	&usbtll_ick,	CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
 	CLK("mmci-omap-hs.2",	"ick",	&mmchs3_ick,	CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
 	CLK(NULL,	"icr_ick",	&icr_ick,	CK_34XX | CK_36XX),
 	CLK("omap-aes",	"ick",	&aes2_ick,	CK_34XX | CK_36XX),
@@ -3366,8 +3368,11 @@ static struct omap_clk omap3xxx_clks[] = {
 	CLK(NULL,	"cam_ick",	&cam_ick,	CK_34XX | CK_36XX),
 	CLK(NULL,	"csi2_96m_fck",	&csi2_96m_fck,	CK_34XX | CK_36XX),
 	CLK(NULL,	"usbhost_120m_fck", &usbhost_120m_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+	CLK("ehci-omap.0",	"hs_fck", &usbhost_120m_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
 	CLK(NULL,	"usbhost_48m_fck", &usbhost_48m_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+	CLK("ehci-omap.0",	"fs_fck", &usbhost_48m_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
 	CLK(NULL,	"usbhost_ick",	&usbhost_ick,	CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+	CLK("ehci-omap.0",	"usbhost_ick",	&usbhost_ick,	CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
 	CLK(NULL,	"usim_fck",	&usim_fck,	CK_3430ES2PLUS | CK_36XX),
 	CLK(NULL,	"gpt1_fck",	&gpt1_fck,	CK_3XXX),
 	CLK(NULL,	"wkup_32k_fck",	&wkup_32k_fck,	CK_3XXX),
@@ -3445,8 +3450,8 @@ static struct omap_clk omap3xxx_clks[] = {
 	CLK("davinci_emac",	"phy_clk",	&emac_fck,	CK_AM35XX),
 	CLK("vpfe-capture",	"master",	&vpfe_ick,	CK_AM35XX),
 	CLK("vpfe-capture",	"slave",	&vpfe_fck,	CK_AM35XX),
-	CLK("musb_hdrc",	"ick",		&hsotgusb_ick_am35xx,	CK_AM35XX),
-	CLK("musb_hdrc",	"fck",		&hsotgusb_fck_am35xx,	CK_AM35XX),
+	CLK("musb-am35x",	"ick",		&hsotgusb_ick_am35xx,	CK_AM35XX),
+	CLK("musb-am35x",	"fck",		&hsotgusb_fck_am35xx,	CK_AM35XX),
 	CLK(NULL,	"hecc_ck",	&hecc_ck,	CK_AM35XX),
 	CLK(NULL,	"uart4_ick",	&uart4_ick_am35xx,	CK_AM35XX),
 };

+ 6 - 1
arch/arm/mach-omap2/clock44xx_data.c

@@ -3198,6 +3198,7 @@ static struct omap_clk omap44xx_clks[] = {
 	CLK(NULL,	"uart3_fck",			&uart3_fck,	CK_443X),
 	CLK(NULL,	"uart4_fck",			&uart4_fck,	CK_443X),
 	CLK(NULL,	"usb_host_fs_fck",		&usb_host_fs_fck,	CK_443X),
+	CLK("ehci-omap.0",	"fs_fck",		&usb_host_fs_fck,	CK_443X),
 	CLK(NULL,	"utmi_p1_gfclk",		&utmi_p1_gfclk,	CK_443X),
 	CLK(NULL,	"usb_host_hs_utmi_p1_clk",	&usb_host_hs_utmi_p1_clk,	CK_443X),
 	CLK(NULL,	"utmi_p2_gfclk",		&utmi_p2_gfclk,	CK_443X),
@@ -3209,14 +3210,18 @@ static struct omap_clk omap44xx_clks[] = {
 	CLK(NULL,	"usb_host_hs_hsic480m_p2_clk",	&usb_host_hs_hsic480m_p2_clk,	CK_443X),
 	CLK(NULL,	"usb_host_hs_func48mclk",	&usb_host_hs_func48mclk,	CK_443X),
 	CLK(NULL,	"usb_host_hs_fck",		&usb_host_hs_fck,	CK_443X),
+	CLK("ehci-omap.0",	"hs_fck",		&usb_host_hs_fck,	CK_443X),
+	CLK("ehci-omap.0",	"usbhost_ick",		&dummy_ck,		CK_443X),
 	CLK(NULL,	"otg_60m_gfclk",		&otg_60m_gfclk,	CK_443X),
 	CLK(NULL,	"usb_otg_hs_xclk",		&usb_otg_hs_xclk,	CK_443X),
-	CLK("musb_hdrc",	"ick",				&usb_otg_hs_ick,	CK_443X),
+	CLK("musb-omap2430",	"ick",				&usb_otg_hs_ick,	CK_443X),
 	CLK(NULL,	"usb_phy_cm_clk32k",		&usb_phy_cm_clk32k,	CK_443X),
 	CLK(NULL,	"usb_tll_hs_usb_ch2_clk",	&usb_tll_hs_usb_ch2_clk,	CK_443X),
 	CLK(NULL,	"usb_tll_hs_usb_ch0_clk",	&usb_tll_hs_usb_ch0_clk,	CK_443X),
 	CLK(NULL,	"usb_tll_hs_usb_ch1_clk",	&usb_tll_hs_usb_ch1_clk,	CK_443X),
 	CLK(NULL,	"usb_tll_hs_ick",		&usb_tll_hs_ick,	CK_443X),
+	CLK("ehci-omap.0",	"usbtll_ick",		&usb_tll_hs_ick,	CK_443X),
+	CLK("ehci-omap.0",	"usbtll_fck",		&dummy_ck,	CK_443X),
 	CLK(NULL,	"usim_ck",			&usim_ck,	CK_443X),
 	CLK(NULL,	"usim_fclk",			&usim_fclk,	CK_443X),
 	CLK(NULL,	"usim_fck",			&usim_fck,	CK_443X),

+ 149 - 0
arch/arm/mach-omap2/omap_phy_internal.c

@@ -0,0 +1,149 @@
+/*
+  * This file configures the internal USB PHY in OMAP4430. Used
+  * with TWL6030 transceiver and MUSB on OMAP4430.
+  *
+  * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
+  * This program is free software; you can redistribute it and/or modify
+  * it under the terms of the GNU General Public License as published by
+  * the Free Software Foundation; either version 2 of the License, or
+  * (at your option) any later version.
+  *
+  * Author: Hema HK <hemahk@ti.com>
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  * GNU General Public License for more details.
+  *
+  * You should have received a copy of the GNU General Public License
+  * along with this program; if not, write to the Free Software
+  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+  *
+  */
+
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/usb.h>
+
+#include <plat/usb.h>
+
+/* OMAP control module register for UTMI PHY */
+#define CONTROL_DEV_CONF		0x300
+#define PHY_PD				0x1
+
+#define USBOTGHS_CONTROL		0x33c
+#define	AVALID				BIT(0)
+#define	BVALID				BIT(1)
+#define	VBUSVALID			BIT(2)
+#define	SESSEND				BIT(3)
+#define	IDDIG				BIT(4)
+
+static struct clk *phyclk, *clk48m, *clk32k;
+static void __iomem *ctrl_base;
+
+int omap4430_phy_init(struct device *dev)
+{
+	ctrl_base = ioremap(OMAP443X_SCM_BASE, SZ_1K);
+	if (!ctrl_base) {
+		dev_err(dev, "control module ioremap failed\n");
+		return -ENOMEM;
+	}
+	/* Power down the phy */
+	__raw_writel(PHY_PD, ctrl_base + CONTROL_DEV_CONF);
+	phyclk = clk_get(dev, "ocp2scp_usb_phy_ick");
+
+	if (IS_ERR(phyclk)) {
+		dev_err(dev, "cannot clk_get ocp2scp_usb_phy_ick\n");
+		iounmap(ctrl_base);
+		return PTR_ERR(phyclk);
+	}
+
+	clk48m = clk_get(dev, "ocp2scp_usb_phy_phy_48m");
+	if (IS_ERR(clk48m)) {
+		dev_err(dev, "cannot clk_get ocp2scp_usb_phy_phy_48m\n");
+		clk_put(phyclk);
+		iounmap(ctrl_base);
+		return PTR_ERR(clk48m);
+	}
+
+	clk32k = clk_get(dev, "usb_phy_cm_clk32k");
+	if (IS_ERR(clk32k)) {
+		dev_err(dev, "cannot clk_get usb_phy_cm_clk32k\n");
+		clk_put(phyclk);
+		clk_put(clk48m);
+		iounmap(ctrl_base);
+		return PTR_ERR(clk32k);
+	}
+	return 0;
+}
+
+int omap4430_phy_set_clk(struct device *dev, int on)
+{
+	static int state;
+
+	if (on && !state) {
+		/* Enable the phy clocks */
+		clk_enable(phyclk);
+		clk_enable(clk48m);
+		clk_enable(clk32k);
+		state = 1;
+	} else if (state) {
+		/* Disable the phy clocks */
+		clk_disable(phyclk);
+		clk_disable(clk48m);
+		clk_disable(clk32k);
+		state = 0;
+	}
+	return 0;
+}
+
+int omap4430_phy_power(struct device *dev, int ID, int on)
+{
+	if (on) {
+		/* enable the clocks */
+		omap4430_phy_set_clk(dev, 1);
+		/* power on the phy */
+		if (__raw_readl(ctrl_base + CONTROL_DEV_CONF) & PHY_PD) {
+			__raw_writel(~PHY_PD, ctrl_base + CONTROL_DEV_CONF);
+			mdelay(200);
+		}
+		if (ID)
+			/* enable VBUS valid, IDDIG ground */
+			__raw_writel(AVALID | VBUSVALID, ctrl_base +
+							USBOTGHS_CONTROL);
+		else
+			/*
+			 * Enable VBUS Valid, AValid and IDDIG
+			 * high impedance
+			 */
+			__raw_writel(IDDIG | AVALID | VBUSVALID,
+						ctrl_base + USBOTGHS_CONTROL);
+	} else {
+		/* Enable session END and IDDIG to high impedance. */
+		__raw_writel(SESSEND | IDDIG, ctrl_base +
+					USBOTGHS_CONTROL);
+		/* Disable the clocks */
+		omap4430_phy_set_clk(dev, 0);
+		/* Power down the phy */
+		__raw_writel(PHY_PD, ctrl_base + CONTROL_DEV_CONF);
+	}
+
+	return 0;
+}
+
+int omap4430_phy_exit(struct device *dev)
+{
+	if (ctrl_base)
+		iounmap(ctrl_base);
+	if (phyclk)
+		clk_put(phyclk);
+	if (clk48m)
+		clk_put(clk48m);
+	if (clk32k)
+		clk_put(clk32k);
+
+	return 0;
+}

+ 136 - 8
arch/arm/mach-omap2/usb-ehci.c

@@ -34,22 +34,15 @@
 
 static struct resource ehci_resources[] = {
 	{
-		.start	= OMAP34XX_EHCI_BASE,
-		.end	= OMAP34XX_EHCI_BASE + SZ_1K - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
-		.start	= OMAP34XX_UHH_CONFIG_BASE,
-		.end	= OMAP34XX_UHH_CONFIG_BASE + SZ_1K - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
-		.start	= OMAP34XX_USBTLL_BASE,
-		.end	= OMAP34XX_USBTLL_BASE + SZ_4K - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{         /* general IRQ */
-		.start   = INT_34XX_EHCI_IRQ,
 		.flags   = IORESOURCE_IRQ,
 	}
 };
@@ -214,13 +207,148 @@ static void setup_ehci_io_mux(const enum ehci_hcd_omap_mode *port_mode)
 	return;
 }
 
+static void setup_4430ehci_io_mux(const enum ehci_hcd_omap_mode *port_mode)
+{
+	switch (port_mode[0]) {
+	case EHCI_HCD_OMAP_MODE_PHY:
+		omap_mux_init_signal("usbb1_ulpiphy_stp",
+			OMAP_PIN_OUTPUT);
+		omap_mux_init_signal("usbb1_ulpiphy_clk",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb1_ulpiphy_dir",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb1_ulpiphy_nxt",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb1_ulpiphy_dat0",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb1_ulpiphy_dat1",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb1_ulpiphy_dat2",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb1_ulpiphy_dat3",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb1_ulpiphy_dat4",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb1_ulpiphy_dat5",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb1_ulpiphy_dat6",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb1_ulpiphy_dat7",
+			OMAP_PIN_INPUT_PULLDOWN);
+			break;
+	case EHCI_HCD_OMAP_MODE_TLL:
+		omap_mux_init_signal("usbb1_ulpitll_stp",
+			OMAP_PIN_INPUT_PULLUP);
+		omap_mux_init_signal("usbb1_ulpitll_clk",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb1_ulpitll_dir",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb1_ulpitll_nxt",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb1_ulpitll_dat0",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb1_ulpitll_dat1",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb1_ulpitll_dat2",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb1_ulpitll_dat3",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb1_ulpitll_dat4",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb1_ulpitll_dat5",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb1_ulpitll_dat6",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb1_ulpitll_dat7",
+			OMAP_PIN_INPUT_PULLDOWN);
+			break;
+	case EHCI_HCD_OMAP_MODE_UNKNOWN:
+	default:
+			break;
+	}
+	switch (port_mode[1]) {
+	case EHCI_HCD_OMAP_MODE_PHY:
+		omap_mux_init_signal("usbb2_ulpiphy_stp",
+			OMAP_PIN_OUTPUT);
+		omap_mux_init_signal("usbb2_ulpiphy_clk",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb2_ulpiphy_dir",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb2_ulpiphy_nxt",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb2_ulpiphy_dat0",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb2_ulpiphy_dat1",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb2_ulpiphy_dat2",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb2_ulpiphy_dat3",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb2_ulpiphy_dat4",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb2_ulpiphy_dat5",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb2_ulpiphy_dat6",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb2_ulpiphy_dat7",
+			OMAP_PIN_INPUT_PULLDOWN);
+			break;
+	case EHCI_HCD_OMAP_MODE_TLL:
+		omap_mux_init_signal("usbb2_ulpitll_stp",
+			OMAP_PIN_INPUT_PULLUP);
+		omap_mux_init_signal("usbb2_ulpitll_clk",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb2_ulpitll_dir",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb2_ulpitll_nxt",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb2_ulpitll_dat0",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb2_ulpitll_dat1",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb2_ulpitll_dat2",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb2_ulpitll_dat3",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb2_ulpitll_dat4",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb2_ulpitll_dat5",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb2_ulpitll_dat6",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb2_ulpitll_dat7",
+			OMAP_PIN_INPUT_PULLDOWN);
+			break;
+	case EHCI_HCD_OMAP_MODE_UNKNOWN:
+	default:
+			break;
+	}
+}
+
 void __init usb_ehci_init(const struct ehci_hcd_omap_platform_data *pdata)
 {
 	platform_device_add_data(&ehci_device, pdata, sizeof(*pdata));
 
 	/* Setup Pin IO MUX for EHCI */
-	if (cpu_is_omap34xx())
+	if (cpu_is_omap34xx()) {
+		ehci_resources[0].start	= OMAP34XX_EHCI_BASE;
+		ehci_resources[0].end	= OMAP34XX_EHCI_BASE + SZ_1K - 1;
+		ehci_resources[1].start	= OMAP34XX_UHH_CONFIG_BASE;
+		ehci_resources[1].end	= OMAP34XX_UHH_CONFIG_BASE + SZ_1K - 1;
+		ehci_resources[2].start	= OMAP34XX_USBTLL_BASE;
+		ehci_resources[2].end	= OMAP34XX_USBTLL_BASE + SZ_4K - 1;
+		ehci_resources[3].start = INT_34XX_EHCI_IRQ;
 		setup_ehci_io_mux(pdata->port_mode);
+	} else if (cpu_is_omap44xx()) {
+		ehci_resources[0].start	= OMAP44XX_HSUSB_EHCI_BASE;
+		ehci_resources[0].end	= OMAP44XX_HSUSB_EHCI_BASE + SZ_1K - 1;
+		ehci_resources[1].start	= OMAP44XX_UHH_CONFIG_BASE;
+		ehci_resources[1].end	= OMAP44XX_UHH_CONFIG_BASE + SZ_2K - 1;
+		ehci_resources[2].start	= OMAP44XX_USBTLL_BASE;
+		ehci_resources[2].end	= OMAP44XX_USBTLL_BASE + SZ_4K - 1;
+		ehci_resources[3].start = OMAP44XX_IRQ_EHCI;
+		setup_4430ehci_io_mux(pdata->port_mode);
+	}
 
 	if (platform_device_register(&ehci_device) < 0) {
 		printk(KERN_ERR "Unable to register HS-USB (EHCI) device\n");

+ 102 - 2
arch/arm/mach-omap2/usb-musb.c

@@ -30,8 +30,101 @@
 #include <mach/irqs.h>
 #include <mach/am35xx.h>
 #include <plat/usb.h>
+#include "control.h"
 
-#ifdef CONFIG_USB_MUSB_SOC
+#if defined(CONFIG_USB_MUSB_OMAP2PLUS) || defined (CONFIG_USB_MUSB_AM35X)
+
+static void am35x_musb_reset(void)
+{
+	u32	regval;
+
+	/* Reset the musb interface */
+	regval = omap_ctrl_readl(AM35XX_CONTROL_IP_SW_RESET);
+
+	regval |= AM35XX_USBOTGSS_SW_RST;
+	omap_ctrl_writel(regval, AM35XX_CONTROL_IP_SW_RESET);
+
+	regval &= ~AM35XX_USBOTGSS_SW_RST;
+	omap_ctrl_writel(regval, AM35XX_CONTROL_IP_SW_RESET);
+
+	regval = omap_ctrl_readl(AM35XX_CONTROL_IP_SW_RESET);
+}
+
+static void am35x_musb_phy_power(u8 on)
+{
+	unsigned long timeout = jiffies + msecs_to_jiffies(100);
+	u32 devconf2;
+
+	if (on) {
+		/*
+		 * Start the on-chip PHY and its PLL.
+		 */
+		devconf2 = omap_ctrl_readl(AM35XX_CONTROL_DEVCONF2);
+
+		devconf2 &= ~(CONF2_RESET | CONF2_PHYPWRDN | CONF2_OTGPWRDN);
+		devconf2 |= CONF2_PHY_PLLON;
+
+		omap_ctrl_writel(devconf2, AM35XX_CONTROL_DEVCONF2);
+
+		pr_info("Waiting for PHY clock good...\n");
+		while (!(omap_ctrl_readl(AM35XX_CONTROL_DEVCONF2)
+				& CONF2_PHYCLKGD)) {
+			cpu_relax();
+
+			if (time_after(jiffies, timeout)) {
+				pr_err("musb PHY clock good timed out\n");
+				break;
+			}
+		}
+	} else {
+		/*
+		 * Power down the on-chip PHY.
+		 */
+		devconf2 = omap_ctrl_readl(AM35XX_CONTROL_DEVCONF2);
+
+		devconf2 &= ~CONF2_PHY_PLLON;
+		devconf2 |=  CONF2_PHYPWRDN | CONF2_OTGPWRDN;
+		omap_ctrl_writel(devconf2, AM35XX_CONTROL_DEVCONF2);
+	}
+}
+
+static void am35x_musb_clear_irq(void)
+{
+	u32 regval;
+
+	regval = omap_ctrl_readl(AM35XX_CONTROL_LVL_INTR_CLEAR);
+	regval |= AM35XX_USBOTGSS_INT_CLR;
+	omap_ctrl_writel(regval, AM35XX_CONTROL_LVL_INTR_CLEAR);
+	regval = omap_ctrl_readl(AM35XX_CONTROL_LVL_INTR_CLEAR);
+}
+
+static void am35x_musb_set_mode(u8 musb_mode)
+{
+	u32 devconf2 = omap_ctrl_readl(AM35XX_CONTROL_DEVCONF2);
+
+	devconf2 &= ~CONF2_OTGMODE;
+	switch (musb_mode) {
+#ifdef	CONFIG_USB_MUSB_HDRC_HCD
+	case MUSB_HOST:		/* Force VBUS valid, ID = 0 */
+		devconf2 |= CONF2_FORCE_HOST;
+		break;
+#endif
+#ifdef	CONFIG_USB_GADGET_MUSB_HDRC
+	case MUSB_PERIPHERAL:	/* Force VBUS valid, ID = 1 */
+		devconf2 |= CONF2_FORCE_DEVICE;
+		break;
+#endif
+#ifdef	CONFIG_USB_MUSB_OTG
+	case MUSB_OTG:		/* Don't override the VBUS/ID comparators */
+		devconf2 |= CONF2_NO_OVERRIDE;
+		break;
+#endif
+	default:
+		pr_info("Unsupported mode %u\n", musb_mode);
+	}
+
+	omap_ctrl_writel(devconf2, AM35XX_CONTROL_DEVCONF2);
+}
 
 static struct resource musb_resources[] = {
 	[0] = { /* start and end set dynamically */
@@ -40,10 +133,12 @@ static struct resource musb_resources[] = {
 	[1] = {	/* general IRQ */
 		.start	= INT_243X_HS_USB_MC,
 		.flags	= IORESOURCE_IRQ,
+		.name	= "mc",
 	},
 	[2] = {	/* DMA IRQ */
 		.start	= INT_243X_HS_USB_DMA,
 		.flags	= IORESOURCE_IRQ,
+		.name	= "dma",
 	},
 };
 
@@ -75,7 +170,7 @@ static struct musb_hdrc_platform_data musb_plat = {
 static u64 musb_dmamask = DMA_BIT_MASK(32);
 
 static struct platform_device musb_device = {
-	.name		= "musb_hdrc",
+	.name		= "musb-omap2430",
 	.id		= -1,
 	.dev = {
 		.dma_mask		= &musb_dmamask,
@@ -91,8 +186,13 @@ void __init usb_musb_init(struct omap_musb_board_data *board_data)
 	if (cpu_is_omap243x()) {
 		musb_resources[0].start = OMAP243X_HS_BASE;
 	} else if (cpu_is_omap3517() || cpu_is_omap3505()) {
+		musb_device.name = "musb-am35x";
 		musb_resources[0].start = AM35XX_IPSS_USBOTGSS_BASE;
 		musb_resources[1].start = INT_35XX_USBOTG_IRQ;
+		board_data->set_phy_power = am35x_musb_phy_power;
+		board_data->clear_irq = am35x_musb_clear_irq;
+		board_data->set_mode = am35x_musb_set_mode;
+		board_data->reset = am35x_musb_reset;
 	} else if (cpu_is_omap34xx()) {
 		musb_resources[0].start = OMAP34XX_HSUSB_OTG_BASE;
 	} else if (cpu_is_omap44xx()) {

+ 1 - 1
arch/arm/mach-omap2/usb-tusb6010.c

@@ -224,7 +224,7 @@ static struct resource tusb_resources[] = {
 static u64 tusb_dmamask = ~(u32)0;
 
 static struct platform_device tusb_device = {
-	.name		= "musb_hdrc",
+	.name		= "musb-tusb",
 	.id		= -1,
 	.dev = {
 		.dma_mask		= &tusb_dmamask,

+ 5 - 0
arch/arm/plat-omap/include/plat/omap44xx.h

@@ -52,5 +52,10 @@
 #define OMAP4_MMU1_BASE			0x55082000
 #define OMAP4_MMU2_BASE			0x4A066000
 
+#define OMAP44XX_USBTLL_BASE		(L4_44XX_BASE + 0x62000)
+#define OMAP44XX_UHH_CONFIG_BASE	(L4_44XX_BASE + 0x64000)
+#define OMAP44XX_HSUSB_OHCI_BASE	(L4_44XX_BASE + 0x64800)
+#define OMAP44XX_HSUSB_EHCI_BASE	(L4_44XX_BASE + 0x64C00)
+
 #endif /* __ASM_ARCH_OMAP44XX_H */
 

+ 10 - 0
arch/arm/plat-omap/include/plat/usb.h

@@ -11,6 +11,7 @@ enum ehci_hcd_omap_mode {
 	EHCI_HCD_OMAP_MODE_UNKNOWN,
 	EHCI_HCD_OMAP_MODE_PHY,
 	EHCI_HCD_OMAP_MODE_TLL,
+	EHCI_HCD_OMAP_MODE_HSIC,
 };
 
 enum ohci_omap3_port_mode {
@@ -69,6 +70,10 @@ struct omap_musb_board_data {
 	u8	mode;
 	u16	power;
 	unsigned extvbus:1;
+	void	(*set_phy_power)(u8 on);
+	void	(*clear_irq)(void);
+	void	(*set_mode)(u8 mode);
+	void	(*reset)(void);
 };
 
 enum musb_interface    {MUSB_INTERFACE_ULPI, MUSB_INTERFACE_UTMI};
@@ -79,6 +84,11 @@ extern void usb_ehci_init(const struct ehci_hcd_omap_platform_data *pdata);
 
 extern void usb_ohci_init(const struct ohci_hcd_omap_platform_data *pdata);
 
+extern int omap4430_phy_power(struct device *dev, int ID, int on);
+extern int omap4430_phy_set_clk(struct device *dev, int on);
+extern int omap4430_phy_init(struct device *dev);
+extern int omap4430_phy_exit(struct device *dev);
+
 #endif
 
 

+ 1 - 1
arch/blackfin/mach-bf527/boards/ad7160eval.c

@@ -83,7 +83,7 @@ static struct musb_hdrc_platform_data musb_plat = {
 static u64 musb_dmamask = ~(u32)0;
 
 static struct platform_device musb_device = {
-	.name		= "musb_hdrc",
+	.name		= "musb-blackfin",
 	.id		= 0,
 	.dev = {
 		.dma_mask		= &musb_dmamask,

+ 3 - 1
arch/blackfin/mach-bf527/boards/cm_bf527.c

@@ -82,11 +82,13 @@ static struct resource musb_resources[] = {
 		.start	= IRQ_USB_INT0,
 		.end	= IRQ_USB_INT0,
 		.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
+		.name	= "mc"
 	},
 	[2] = {	/* DMA IRQ */
 		.start	= IRQ_USB_DMA,
 		.end	= IRQ_USB_DMA,
 		.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
+		.name	= "dma"
 	},
 };
 
@@ -118,7 +120,7 @@ static struct musb_hdrc_platform_data musb_plat = {
 static u64 musb_dmamask = ~(u32)0;
 
 static struct platform_device musb_device = {
-	.name		= "musb_hdrc",
+	.name		= "musb-blackfin",
 	.id		= 0,
 	.dev = {
 		.dma_mask		= &musb_dmamask,

+ 3 - 1
arch/blackfin/mach-bf527/boards/ezbrd.c

@@ -46,11 +46,13 @@ static struct resource musb_resources[] = {
 		.start	= IRQ_USB_INT0,
 		.end	= IRQ_USB_INT0,
 		.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
+		.name	= "mc"
 	},
 	[2] = {	/* DMA IRQ */
 		.start	= IRQ_USB_DMA,
 		.end	= IRQ_USB_DMA,
 		.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
+		.name	= "dma"
 	},
 };
 
@@ -82,7 +84,7 @@ static struct musb_hdrc_platform_data musb_plat = {
 static u64 musb_dmamask = ~(u32)0;
 
 static struct platform_device musb_device = {
-	.name		= "musb_hdrc",
+	.name		= "musb-blackfin",
 	.id		= 0,
 	.dev = {
 		.dma_mask		= &musb_dmamask,

+ 3 - 1
arch/blackfin/mach-bf527/boards/ezkit.c

@@ -86,11 +86,13 @@ static struct resource musb_resources[] = {
 		.start	= IRQ_USB_INT0,
 		.end	= IRQ_USB_INT0,
 		.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
+		.name	= "mc"
 	},
 	[2] = {	/* DMA IRQ */
 		.start	= IRQ_USB_DMA,
 		.end	= IRQ_USB_DMA,
 		.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
+		.name	= "dma"
 	},
 };
 
@@ -122,7 +124,7 @@ static struct musb_hdrc_platform_data musb_plat = {
 static u64 musb_dmamask = ~(u32)0;
 
 static struct platform_device musb_device = {
-	.name		= "musb_hdrc",
+	.name		= "musb-blackfin",
 	.id		= 0,
 	.dev = {
 		.dma_mask		= &musb_dmamask,

+ 1 - 1
arch/blackfin/mach-bf527/boards/tll6527m.c

@@ -91,7 +91,7 @@ static struct musb_hdrc_platform_data musb_plat = {
 static u64 musb_dmamask = ~(u32)0;
 
 static struct platform_device musb_device = {
-	.name		= "musb_hdrc",
+	.name		= "musb-blackfin",
 	.id		= 0,
 	.dev = {
 		.dma_mask		= &musb_dmamask,

+ 3 - 1
arch/blackfin/mach-bf548/boards/cm_bf548.c

@@ -482,11 +482,13 @@ static struct resource musb_resources[] = {
 		.start	= IRQ_USB_INT0,
 		.end	= IRQ_USB_INT0,
 		.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
+		.name	= "mc"
 	},
 	[2] = {	/* DMA IRQ */
 		.start	= IRQ_USB_DMA,
 		.end	= IRQ_USB_DMA,
 		.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
+		.name	= "dma"
 	},
 };
 
@@ -518,7 +520,7 @@ static struct musb_hdrc_platform_data musb_plat = {
 static u64 musb_dmamask = ~(u32)0;
 
 static struct platform_device musb_device = {
-	.name		= "musb_hdrc",
+	.name		= "musb-blackfin",
 	.id		= 0,
 	.dev = {
 		.dma_mask		= &musb_dmamask,

+ 3 - 1
arch/blackfin/mach-bf548/boards/ezkit.c

@@ -587,11 +587,13 @@ static struct resource musb_resources[] = {
 		.start	= IRQ_USB_INT0,
 		.end	= IRQ_USB_INT0,
 		.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
+		.name	= "mc"
 	},
 	[2] = {	/* DMA IRQ */
 		.start	= IRQ_USB_DMA,
 		.end	= IRQ_USB_DMA,
 		.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
+		.name	= "dma"
 	},
 };
 
@@ -623,7 +625,7 @@ static struct musb_hdrc_platform_data musb_plat = {
 static u64 musb_dmamask = ~(u32)0;
 
 static struct platform_device musb_device = {
-	.name		= "musb_hdrc",
+	.name		= "musb-blackfin",
 	.id		= 0,
 	.dev = {
 		.dma_mask		= &musb_dmamask,

+ 3 - 3
arch/ia64/kernel/perfmon.c

@@ -1542,7 +1542,7 @@ pfm_exit_smpl_buffer(pfm_buffer_fmt_t *fmt)
  * any operations on the root directory. However, we need a non-trivial
  * d_name - pfm: will go nicely and kill the special-casing in procfs.
  */
-static struct vfsmount *pfmfs_mnt;
+static struct vfsmount *pfmfs_mnt __read_mostly;
 
 static int __init
 init_pfm_fs(void)
@@ -2185,7 +2185,7 @@ static const struct file_operations pfm_file_ops = {
 };
 
 static int
-pfmfs_delete_dentry(struct dentry *dentry)
+pfmfs_delete_dentry(const struct dentry *dentry)
 {
 	return 1;
 }
@@ -2233,7 +2233,7 @@ pfm_alloc_file(pfm_context_t *ctx)
 	}
 	path.mnt = mntget(pfmfs_mnt);
 
-	path.dentry->d_op = &pfmfs_dentry_operations;
+	d_set_d_op(path.dentry, &pfmfs_dentry_operations);
 	d_add(path.dentry, inode);
 
 	file = alloc_file(&path, FMODE_READ, &pfm_file_ops);

+ 12 - 6
arch/powerpc/platforms/cell/spufs/inode.c

@@ -71,12 +71,18 @@ spufs_alloc_inode(struct super_block *sb)
 	return &ei->vfs_inode;
 }
 
-static void
-spufs_destroy_inode(struct inode *inode)
+static void spufs_i_callback(struct rcu_head *head)
 {
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+	INIT_LIST_HEAD(&inode->i_dentry);
 	kmem_cache_free(spufs_inode_cache, SPUFS_I(inode));
 }
 
+static void spufs_destroy_inode(struct inode *inode)
+{
+	call_rcu(&inode->i_rcu, spufs_i_callback);
+}
+
 static void
 spufs_init_once(void *p)
 {
@@ -159,18 +165,18 @@ static void spufs_prune_dir(struct dentry *dir)
 
 	mutex_lock(&dir->d_inode->i_mutex);
 	list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_u.d_child) {
-		spin_lock(&dcache_lock);
 		spin_lock(&dentry->d_lock);
 		if (!(d_unhashed(dentry)) && dentry->d_inode) {
-			dget_locked(dentry);
+			dget_dlock(dentry);
 			__d_drop(dentry);
 			spin_unlock(&dentry->d_lock);
 			simple_unlink(dir->d_inode, dentry);
-			spin_unlock(&dcache_lock);
+			/* XXX: what was dcache_lock protecting here? Other
+			 * filesystems (IB, configfs) release dcache_lock
+			 * before unlink */
 			dput(dentry);
 		} else {
 			spin_unlock(&dentry->d_lock);
-			spin_unlock(&dcache_lock);
 		}
 	}
 	shrink_dcache_parent(dir);

+ 5 - 0
arch/sh/Kconfig

@@ -349,6 +349,7 @@ config CPU_SUBTYPE_SH7720
 	select CPU_HAS_DSP
 	select SYS_SUPPORTS_CMT
 	select ARCH_WANT_OPTIONAL_GPIOLIB
+	select USB_ARCH_HAS_OHCI
 	help
 	  Select SH7720 if you have a SH3-DSP SH7720 CPU.
 
@@ -357,6 +358,7 @@ config CPU_SUBTYPE_SH7721
 	select CPU_SH3
 	select CPU_HAS_DSP
 	select SYS_SUPPORTS_CMT
+	select USB_ARCH_HAS_OHCI
 	help
 	  Select SH7721 if you have a SH3-DSP SH7721 CPU.
 
@@ -437,6 +439,7 @@ config CPU_SUBTYPE_SH7757
 config CPU_SUBTYPE_SH7763
 	bool "Support SH7763 processor"
 	select CPU_SH4A
+	select USB_ARCH_HAS_OHCI
 	help
 	  Select SH7763 if you have a SH4A SH7763(R5S77631) CPU.
 
@@ -463,6 +466,8 @@ config CPU_SUBTYPE_SH7786
 	select CPU_HAS_PTEAEX
 	select GENERIC_CLOCKEVENTS_BROADCAST if SMP
 	select ARCH_WANT_OPTIONAL_GPIOLIB
+	select USB_ARCH_HAS_OHCI
+	select USB_ARCH_HAS_EHCI
 
 config CPU_SUBTYPE_SHX3
 	bool "Support SH-X3 processor"

+ 31 - 4
arch/sh/kernel/cpu/sh4a/setup-sh7786.c

@@ -522,10 +522,37 @@ static struct platform_device dma0_device = {
 	},
 };
 
+#define USB_EHCI_START 0xffe70000
+#define USB_OHCI_START 0xffe70400
+
+static struct resource usb_ehci_resources[] = {
+	[0] = {
+		.start	= USB_EHCI_START,
+		.end	= USB_EHCI_START + 0x3ff,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= 77,
+		.end	= 77,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device usb_ehci_device = {
+	.name		= "sh_ehci",
+	.id		= -1,
+	.dev = {
+		.dma_mask		= &usb_ehci_device.dev.coherent_dma_mask,
+		.coherent_dma_mask	= DMA_BIT_MASK(32),
+	},
+	.num_resources	= ARRAY_SIZE(usb_ehci_resources),
+	.resource	= usb_ehci_resources,
+};
+
 static struct resource usb_ohci_resources[] = {
 	[0] = {
-		.start	= 0xffe70400,
-		.end	= 0xffe704ff,
+		.start	= USB_OHCI_START,
+		.end	= USB_OHCI_START + 0x3ff,
 		.flags	= IORESOURCE_MEM,
 	},
 	[1] = {
@@ -535,12 +562,11 @@ static struct resource usb_ohci_resources[] = {
 	},
 };
 
-static u64 usb_ohci_dma_mask = DMA_BIT_MASK(32);
 static struct platform_device usb_ohci_device = {
 	.name		= "sh_ohci",
 	.id		= -1,
 	.dev = {
-		.dma_mask		= &usb_ohci_dma_mask,
+		.dma_mask		= &usb_ohci_device.dev.coherent_dma_mask,
 		.coherent_dma_mask	= DMA_BIT_MASK(32),
 	},
 	.num_resources	= ARRAY_SIZE(usb_ohci_resources),
@@ -570,6 +596,7 @@ static struct platform_device *sh7786_early_devices[] __initdata = {
 
 static struct platform_device *sh7786_devices[] __initdata = {
 	&dma0_device,
+	&usb_ehci_device,
 	&usb_ohci_device,
 };
 

+ 1 - 0
drivers/hid/hid-core.c

@@ -1604,6 +1604,7 @@ static const struct hid_device_id hid_ignore_list[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_DEALEXTREAME, USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, 0x0004) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) },

+ 2 - 0
drivers/hid/hid-ids.h

@@ -200,6 +200,8 @@
 #define USB_VENDOR_ID_ELECOM		0x056e
 #define USB_DEVICE_ID_ELECOM_BM084	0x0061
 
+#define USB_VENDOR_ID_DREAM_CHEEKY	0x1d34
+
 #define USB_VENDOR_ID_ELO		0x04E7
 #define USB_DEVICE_ID_ELO_TS2700	0x0020
 

+ 2 - 6
drivers/infiniband/hw/ipath/ipath_fs.c

@@ -277,18 +277,14 @@ static int remove_file(struct dentry *parent, char *name)
 		goto bail;
 	}
 
-	spin_lock(&dcache_lock);
 	spin_lock(&tmp->d_lock);
 	if (!(d_unhashed(tmp) && tmp->d_inode)) {
-		dget_locked(tmp);
+		dget_dlock(tmp);
 		__d_drop(tmp);
 		spin_unlock(&tmp->d_lock);
-		spin_unlock(&dcache_lock);
 		simple_unlink(parent->d_inode, tmp);
-	} else {
+	} else
 		spin_unlock(&tmp->d_lock);
-		spin_unlock(&dcache_lock);
-	}
 
 	ret = 0;
 bail:

+ 1 - 4
drivers/infiniband/hw/qib/qib_fs.c

@@ -453,17 +453,14 @@ static int remove_file(struct dentry *parent, char *name)
 		goto bail;
 	}
 
-	spin_lock(&dcache_lock);
 	spin_lock(&tmp->d_lock);
 	if (!(d_unhashed(tmp) && tmp->d_inode)) {
-		dget_locked(tmp);
+		dget_dlock(tmp);
 		__d_drop(tmp);
 		spin_unlock(&tmp->d_lock);
-		spin_unlock(&dcache_lock);
 		simple_unlink(parent->d_inode, tmp);
 	} else {
 		spin_unlock(&tmp->d_lock);
-		spin_unlock(&dcache_lock);
 	}
 
 	ret = 0;

+ 1 - 1
drivers/input/keyboard/tc3589x-keypad.c

@@ -469,4 +469,4 @@ module_exit(tc3589x_keypad_exit);
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Jayeeta Banerjee/Sundar Iyer");
 MODULE_DESCRIPTION("TC35893 Keypad Driver");
-MODULE_ALIAS("platform:tc3589x-keypad")
+MODULE_ALIAS("platform:tc3589x-keypad");

+ 2 - 1
drivers/media/video/tlg2300/pd-main.c

@@ -452,7 +452,8 @@ static int poseidon_probe(struct usb_interface *interface,
 
 	device_init_wakeup(&udev->dev, 1);
 #ifdef CONFIG_PM
-	pd->udev->autosuspend_delay = HZ * PM_SUSPEND_DELAY;
+	pm_runtime_set_autosuspend_delay(&pd->udev->dev,
+			1000 * PM_SUSPEND_DELAY);
 	usb_enable_autosuspend(pd->udev);
 
 	if (in_hibernation(pd)) {

+ 39 - 5
drivers/mfd/twl-core.c

@@ -95,7 +95,8 @@
 #define twl_has_rtc()	false
 #endif
 
-#if defined(CONFIG_TWL4030_USB) || defined(CONFIG_TWL4030_USB_MODULE)
+#if defined(CONFIG_TWL4030_USB) || defined(CONFIG_TWL4030_USB_MODULE) ||\
+	defined(CONFIG_TWL6030_USB) || defined(CONFIG_TWL6030_USB_MODULE)
 #define twl_has_usb()	true
 #else
 #define twl_has_usb()	false
@@ -682,6 +683,43 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features)
 			usb3v1.dev = child;
 		}
 	}
+	if (twl_has_usb() && pdata->usb && twl_class_is_6030()) {
+
+		static struct regulator_consumer_supply usb3v3 = {
+			.supply =	"vusb",
+		};
+
+		if (twl_has_regulator()) {
+			/* this is a template that gets copied */
+			struct regulator_init_data usb_fixed = {
+				.constraints.valid_modes_mask =
+					REGULATOR_MODE_NORMAL
+					| REGULATOR_MODE_STANDBY,
+				.constraints.valid_ops_mask =
+					REGULATOR_CHANGE_MODE
+					| REGULATOR_CHANGE_STATUS,
+			};
+
+			child = add_regulator_linked(TWL6030_REG_VUSB,
+						      &usb_fixed, &usb3v3, 1);
+			if (IS_ERR(child))
+				return PTR_ERR(child);
+		}
+
+		child = add_child(0, "twl6030_usb",
+			pdata->usb, sizeof(*pdata->usb),
+			true,
+			/* irq1 = VBUS_PRES, irq0 = USB ID */
+			pdata->irq_base + USBOTG_INTR_OFFSET,
+			pdata->irq_base + USB_PRES_INTR_OFFSET);
+
+		if (IS_ERR(child))
+			return PTR_ERR(child);
+		/* we need to connect regulators to this transceiver */
+		if (twl_has_regulator() && child)
+			usb3v3.dev = child;
+
+	}
 
 	if (twl_has_watchdog()) {
 		child = add_child(0, "twl4030_wdt", NULL, 0, false, 0, 0);
@@ -815,10 +853,6 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features)
 		if (IS_ERR(child))
 			return PTR_ERR(child);
 
-		child = add_regulator(TWL6030_REG_VUSB, pdata->vusb);
-		if (IS_ERR(child))
-			return PTR_ERR(child);
-
 		child = add_regulator(TWL6030_REG_VAUX1_6030, pdata->vaux1);
 		if (IS_ERR(child))
 			return PTR_ERR(child);

+ 8 - 1
drivers/mfd/twl6030-irq.c

@@ -74,7 +74,7 @@ static int twl6030_interrupt_mapping[24] = {
 	USBOTG_INTR_OFFSET,	/* Bit 16	ID_WKUP			*/
 	USBOTG_INTR_OFFSET,	/* Bit 17	VBUS_WKUP		*/
 	USBOTG_INTR_OFFSET,	/* Bit 18	ID			*/
-	USBOTG_INTR_OFFSET,	/* Bit 19	VBUS			*/
+	USB_PRES_INTR_OFFSET,	/* Bit 19	VBUS			*/
 	CHARGER_INTR_OFFSET,	/* Bit 20	CHRG_CTRL		*/
 	CHARGER_INTR_OFFSET,	/* Bit 21	EXT_CHRG		*/
 	CHARGER_INTR_OFFSET,	/* Bit 22	INT_CHRG		*/
@@ -128,6 +128,13 @@ static int twl6030_irq_thread(void *data)
 
 		sts.bytes[3] = 0; /* Only 24 bits are valid*/
 
+		/*
+		 * Since VBUS status bit is not reliable for VBUS disconnect,
+		 * use CHARGER VBUS detection status bit instead.
+		 */
+		if (sts.bytes[2] & 0x10)
+			sts.bytes[2] |= 0x08;
+
 		for (i = 0; sts.int_sts; sts.int_sts >>= 1, i++) {
 			local_irq_disable();
 			if (sts.int_sts & 0x1) {

+ 1 - 1
drivers/mtd/mtdchar.c

@@ -1201,7 +1201,7 @@ err_unregister_chdev:
 static void __exit cleanup_mtdchar(void)
 {
 	unregister_mtd_user(&mtdchar_notifier);
-	mntput(mtd_inode_mnt);
+	mntput_long(mtd_inode_mnt);
 	unregister_filesystem(&mtd_inodefs_type);
 	__unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
 }

+ 1 - 1
drivers/net/wimax/i2400m/usb.c

@@ -514,7 +514,7 @@ int i2400mu_probe(struct usb_interface *iface,
 #ifdef CONFIG_PM
 	iface->needs_remote_wakeup = 1;		/* autosuspend (15s delay) */
 	device_init_wakeup(dev, 1);
-	usb_dev->autosuspend_delay = 15 * HZ;
+	pm_runtime_set_autosuspend_delay(&usb_dev->dev, 15000);
 	usb_enable_autosuspend(usb_dev);
 #endif
 

+ 4 - 5
drivers/s390/scsi/zfcp_aux.c

@@ -45,8 +45,8 @@ static char *init_device;
 module_param_named(device, init_device, charp, 0400);
 MODULE_PARM_DESC(device, "specify initial device");
 
-static struct kmem_cache *zfcp_cache_hw_align(const char *name,
-					      unsigned long size)
+static struct kmem_cache * __init zfcp_cache_hw_align(const char *name,
+						      unsigned long size)
 {
 	return kmem_cache_create(name, size, roundup_pow_of_two(size), 0, NULL);
 }
@@ -311,8 +311,7 @@ int zfcp_status_read_refill(struct zfcp_adapter *adapter)
 		if (zfcp_fsf_status_read(adapter->qdio)) {
 			if (atomic_read(&adapter->stat_miss) >=
 			    adapter->stat_read_buf_num) {
-				zfcp_erp_adapter_reopen(adapter, 0, "axsref1",
-							NULL);
+				zfcp_erp_adapter_reopen(adapter, 0, "axsref1");
 				return 1;
 			}
 			break;
@@ -459,7 +458,7 @@ void zfcp_adapter_unregister(struct zfcp_adapter *adapter)
 	sysfs_remove_group(&cdev->dev.kobj, &zfcp_sysfs_adapter_attrs);
 
 	zfcp_erp_thread_kill(adapter);
-	zfcp_dbf_adapter_unregister(adapter->dbf);
+	zfcp_dbf_adapter_unregister(adapter);
 	zfcp_qdio_destroy(adapter->qdio);
 
 	zfcp_ccw_adapter_put(adapter); /* final put to release */

+ 7 - 7
drivers/s390/scsi/zfcp_ccw.c

@@ -48,7 +48,7 @@ static int zfcp_ccw_activate(struct ccw_device *cdev)
 
 	zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING);
 	zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
-				"ccresu2", NULL);
+				"ccresu2");
 	zfcp_erp_wait(adapter);
 	flush_work(&adapter->scan_work);
 
@@ -182,7 +182,7 @@ static int zfcp_ccw_set_offline(struct ccw_device *cdev)
 	if (!adapter)
 		return 0;
 
-	zfcp_erp_adapter_shutdown(adapter, 0, "ccsoff1", NULL);
+	zfcp_erp_adapter_shutdown(adapter, 0, "ccsoff1");
 	zfcp_erp_wait(adapter);
 
 	zfcp_ccw_adapter_put(adapter);
@@ -207,24 +207,24 @@ static int zfcp_ccw_notify(struct ccw_device *cdev, int event)
 	switch (event) {
 	case CIO_GONE:
 		dev_warn(&cdev->dev, "The FCP device has been detached\n");
-		zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti1", NULL);
+		zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti1");
 		break;
 	case CIO_NO_PATH:
 		dev_warn(&cdev->dev,
 			 "The CHPID for the FCP device is offline\n");
-		zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti2", NULL);
+		zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti2");
 		break;
 	case CIO_OPER:
 		dev_info(&cdev->dev, "The FCP device is operational again\n");
 		zfcp_erp_set_adapter_status(adapter,
 					    ZFCP_STATUS_COMMON_RUNNING);
 		zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
-					"ccnoti4", NULL);
+					"ccnoti4");
 		break;
 	case CIO_BOXED:
 		dev_warn(&cdev->dev, "The FCP device did not respond within "
 				     "the specified time\n");
-		zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti5", NULL);
+		zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti5");
 		break;
 	}
 
@@ -243,7 +243,7 @@ static void zfcp_ccw_shutdown(struct ccw_device *cdev)
 	if (!adapter)
 		return;
 
-	zfcp_erp_adapter_shutdown(adapter, 0, "ccshut1", NULL);
+	zfcp_erp_adapter_shutdown(adapter, 0, "ccshut1");
 	zfcp_erp_wait(adapter);
 	zfcp_erp_thread_kill(adapter);
 

+ 4 - 4
drivers/s390/scsi/zfcp_cfdc.c

@@ -288,7 +288,7 @@ void zfcp_cfdc_adapter_access_changed(struct zfcp_adapter *adapter)
 		    (status & ZFCP_STATUS_COMMON_ACCESS_BOXED))
 			zfcp_erp_port_reopen(port,
 					     ZFCP_STATUS_COMMON_ERP_FAILED,
-					     "cfaac_1", NULL);
+					     "cfaac_1");
 	}
 	read_unlock_irqrestore(&adapter->port_list_lock, flags);
 
@@ -299,7 +299,7 @@ void zfcp_cfdc_adapter_access_changed(struct zfcp_adapter *adapter)
 		    (status & ZFCP_STATUS_COMMON_ACCESS_BOXED))
 			zfcp_erp_lun_reopen(sdev,
 					    ZFCP_STATUS_COMMON_ERP_FAILED,
-					    "cfaac_2", NULL);
+					    "cfaac_2");
 	}
 }
 
@@ -426,7 +426,7 @@ int zfcp_cfdc_open_lun_eval(struct scsi_device *sdev,
 			zfcp_scsi_dev_lun(sdev),
 			(unsigned long long)zfcp_sdev->port->wwpn);
 		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED);
-		zfcp_erp_lun_shutdown(sdev, 0, "fsouh_6", NULL);
+		zfcp_erp_lun_shutdown(sdev, 0, "fsouh_6");
 		return -EACCES;
 	}
 
@@ -437,7 +437,7 @@ int zfcp_cfdc_open_lun_eval(struct scsi_device *sdev,
 			zfcp_scsi_dev_lun(sdev),
 			(unsigned long long)zfcp_sdev->port->wwpn);
 		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED);
-		zfcp_erp_lun_shutdown(sdev, 0, "fsosh_8", NULL);
+		zfcp_erp_lun_shutdown(sdev, 0, "fsosh_8");
 		return -EACCES;
 	}
 

These changes were not shown because the diff is too large
+ 269 - 857
drivers/s390/scsi/zfcp_dbf.c


+ 258 - 239
drivers/s390/scsi/zfcp_dbf.h

@@ -1,22 +1,8 @@
 /*
- * This file is part of the zfcp device driver for
- * FCP adapters for IBM System z9 and zSeries.
+ * zfcp device driver
+ * debug feature declarations
  *
- * Copyright IBM Corp. 2008, 2009
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * Copyright IBM Corp. 2008, 2010
  */
 
 #ifndef ZFCP_DBF_H
@@ -27,322 +13,350 @@
 #include "zfcp_fsf.h"
 #include "zfcp_def.h"
 
-#define ZFCP_DBF_TAG_SIZE      4
-#define ZFCP_DBF_ID_SIZE       7
+#define ZFCP_DBF_TAG_LEN       7
 
 #define ZFCP_DBF_INVALID_LUN	0xFFFFFFFFFFFFFFFFull
 
-struct zfcp_dbf_dump {
-	u8 tag[ZFCP_DBF_TAG_SIZE];
-	u32 total_size;		/* size of total dump data */
-	u32 offset;		/* how much data has being already dumped */
-	u32 size;		/* how much data comes with this record */
-	u8 data[];		/* dump data */
-} __attribute__ ((packed));
-
-struct zfcp_dbf_rec_record_thread {
-	u32 total;
+/**
+ * struct zfcp_dbf_rec_trigger - trace record for triggered recovery action
+ * @ready: number of ready recovery actions
+ * @running: number of running recovery actions
+ * @want: wanted recovery action
+ * @need: needed recovery action
+ */
+struct zfcp_dbf_rec_trigger {
 	u32 ready;
 	u32 running;
-};
-
-struct zfcp_dbf_rec_record_target {
-	u64 ref;
-	u32 status;
-	u32 d_id;
-	u64 wwpn;
-	u64 fcp_lun;
-	u32 erp_count;
-};
-
-struct zfcp_dbf_rec_record_trigger {
 	u8 want;
 	u8 need;
-	u32 as;
-	u32 ps;
-	u32 ls;
-	u64 ref;
-	u64 action;
-	u64 wwpn;
-	u64 fcp_lun;
-};
+} __packed;
 
-struct zfcp_dbf_rec_record_action {
-	u32 status;
-	u32 step;
-	u64 action;
-	u64 fsf_req;
+/**
+ * struct zfcp_dbf_rec_running - trace record for running recovery
+ * @fsf_req_id: request id for fsf requests
+ * @rec_status: status of the fsf request
+ * @rec_step: current step of the recovery action
+ * @rec_count: recovery counter
+ */
+struct zfcp_dbf_rec_running {
+	u64 fsf_req_id;
+	u32 rec_status;
+	u16 rec_step;
+	u8 rec_action;
+	u8 rec_count;
+} __packed;
+
+/**
+ * enum zfcp_dbf_rec_id - recovery trace record id
+ * @ZFCP_DBF_REC_TRIG: triggered recovery identifier
+ * @ZFCP_DBF_REC_RUN: running recovery identifier
+ */
+enum zfcp_dbf_rec_id {
+	ZFCP_DBF_REC_TRIG	= 1,
+	ZFCP_DBF_REC_RUN	= 2,
 };
 
-struct zfcp_dbf_rec_record {
+/**
+ * struct zfcp_dbf_rec - trace record for error recovery actions
+ * @id: unique number of recovery record type
+ * @tag: identifier string specifying the location of initiation
+ * @lun: logical unit number
+ * @wwpn: world wide port number
+ * @d_id: destination ID
+ * @adapter_status: current status of the adapter
+ * @port_status: current status of the port
+ * @lun_status: current status of the lun
+ * @u.trig: structure zfcp_dbf_rec_trigger
+ * @u.run: structure zfcp_dbf_rec_running
+ */
+struct zfcp_dbf_rec {
 	u8 id;
-	char id2[7];
+	char tag[ZFCP_DBF_TAG_LEN];
+	u64 lun;
+	u64 wwpn;
+	u32 d_id;
+	u32 adapter_status;
+	u32 port_status;
+	u32 lun_status;
 	union {
-		struct zfcp_dbf_rec_record_action action;
-		struct zfcp_dbf_rec_record_thread thread;
-		struct zfcp_dbf_rec_record_target target;
-		struct zfcp_dbf_rec_record_trigger trigger;
+		struct zfcp_dbf_rec_trigger trig;
+		struct zfcp_dbf_rec_running run;
 	} u;
-};
+} __packed;
 
-enum {
-	ZFCP_REC_DBF_ID_ACTION,
-	ZFCP_REC_DBF_ID_THREAD,
-	ZFCP_REC_DBF_ID_TARGET,
-	ZFCP_REC_DBF_ID_TRIGGER,
+/**
+ * enum zfcp_dbf_san_id - SAN trace record identifier
+ * @ZFCP_DBF_SAN_REQ: request trace record id
+ * @ZFCP_DBF_SAN_RES: response trace record id
+ * @ZFCP_DBF_SAN_ELS: extended link service record id
+ */
+enum zfcp_dbf_san_id {
+	ZFCP_DBF_SAN_REQ	= 1,
+	ZFCP_DBF_SAN_RES	= 2,
+	ZFCP_DBF_SAN_ELS	= 3,
 };
 
-struct zfcp_dbf_hba_record_response {
-	u32 fsf_command;
-	u64 fsf_reqid;
-	u32 fsf_seqno;
-	u64 fsf_issued;
-	u32 fsf_prot_status;
+/** struct zfcp_dbf_san - trace record for SAN requests and responses
+ * @id: unique number of recovery record type
+ * @tag: identifier string specifying the location of initiation
+ * @fsf_req_id: request id for fsf requests
+ * @payload: unformatted information related to request/response
+ * @d_id: destination id
+ */
+struct zfcp_dbf_san {
+	u8 id;
+	char tag[ZFCP_DBF_TAG_LEN];
+	u64 fsf_req_id;
+	u32 d_id;
+#define ZFCP_DBF_SAN_MAX_PAYLOAD (FC_CT_HDR_LEN + 32)
+	char payload[ZFCP_DBF_SAN_MAX_PAYLOAD];
+} __packed;
+
+/**
+ * struct zfcp_dbf_hba_res - trace record for hba responses
+ * @req_issued: timestamp when request was issued
+ * @prot_status: protocol status
+ * @prot_status_qual: protocol status qualifier
+ * @fsf_status: fsf status
+ * @fsf_status_qual: fsf status qualifier
+ */
+struct zfcp_dbf_hba_res {
+	u64 req_issued;
+	u32 prot_status;
+	u8  prot_status_qual[FSF_PROT_STATUS_QUAL_SIZE];
 	u32 fsf_status;
-	u8 fsf_prot_status_qual[FSF_PROT_STATUS_QUAL_SIZE];
-	u8 fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE];
-	u32 fsf_req_status;
-	u8 sbal_first;
-	u8 sbal_last;
-	u8 sbal_response;
-	u8 pool;
-	u64 erp_action;
-	union {
-		struct {
-			u64 cmnd;
-			u32 data_dir;
-		} fcp;
-		struct {
-			u64 wwpn;
-			u32 d_id;
-			u32 port_handle;
-		} port;
-		struct {
-			u64 wwpn;
-			u64 fcp_lun;
-			u32 port_handle;
-			u32 lun_handle;
-		} unit;
-		struct {
-			u32 d_id;
-		} els;
-	} u;
-} __attribute__ ((packed));
+	u8  fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE];
+} __packed;
 
-struct zfcp_dbf_hba_record_status {
-	u8 failed;
+/**
+ * struct zfcp_dbf_hba_uss - trace record for unsolicited status
+ * @status_type: type of unsolicited status
+ * @status_subtype: subtype of unsolicited status
+ * @d_id: destination ID
+ * @lun: logical unit number
+ * @queue_designator: queue designator
+ */
+struct zfcp_dbf_hba_uss {
 	u32 status_type;
 	u32 status_subtype;
-	struct fsf_queue_designator
-	 queue_designator;
-	u32 payload_size;
-#define ZFCP_DBF_UNSOL_PAYLOAD				80
-#define ZFCP_DBF_UNSOL_PAYLOAD_SENSE_DATA_AVAIL		32
-#define ZFCP_DBF_UNSOL_PAYLOAD_BIT_ERROR_THRESHOLD	56
-#define ZFCP_DBF_UNSOL_PAYLOAD_FEATURE_UPDATE_ALERT	2 * sizeof(u32)
-	u8 payload[ZFCP_DBF_UNSOL_PAYLOAD];
-} __attribute__ ((packed));
-
-struct zfcp_dbf_hba_record_qdio {
-	u32 qdio_error;
-	u8 sbal_index;
-	u8 sbal_count;
-} __attribute__ ((packed));
-
-struct zfcp_dbf_hba_record {
-	u8 tag[ZFCP_DBF_TAG_SIZE];
-	u8 tag2[ZFCP_DBF_TAG_SIZE];
-	union {
-		struct zfcp_dbf_hba_record_response response;
-		struct zfcp_dbf_hba_record_status status;
-		struct zfcp_dbf_hba_record_qdio qdio;
-		struct fsf_bit_error_payload berr;
-	} u;
-} __attribute__ ((packed));
-
-struct zfcp_dbf_san_record_ct_request {
-	u16 cmd_req_code;
-	u8 revision;
-	u8 gs_type;
-	u8 gs_subtype;
-	u8 options;
-	u16 max_res_size;
-	u32 len;
-	u32 d_id;
-} __attribute__ ((packed));
-
-struct zfcp_dbf_san_record_ct_response {
-	u16 cmd_rsp_code;
-	u8 revision;
-	u8 reason_code;
-	u8 expl;
-	u8 vendor_unique;
-	u16 max_res_size;
-	u32 len;
-} __attribute__ ((packed));
-
-struct zfcp_dbf_san_record_els {
 	u32 d_id;
-} __attribute__ ((packed));
+	u64 lun;
+	u64 queue_designator;
+} __packed;
 
-struct zfcp_dbf_san_record {
-	u8 tag[ZFCP_DBF_TAG_SIZE];
-	u64 fsf_reqid;
-	u32 fsf_seqno;
+/**
+ * enum zfcp_dbf_hba_id - HBA trace record identifier
+ * @ZFCP_DBF_HBA_RES: response trace record
+ * @ZFCP_DBF_HBA_USS: unsolicited status trace record
+ * @ZFCP_DBF_HBA_BIT: bit error trace record
+ */
+enum zfcp_dbf_hba_id {
+	ZFCP_DBF_HBA_RES	= 1,
+	ZFCP_DBF_HBA_USS	= 2,
+	ZFCP_DBF_HBA_BIT	= 3,
+};
+
+/**
+ * struct zfcp_dbf_hba - common trace record for HBA records
+ * @id: unique number of recovery record type
+ * @tag: identifier string specifying the location of initiation
+ * @fsf_req_id: request id for fsf requests
+ * @fsf_req_status: status of fsf request
+ * @fsf_cmd: fsf command
+ * @fsf_seq_no: fsf sequence number
+ * @pl_len: length of payload stored as zfcp_dbf_pay
+ * @u: record type specific data
+ */
+struct zfcp_dbf_hba {
+	u8 id;
+	char tag[ZFCP_DBF_TAG_LEN];
+	u64 fsf_req_id;
+	u32 fsf_req_status;
+	u32 fsf_cmd;
+	u32 fsf_seq_no;
+	u16 pl_len;
 	union {
-		struct zfcp_dbf_san_record_ct_request ct_req;
-		struct zfcp_dbf_san_record_ct_response ct_resp;
-		struct zfcp_dbf_san_record_els els;
+		struct zfcp_dbf_hba_res res;
+		struct zfcp_dbf_hba_uss uss;
+		struct fsf_bit_error_payload be;
 	} u;
-} __attribute__ ((packed));
+} __packed;
 
-#define ZFCP_DBF_SAN_MAX_PAYLOAD 1024
+/**
+ * enum zfcp_dbf_scsi_id - scsi trace record identifier
+ * @ZFCP_DBF_SCSI_CMND: scsi command trace record
+ */
+enum zfcp_dbf_scsi_id {
+	ZFCP_DBF_SCSI_CMND	= 1,
+};
 
-struct zfcp_dbf_scsi_record {
-	u8 tag[ZFCP_DBF_TAG_SIZE];
-	u8 tag2[ZFCP_DBF_TAG_SIZE];
+/**
+ * struct zfcp_dbf_scsi - common trace record for SCSI records
+ * @id: unique number of recovery record type
+ * @tag: identifier string specifying the location of initiation
+ * @scsi_id: scsi device id
+ * @scsi_lun: scsi device logical unit number
+ * @scsi_result: scsi result
+ * @scsi_retries: current retry number of scsi request
+ * @scsi_allowed: allowed retries
+ * @fcp_rsp_info: FCP response info
+ * @scsi_opcode: scsi opcode
+ * @fsf_req_id: request id of fsf request
+ * @host_scribble: LLD specific data attached to SCSI request
+ * @pl_len: length of payload stored as zfcp_dbf_pay
+ * @fsf_rsp: response for fsf request
+ */
+struct zfcp_dbf_scsi {
+	u8 id;
+	char tag[ZFCP_DBF_TAG_LEN];
 	u32 scsi_id;
 	u32 scsi_lun;
 	u32 scsi_result;
-	u64 scsi_cmnd;
-#define ZFCP_DBF_SCSI_OPCODE	16
-	u8 scsi_opcode[ZFCP_DBF_SCSI_OPCODE];
 	u8 scsi_retries;
 	u8 scsi_allowed;
-	u64 fsf_reqid;
-	u32 fsf_seqno;
-	u64 fsf_issued;
-	u64 old_fsf_reqid;
-	u8 rsp_validity;
-	u8 rsp_scsi_status;
-	u32 rsp_resid;
-	u8 rsp_code;
-#define ZFCP_DBF_SCSI_FCP_SNS_INFO	16
-#define ZFCP_DBF_SCSI_MAX_FCP_SNS_INFO	256
-	u32 sns_info_len;
-	u8 sns_info[ZFCP_DBF_SCSI_FCP_SNS_INFO];
-} __attribute__ ((packed));
+	u8 fcp_rsp_info;
+#define ZFCP_DBF_SCSI_OPCODE	16
+	u8 scsi_opcode[ZFCP_DBF_SCSI_OPCODE];
+	u64 fsf_req_id;
+	u64 host_scribble;
+	u16 pl_len;
+	struct fcp_resp_with_ext fcp_rsp;
+} __packed;
 
+/**
+ * struct zfcp_dbf_pay - trace record for unformatted payload information
+ * @area: area this record is originated from
+ * @counter: ascending record number
+ * @fsf_req_id: request id of fsf request
+ * @data: unformatted data
+ */
+struct zfcp_dbf_pay {
+	u8 counter;
+	char area[ZFCP_DBF_TAG_LEN];
+	u64 fsf_req_id;
+#define ZFCP_DBF_PAY_MAX_REC 0x100
+	char data[ZFCP_DBF_PAY_MAX_REC];
+} __packed;
+
+/**
+ * struct zfcp_dbf - main dbf trace structure
+ * @pay: reference to payload trace area
+ * @rec: reference to recovery trace area
+ * @hba: reference to hba trace area
+ * @san: reference to san trace area
+ * @scsi: reference to scsi trace area
+ * @pay_lock: lock protecting payload trace buffer
+ * @rec_lock: lock protecting recovery trace buffer
+ * @hba_lock: lock protecting hba trace buffer
+ * @san_lock: lock protecting san trace buffer
+ * @scsi_lock: lock protecting scsi trace buffer
+ * @pay_buf: pre-allocated buffer for payload
+ * @rec_buf: pre-allocated buffer for recovery
+ * @hba_buf: pre-allocated buffer for hba
+ * @san_buf: pre-allocated buffer for san
+ * @scsi_buf: pre-allocated buffer for scsi
+ */
 struct zfcp_dbf {
+	debug_info_t			*pay;
 	debug_info_t			*rec;
 	debug_info_t			*hba;
 	debug_info_t			*san;
 	debug_info_t			*scsi;
+	spinlock_t			pay_lock;
 	spinlock_t			rec_lock;
 	spinlock_t			hba_lock;
 	spinlock_t			san_lock;
 	spinlock_t			scsi_lock;
-	struct zfcp_dbf_rec_record	rec_buf;
-	struct zfcp_dbf_hba_record	hba_buf;
-	struct zfcp_dbf_san_record	san_buf;
-	struct zfcp_dbf_scsi_record	scsi_buf;
-	struct zfcp_adapter		*adapter;
+	struct zfcp_dbf_pay		pay_buf;
+	struct zfcp_dbf_rec		rec_buf;
+	struct zfcp_dbf_hba		hba_buf;
+	struct zfcp_dbf_san		san_buf;
+	struct zfcp_dbf_scsi		scsi_buf;
 };
 
 static inline
-void zfcp_dbf_hba_fsf_resp(const char *tag2, int level,
-			   struct zfcp_fsf_req *req, struct zfcp_dbf *dbf)
+void zfcp_dbf_hba_fsf_resp(char *tag, int level, struct zfcp_fsf_req *req)
 {
-	if (level <= dbf->hba->level)
-		_zfcp_dbf_hba_fsf_response(tag2, level, req, dbf);
+	if (level <= req->adapter->dbf->hba->level)
+		zfcp_dbf_hba_fsf_res(tag, req);
 }
 
 /**
  * zfcp_dbf_hba_fsf_response - trace event for request completion
- * @fsf_req: request that has been completed
+ * @req: request that has been completed
  */
-static inline void zfcp_dbf_hba_fsf_response(struct zfcp_fsf_req *req)
+static inline
+void zfcp_dbf_hba_fsf_response(struct zfcp_fsf_req *req)
 {
-	struct zfcp_dbf *dbf = req->adapter->dbf;
 	struct fsf_qtcb *qtcb = req->qtcb;
 
 	if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) &&
 	    (qtcb->prefix.prot_status != FSF_PROT_FSF_STATUS_PRESENTED)) {
-		zfcp_dbf_hba_fsf_resp("perr", 1, req, dbf);
+		zfcp_dbf_hba_fsf_resp("fs_perr", 1, req);
 
 	} else if (qtcb->header.fsf_status != FSF_GOOD) {
-		zfcp_dbf_hba_fsf_resp("ferr", 1, req, dbf);
+		zfcp_dbf_hba_fsf_resp("fs_ferr", 1, req);
 
 	} else if ((req->fsf_command == FSF_QTCB_OPEN_PORT_WITH_DID) ||
 		   (req->fsf_command == FSF_QTCB_OPEN_LUN)) {
-		zfcp_dbf_hba_fsf_resp("open", 4, req, dbf);
+		zfcp_dbf_hba_fsf_resp("fs_open", 4, req);
 
 	} else if (qtcb->header.log_length) {
-		zfcp_dbf_hba_fsf_resp("qtcb", 5, req, dbf);
+		zfcp_dbf_hba_fsf_resp("fs_qtcb", 5, req);
 
 	} else {
-		zfcp_dbf_hba_fsf_resp("norm", 6, req, dbf);
+		zfcp_dbf_hba_fsf_resp("fs_norm", 6, req);
 	}
- }
-
-/**
- * zfcp_dbf_hba_fsf_unsol - trace event for an unsolicited status buffer
- * @tag: tag indicating which kind of unsolicited status has been received
- * @dbf: reference to dbf structure
- * @status_buffer: buffer containing payload of unsolicited status
- */
-static inline
-void zfcp_dbf_hba_fsf_unsol(const char *tag, struct zfcp_dbf *dbf,
-			    struct fsf_status_read_buffer *buf)
-{
-	int level = 2;
-
-	if (level <= dbf->hba->level)
-		_zfcp_dbf_hba_fsf_unsol(tag, level, dbf, buf);
 }
 
 static inline
-void zfcp_dbf_scsi(const char *tag, const char *tag2, int level,
-		   struct zfcp_dbf *dbf, struct scsi_cmnd *scmd,
-		   struct zfcp_fsf_req *req, unsigned long old_id)
+void _zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *scmd,
+		   struct zfcp_fsf_req *req)
 {
-	if (level <= dbf->scsi->level)
-		_zfcp_dbf_scsi(tag, tag2, level, dbf, scmd, req, old_id);
+	struct zfcp_adapter *adapter = (struct zfcp_adapter *)
+					scmd->device->host->hostdata[0];
+
+	if (level <= adapter->dbf->scsi->level)
+		zfcp_dbf_scsi(tag, scmd, req);
 }
 
 /**
  * zfcp_dbf_scsi_result - trace event for SCSI command completion
- * @dbf: adapter dbf trace
  * @scmd: SCSI command pointer
  * @req: FSF request used to issue SCSI command
  */
 static inline
-void zfcp_dbf_scsi_result(struct zfcp_dbf *dbf, struct scsi_cmnd *scmd,
-			  struct zfcp_fsf_req *req)
+void zfcp_dbf_scsi_result(struct scsi_cmnd *scmd, struct zfcp_fsf_req *req)
 {
 	if (scmd->result != 0)
-		zfcp_dbf_scsi("rslt", "erro", 3, dbf, scmd, req, 0);
+		_zfcp_dbf_scsi("rsl_err", 3, scmd, req);
 	else if (scmd->retries > 0)
-		zfcp_dbf_scsi("rslt", "retr", 4, dbf, scmd, req, 0);
+		_zfcp_dbf_scsi("rsl_ret", 4, scmd, req);
 	else
-		zfcp_dbf_scsi("rslt", "norm", 6, dbf, scmd, req, 0);
+		_zfcp_dbf_scsi("rsl_nor", 6, scmd, req);
 }
 
 /**
  * zfcp_dbf_scsi_fail_send - trace event for failure to send SCSI command
- * @dbf: adapter dbf trace
  * @scmd: SCSI command pointer
  */
 static inline
-void zfcp_dbf_scsi_fail_send(struct zfcp_dbf *dbf, struct scsi_cmnd *scmd)
+void zfcp_dbf_scsi_fail_send(struct scsi_cmnd *scmd)
 {
-	zfcp_dbf_scsi("rslt", "fail", 4, dbf, scmd, NULL, 0);
+	_zfcp_dbf_scsi("rsl_fai", 4, scmd, NULL);
 }
 
 /**
  * zfcp_dbf_scsi_abort - trace event for SCSI command abort
  * @tag: tag indicating success or failure of abort operation
- * @adapter: adapter thas has been used to issue SCSI command to be aborted
  * @scmd: SCSI command to be aborted
- * @new_req: request containing abort (might be NULL)
- * @old_id: identifier of request containg SCSI command to be aborted
+ * @fsf_req: request containing abort (might be NULL)
  */
 static inline
-void zfcp_dbf_scsi_abort(const char *tag, struct zfcp_dbf *dbf,
-			 struct scsi_cmnd *scmd, struct zfcp_fsf_req *new_req,
-			 unsigned long old_id)
+void zfcp_dbf_scsi_abort(char *tag, struct scsi_cmnd *scmd,
+			 struct zfcp_fsf_req *fsf_req)
 {
-	zfcp_dbf_scsi("abrt", tag, 1, dbf, scmd, new_req, old_id);
+	_zfcp_dbf_scsi(tag, 1, scmd, fsf_req);
 }
 
 /**
@@ -352,12 +366,17 @@ void zfcp_dbf_scsi_abort(const char *tag, struct zfcp_dbf *dbf,
  * @flag: indicates type of reset (Target Reset, Logical Unit Reset)
  */
 static inline
-void zfcp_dbf_scsi_devreset(const char *tag, struct scsi_cmnd *scmnd, u8 flag)
+void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag)
 {
-	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scmnd->device);
+	char tmp_tag[ZFCP_DBF_TAG_LEN];
+
+	if (flag == FCP_TMF_TGT_RESET)
+		memcpy(tmp_tag, "tr_", 3);
+	else
+		memcpy(tmp_tag, "lr_", 3);
 
-	zfcp_dbf_scsi(flag == FCP_TMF_TGT_RESET ? "trst" : "lrst", tag, 1,
-		      zfcp_sdev->port->adapter->dbf, scmnd, NULL, 0);
+	memcpy(&tmp_tag[3], tag, 4);
+	_zfcp_dbf_scsi(tmp_tag, 1, scmnd, NULL);
 }
 
 #endif /* ZFCP_DBF_H */
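
The inline wrappers above all follow the same level-gate pattern: a cheap comparison against the per-area debug level decides whether the out-of-line trace function is called at all, so filtered records cost next to nothing. A standalone sketch of that pattern, with hypothetical names (trace_area, trace_emit, trace_result) and the same level convention as zfcp_dbf_scsi_result (3 for errors, 6 for normal completion):

#include <stdio.h>

struct trace_area {
	int level;		/* records with a level above this are dropped */
};

/* out-of-line worker that would format and store the record */
static void trace_emit(const char *tag, int result)
{
	printf("%-7s result=%d\n", tag, result);
}

/* inline-style wrapper: cheap level check before the expensive call */
static inline void trace_result(struct trace_area *area, int level,
				const char *tag, int result)
{
	if (level <= area->level)
		trace_emit(tag, result);
}

int main(void)
{
	struct trace_area scsi = { .level = 3 };

	trace_result(&scsi, 3, "rsl_err", -5);	/* emitted */
	trace_result(&scsi, 6, "rsl_nor", 0);	/* filtered out */
	return 0;
}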

+ 59 - 82
drivers/s390/scsi/zfcp_erp.c

@@ -76,9 +76,9 @@ static void zfcp_erp_action_ready(struct zfcp_erp_action *act)
 	struct zfcp_adapter *adapter = act->adapter;
 
 	list_move(&act->list, &act->adapter->erp_ready_head);
-	zfcp_dbf_rec_action("erardy1", act);
+	zfcp_dbf_rec_run("erardy1", act);
 	wake_up(&adapter->erp_ready_wq);
-	zfcp_dbf_rec_thread("erardy2", adapter->dbf);
+	zfcp_dbf_rec_run("erardy2", act);
 }
 
 static void zfcp_erp_action_dismiss(struct zfcp_erp_action *act)
@@ -236,10 +236,10 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
 static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
 				   struct zfcp_port *port,
 				   struct scsi_device *sdev,
-				   char *id, void *ref, u32 act_status)
+				   char *id, u32 act_status)
 {
 	int retval = 1, need;
-	struct zfcp_erp_action *act = NULL;
+	struct zfcp_erp_action *act;
 
 	if (!adapter->erp_thread)
 		return -EIO;
@@ -255,15 +255,14 @@ static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
 	++adapter->erp_total_count;
 	list_add_tail(&act->list, &adapter->erp_ready_head);
 	wake_up(&adapter->erp_ready_wq);
-	zfcp_dbf_rec_thread("eracte1", adapter->dbf);
 	retval = 0;
  out:
-	zfcp_dbf_rec_trigger(id, ref, want, need, act, adapter, port, sdev);
+	zfcp_dbf_rec_trig(id, adapter, port, sdev, want, need);
 	return retval;
 }
 
 static int _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter,
-				    int clear_mask, char *id, void *ref)
+				    int clear_mask, char *id)
 {
 	zfcp_erp_adapter_block(adapter, clear_mask);
 	zfcp_scsi_schedule_rports_block(adapter);
@@ -275,7 +274,7 @@ static int _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter,
 		return -EIO;
 	}
 	return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER,
-				       adapter, NULL, NULL, id, ref, 0);
+				       adapter, NULL, NULL, id, 0);
 }
 
 /**
@@ -283,10 +282,8 @@ static int _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter,
  * @adapter: Adapter to reopen.
  * @clear: Status flags to clear.
  * @id: Id for debug trace event.
- * @ref: Reference for debug trace event.
  */
-void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear,
-			     char *id, void *ref)
+void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear, char *id)
 {
 	unsigned long flags;
 
@@ -299,7 +296,7 @@ void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear,
 					    ZFCP_STATUS_COMMON_ERP_FAILED);
 	else
 		zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter,
-					NULL, NULL, id, ref, 0);
+					NULL, NULL, id, 0);
 	write_unlock_irqrestore(&adapter->erp_lock, flags);
 }
 
@@ -308,13 +305,12 @@ void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear,
  * @adapter: Adapter to shut down.
  * @clear: Status flags to clear.
  * @id: Id for debug trace event.
- * @ref: Reference for debug trace event.
  */
 void zfcp_erp_adapter_shutdown(struct zfcp_adapter *adapter, int clear,
-			       char *id, void *ref)
+			       char *id)
 {
 	int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
-	zfcp_erp_adapter_reopen(adapter, clear | flags, id, ref);
+	zfcp_erp_adapter_reopen(adapter, clear | flags, id);
 }
 
 /**
@@ -322,13 +318,11 @@ void zfcp_erp_adapter_shutdown(struct zfcp_adapter *adapter, int clear,
  * @port: Port to shut down.
  * @clear: Status flags to clear.
  * @id: Id for debug trace event.
- * @ref: Reference for debug trace event.
  */
-void zfcp_erp_port_shutdown(struct zfcp_port *port, int clear, char *id,
-			    void *ref)
+void zfcp_erp_port_shutdown(struct zfcp_port *port, int clear, char *id)
 {
 	int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
-	zfcp_erp_port_reopen(port, clear | flags, id, ref);
+	zfcp_erp_port_reopen(port, clear | flags, id);
 }
 
 static void zfcp_erp_port_block(struct zfcp_port *port, int clear)
@@ -337,8 +331,8 @@ static void zfcp_erp_port_block(struct zfcp_port *port, int clear)
 				    ZFCP_STATUS_COMMON_UNBLOCKED | clear);
 }
 
-static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port,
-					 int clear, char *id, void *ref)
+static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear,
+					 char *id)
 {
 	zfcp_erp_port_block(port, clear);
 	zfcp_scsi_schedule_rport_block(port);
@@ -347,28 +341,26 @@ static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port,
 		return;
 
 	zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT_FORCED,
-				port->adapter, port, NULL, id, ref, 0);
+				port->adapter, port, NULL, id, 0);
 }
 
 /**
  * zfcp_erp_port_forced_reopen - Forced close of port and open again
  * @port: Port to force close and to reopen.
+ * @clear: Status flags to clear.
  * @id: Id for debug trace event.
- * @ref: Reference for debug trace event.
  */
-void zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear, char *id,
-				 void *ref)
+void zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear, char *id)
 {
 	unsigned long flags;
 	struct zfcp_adapter *adapter = port->adapter;
 
 	write_lock_irqsave(&adapter->erp_lock, flags);
-	_zfcp_erp_port_forced_reopen(port, clear, id, ref);
+	_zfcp_erp_port_forced_reopen(port, clear, id);
 	write_unlock_irqrestore(&adapter->erp_lock, flags);
 }
 
-static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id,
-				 void *ref)
+static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id)
 {
 	zfcp_erp_port_block(port, clear);
 	zfcp_scsi_schedule_rport_block(port);
@@ -380,24 +372,25 @@ static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id,
 	}
 
 	return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT,
-				       port->adapter, port, NULL, id, ref, 0);
+				       port->adapter, port, NULL, id, 0);
 }
 
 /**
  * zfcp_erp_port_reopen - trigger remote port recovery
  * @port: port to recover
  * @clear_mask: flags in port status to be cleared
+ * @id: Id for debug trace event.
  *
  * Returns 0 if recovery has been triggered, < 0 if not.
  */
-int zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id, void *ref)
+int zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id)
 {
 	int retval;
 	unsigned long flags;
 	struct zfcp_adapter *adapter = port->adapter;
 
 	write_lock_irqsave(&adapter->erp_lock, flags);
-	retval = _zfcp_erp_port_reopen(port, clear, id, ref);
+	retval = _zfcp_erp_port_reopen(port, clear, id);
 	write_unlock_irqrestore(&adapter->erp_lock, flags);
 
 	return retval;
@@ -410,7 +403,7 @@ static void zfcp_erp_lun_block(struct scsi_device *sdev, int clear_mask)
 }
 
 static void _zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id,
-				 void *ref, u32 act_status)
+				 u32 act_status)
 {
 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
 	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
@@ -421,17 +414,18 @@ static void _zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id,
 		return;
 
 	zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_LUN, adapter,
-				zfcp_sdev->port, sdev, id, ref, act_status);
+				zfcp_sdev->port, sdev, id, act_status);
 }
 
 /**
  * zfcp_erp_lun_reopen - initiate reopen of a LUN
  * @sdev: SCSI device / LUN to be reopened
  * @clear_mask: specifies flags in LUN status to be cleared
+ * @id: Id for debug trace event.
+ *
  * Return: 0 on success, < 0 on error
  */
-void zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id,
-			 void *ref)
+void zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id)
 {
 	unsigned long flags;
 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
@@ -439,7 +433,7 @@ void zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id,
 	struct zfcp_adapter *adapter = port->adapter;
 
 	write_lock_irqsave(&adapter->erp_lock, flags);
-	_zfcp_erp_lun_reopen(sdev, clear, id, ref, 0);
+	_zfcp_erp_lun_reopen(sdev, clear, id, 0);
 	write_unlock_irqrestore(&adapter->erp_lock, flags);
 }
 
@@ -448,13 +442,11 @@ void zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id,
  * @sdev: SCSI device / LUN to shut down.
  * @clear: Status flags to clear.
  * @id: Id for debug trace event.
- * @ref: Reference for debug trace event.
  */
-void zfcp_erp_lun_shutdown(struct scsi_device *sdev, int clear, char *id,
-			   void *ref)
+void zfcp_erp_lun_shutdown(struct scsi_device *sdev, int clear, char *id)
 {
 	int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
-	zfcp_erp_lun_reopen(sdev, clear | flags, id, ref);
+	zfcp_erp_lun_reopen(sdev, clear | flags, id);
 }
 
 /**
@@ -476,7 +468,7 @@ void zfcp_erp_lun_shutdown_wait(struct scsi_device *sdev, char *id)
 	int clear = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
 
 	write_lock_irqsave(&adapter->erp_lock, flags);
-	_zfcp_erp_lun_reopen(sdev, clear, id, NULL, ZFCP_STATUS_ERP_NO_REF);
+	_zfcp_erp_lun_reopen(sdev, clear, id, ZFCP_STATUS_ERP_NO_REF);
 	write_unlock_irqrestore(&adapter->erp_lock, flags);
 
 	zfcp_erp_wait(adapter);
@@ -490,14 +482,14 @@ static int status_change_set(unsigned long mask, atomic_t *status)
 static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter)
 {
 	if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status))
-		zfcp_dbf_rec_adapter("eraubl1", NULL, adapter->dbf);
+		zfcp_dbf_rec_run("eraubl1", &adapter->erp_action);
 	atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status);
 }
 
 static void zfcp_erp_port_unblock(struct zfcp_port *port)
 {
 	if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status))
-		zfcp_dbf_rec_port("erpubl1", NULL, port);
+		zfcp_dbf_rec_run("erpubl1", &port->erp_action);
 	atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status);
 }
 
@@ -506,14 +498,14 @@ static void zfcp_erp_lun_unblock(struct scsi_device *sdev)
 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
 
 	if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status))
-		zfcp_dbf_rec_lun("erlubl1", NULL, sdev);
+		zfcp_dbf_rec_run("erlubl1", &sdev_to_zfcp(sdev)->erp_action);
 	atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status);
 }
 
 static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action)
 {
 	list_move(&erp_action->list, &erp_action->adapter->erp_running_head);
-	zfcp_dbf_rec_action("erator1", erp_action);
+	zfcp_dbf_rec_run("erator1", erp_action);
 }
 
 static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act)
@@ -530,11 +522,11 @@ static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act)
 		if (act->status & (ZFCP_STATUS_ERP_DISMISSED |
 				   ZFCP_STATUS_ERP_TIMEDOUT)) {
 			req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
-			zfcp_dbf_rec_action("erscf_1", act);
+			zfcp_dbf_rec_run("erscf_1", act);
 			req->erp_action = NULL;
 		}
 		if (act->status & ZFCP_STATUS_ERP_TIMEDOUT)
-			zfcp_dbf_rec_action("erscf_2", act);
+			zfcp_dbf_rec_run("erscf_2", act);
 		if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED)
 			act->fsf_req_id = 0;
 	} else
@@ -585,40 +577,40 @@ static void zfcp_erp_strategy_memwait(struct zfcp_erp_action *erp_action)
 }
 
 static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter,
-				      int clear, char *id, void *ref)
+				      int clear, char *id)
 {
 	struct zfcp_port *port;
 
 	read_lock(&adapter->port_list_lock);
 	list_for_each_entry(port, &adapter->port_list, list)
-		_zfcp_erp_port_reopen(port, clear, id, ref);
+		_zfcp_erp_port_reopen(port, clear, id);
 	read_unlock(&adapter->port_list_lock);
 }
 
 static void _zfcp_erp_lun_reopen_all(struct zfcp_port *port, int clear,
-				     char *id, void *ref)
+				     char *id)
 {
 	struct scsi_device *sdev;
 
 	shost_for_each_device(sdev, port->adapter->scsi_host)
 		if (sdev_to_zfcp(sdev)->port == port)
-			_zfcp_erp_lun_reopen(sdev, clear, id, ref, 0);
+			_zfcp_erp_lun_reopen(sdev, clear, id, 0);
 }
 
 static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act)
 {
 	switch (act->action) {
 	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
-		_zfcp_erp_adapter_reopen(act->adapter, 0, "ersff_1", NULL);
+		_zfcp_erp_adapter_reopen(act->adapter, 0, "ersff_1");
 		break;
 	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
-		_zfcp_erp_port_forced_reopen(act->port, 0, "ersff_2", NULL);
+		_zfcp_erp_port_forced_reopen(act->port, 0, "ersff_2");
 		break;
 	case ZFCP_ERP_ACTION_REOPEN_PORT:
-		_zfcp_erp_port_reopen(act->port, 0, "ersff_3", NULL);
+		_zfcp_erp_port_reopen(act->port, 0, "ersff_3");
 		break;
 	case ZFCP_ERP_ACTION_REOPEN_LUN:
-		_zfcp_erp_lun_reopen(act->sdev, 0, "ersff_4", NULL, 0);
+		_zfcp_erp_lun_reopen(act->sdev, 0, "ersff_4", 0);
 		break;
 	}
 }
@@ -627,13 +619,13 @@ static void zfcp_erp_strategy_followup_success(struct zfcp_erp_action *act)
 {
 	switch (act->action) {
 	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
-		_zfcp_erp_port_reopen_all(act->adapter, 0, "ersfs_1", NULL);
+		_zfcp_erp_port_reopen_all(act->adapter, 0, "ersfs_1");
 		break;
 	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
-		_zfcp_erp_port_reopen(act->port, 0, "ersfs_2", NULL);
+		_zfcp_erp_port_reopen(act->port, 0, "ersfs_2");
 		break;
 	case ZFCP_ERP_ACTION_REOPEN_PORT:
-		_zfcp_erp_lun_reopen_all(act->port, 0, "ersfs_3", NULL);
+		_zfcp_erp_lun_reopen_all(act->port, 0, "ersfs_3");
 		break;
 	}
 }
@@ -652,17 +644,6 @@ static void zfcp_erp_wakeup(struct zfcp_adapter *adapter)
 	read_unlock_irqrestore(&adapter->erp_lock, flags);
 }
 
-static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *act)
-{
-	struct zfcp_qdio *qdio = act->adapter->qdio;
-
-	if (zfcp_qdio_open(qdio))
-		return ZFCP_ERP_FAILED;
-	init_waitqueue_head(&qdio->req_q_wq);
-	atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &act->adapter->status);
-	return ZFCP_ERP_SUCCEEDED;
-}
-
 static void zfcp_erp_enqueue_ptp_port(struct zfcp_adapter *adapter)
 {
 	struct zfcp_port *port;
@@ -670,7 +651,7 @@ static void zfcp_erp_enqueue_ptp_port(struct zfcp_adapter *adapter)
 				 adapter->peer_d_id);
 	if (IS_ERR(port)) /* error or port already attached */
 		return;
-	_zfcp_erp_port_reopen(port, 0, "ereptp1", NULL);
+	_zfcp_erp_port_reopen(port, 0, "ereptp1");
 }
 
 static int zfcp_erp_adapter_strat_fsf_xconf(struct zfcp_erp_action *erp_action)
@@ -693,10 +674,8 @@ static int zfcp_erp_adapter_strat_fsf_xconf(struct zfcp_erp_action *erp_action)
 			return ZFCP_ERP_FAILED;
 		}
 
-		zfcp_dbf_rec_thread_lock("erasfx1", adapter->dbf);
 		wait_event(adapter->erp_ready_wq,
 			   !list_empty(&adapter->erp_ready_head));
-		zfcp_dbf_rec_thread_lock("erasfx2", adapter->dbf);
 		if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT)
 			break;
 
@@ -735,10 +714,10 @@ static int zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *act)
 	if (ret)
 		return ZFCP_ERP_FAILED;
 
-	zfcp_dbf_rec_thread_lock("erasox1", adapter->dbf);
+	zfcp_dbf_rec_run("erasox1", act);
 	wait_event(adapter->erp_ready_wq,
 		   !list_empty(&adapter->erp_ready_head));
-	zfcp_dbf_rec_thread_lock("erasox2", adapter->dbf);
+	zfcp_dbf_rec_run("erasox2", act);
 	if (act->status & ZFCP_STATUS_ERP_TIMEDOUT)
 		return ZFCP_ERP_FAILED;
 
@@ -788,7 +767,7 @@ static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *act)
 {
 	struct zfcp_adapter *adapter = act->adapter;
 
-	if (zfcp_erp_adapter_strategy_open_qdio(act)) {
+	if (zfcp_qdio_open(adapter->qdio)) {
 		atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
 				  ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
 				  &adapter->status);
@@ -1166,7 +1145,7 @@ static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret)
 		if (zfcp_erp_strat_change_det(&adapter->status, erp_status)) {
 			_zfcp_erp_adapter_reopen(adapter,
 						 ZFCP_STATUS_COMMON_ERP_FAILED,
-						 "ersscg1", NULL);
+						 "ersscg1");
 			return ZFCP_ERP_EXIT;
 		}
 		break;
@@ -1176,7 +1155,7 @@ static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret)
 		if (zfcp_erp_strat_change_det(&port->status, erp_status)) {
 			_zfcp_erp_port_reopen(port,
 					      ZFCP_STATUS_COMMON_ERP_FAILED,
-					      "ersscg2", NULL);
+					      "ersscg2");
 			return ZFCP_ERP_EXIT;
 		}
 		break;
@@ -1186,7 +1165,7 @@ static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret)
 		if (zfcp_erp_strat_change_det(&zfcp_sdev->status, erp_status)) {
 			_zfcp_erp_lun_reopen(sdev,
 					     ZFCP_STATUS_COMMON_ERP_FAILED,
-					     "ersscg3", NULL, 0);
+					     "ersscg3", 0);
 			return ZFCP_ERP_EXIT;
 		}
 		break;
@@ -1206,7 +1185,7 @@ static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action)
 	}
 
 	list_del(&erp_action->list);
-	zfcp_dbf_rec_action("eractd1", erp_action);
+	zfcp_dbf_rec_run("eractd1", erp_action);
 
 	switch (erp_action->action) {
 	case ZFCP_ERP_ACTION_REOPEN_LUN:
@@ -1313,7 +1292,7 @@ static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
 			erp_action->status |= ZFCP_STATUS_ERP_LOWMEM;
 		}
 		if (adapter->erp_total_count == adapter->erp_low_mem_count)
-			_zfcp_erp_adapter_reopen(adapter, 0, "erstgy1", NULL);
+			_zfcp_erp_adapter_reopen(adapter, 0, "erstgy1");
 		else {
 			zfcp_erp_strategy_memwait(erp_action);
 			retval = ZFCP_ERP_CONTINUES;
@@ -1357,11 +1336,9 @@ static int zfcp_erp_thread(void *data)
 	unsigned long flags;
 
 	for (;;) {
-		zfcp_dbf_rec_thread_lock("erthrd1", adapter->dbf);
 		wait_event_interruptible(adapter->erp_ready_wq,
 			   !list_empty(&adapter->erp_ready_head) ||
 			   kthread_should_stop());
-		zfcp_dbf_rec_thread_lock("erthrd2", adapter->dbf);
 
 		if (kthread_should_stop())
 			break;

+ 20 - 32
drivers/s390/scsi/zfcp_ext.h

@@ -45,47 +45,33 @@ extern void zfcp_cfdc_adapter_access_changed(struct zfcp_adapter *);
 
 /* zfcp_dbf.c */
 extern int zfcp_dbf_adapter_register(struct zfcp_adapter *);
-extern void zfcp_dbf_adapter_unregister(struct zfcp_dbf *);
-extern void zfcp_dbf_rec_thread(char *, struct zfcp_dbf *);
-extern void zfcp_dbf_rec_thread_lock(char *, struct zfcp_dbf *);
-extern void zfcp_dbf_rec_adapter(char *, void *, struct zfcp_dbf *);
-extern void zfcp_dbf_rec_port(char *, void *, struct zfcp_port *);
-extern void zfcp_dbf_rec_lun(char *, void *, struct scsi_device *);
-extern void zfcp_dbf_rec_trigger(char *, void *, u8, u8, void *,
-				 struct zfcp_adapter *, struct zfcp_port *,
-				 struct scsi_device *);
-extern void zfcp_dbf_rec_action(char *, struct zfcp_erp_action *);
-extern void _zfcp_dbf_hba_fsf_response(const char *, int, struct zfcp_fsf_req *,
-				       struct zfcp_dbf *);
-extern void _zfcp_dbf_hba_fsf_unsol(const char *, int level, struct zfcp_dbf *,
-					  struct fsf_status_read_buffer *);
-extern void zfcp_dbf_hba_qdio(struct zfcp_dbf *, unsigned int, int, int);
+extern void zfcp_dbf_adapter_unregister(struct zfcp_adapter *);
+extern void zfcp_dbf_rec_trig(char *, struct zfcp_adapter *,
+			      struct zfcp_port *, struct scsi_device *, u8, u8);
+extern void zfcp_dbf_rec_run(char *, struct zfcp_erp_action *);
+extern void zfcp_dbf_hba_fsf_uss(char *, struct zfcp_fsf_req *);
+extern void zfcp_dbf_hba_fsf_res(char *, struct zfcp_fsf_req *);
+extern void zfcp_dbf_hba_bit_err(char *, struct zfcp_fsf_req *);
 extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *);
-extern void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *, u32);
-extern void zfcp_dbf_san_ct_response(struct zfcp_fsf_req *);
-extern void zfcp_dbf_san_els_request(struct zfcp_fsf_req *);
-extern void zfcp_dbf_san_els_response(struct zfcp_fsf_req *);
-extern void zfcp_dbf_san_incoming_els(struct zfcp_fsf_req *);
-extern void _zfcp_dbf_scsi(const char *, const char *, int, struct zfcp_dbf *,
-			   struct scsi_cmnd *, struct zfcp_fsf_req *,
-			   unsigned long);
+extern void zfcp_dbf_san_req(char *, struct zfcp_fsf_req *, u32);
+extern void zfcp_dbf_san_res(char *, struct zfcp_fsf_req *);
+extern void zfcp_dbf_san_in_els(char *, struct zfcp_fsf_req *);
+extern void zfcp_dbf_scsi(char *, struct scsi_cmnd *, struct zfcp_fsf_req *);
 
 /* zfcp_erp.c */
 extern void zfcp_erp_set_adapter_status(struct zfcp_adapter *, u32);
 extern void zfcp_erp_clear_adapter_status(struct zfcp_adapter *, u32);
-extern void zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, char *, void *);
-extern void zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, char *,
-				      void *);
+extern void zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, char *);
+extern void zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, char *);
 extern void zfcp_erp_set_port_status(struct zfcp_port *, u32);
 extern void zfcp_erp_clear_port_status(struct zfcp_port *, u32);
-extern int  zfcp_erp_port_reopen(struct zfcp_port *, int, char *, void *);
-extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *, void *);
-extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *,
-					void *);
+extern int  zfcp_erp_port_reopen(struct zfcp_port *, int, char *);
+extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *);
+extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *);
 extern void zfcp_erp_set_lun_status(struct scsi_device *, u32);
 extern void zfcp_erp_clear_lun_status(struct scsi_device *, u32);
-extern void zfcp_erp_lun_reopen(struct scsi_device *, int, char *, void *);
-extern void zfcp_erp_lun_shutdown(struct scsi_device *, int, char *, void *);
+extern void zfcp_erp_lun_reopen(struct scsi_device *, int, char *);
+extern void zfcp_erp_lun_shutdown(struct scsi_device *, int, char *);
 extern void zfcp_erp_lun_shutdown_wait(struct scsi_device *, char *);
 extern int  zfcp_erp_thread_setup(struct zfcp_adapter *);
 extern void zfcp_erp_thread_kill(struct zfcp_adapter *);
@@ -149,6 +135,8 @@ extern int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *, struct zfcp_qdio_req *,
 extern int zfcp_qdio_open(struct zfcp_qdio *);
 extern void zfcp_qdio_close(struct zfcp_qdio *);
 extern void zfcp_qdio_siosl(struct zfcp_adapter *);
+extern struct zfcp_fsf_req *zfcp_fsf_get_req(struct zfcp_qdio *,
+					     struct qdio_buffer *);
 
 /* zfcp_scsi.c */
 extern struct zfcp_data zfcp_data;

+ 10 - 10
drivers/s390/scsi/zfcp_fc.c

@@ -174,7 +174,7 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
 		if (!port->d_id)
 			zfcp_erp_port_reopen(port,
 					     ZFCP_STATUS_COMMON_ERP_FAILED,
-					     "fcrscn1", NULL);
+					     "fcrscn1");
 	}
 	read_unlock_irqrestore(&adapter->port_list_lock, flags);
 }
@@ -215,7 +215,7 @@ static void zfcp_fc_incoming_wwpn(struct zfcp_fsf_req *req, u64 wwpn)
 	read_lock_irqsave(&adapter->port_list_lock, flags);
 	list_for_each_entry(port, &adapter->port_list, list)
 		if (port->wwpn == wwpn) {
-			zfcp_erp_port_forced_reopen(port, 0, "fciwwp1", req);
+			zfcp_erp_port_forced_reopen(port, 0, "fciwwp1");
 			break;
 		}
 	read_unlock_irqrestore(&adapter->port_list_lock, flags);
@@ -251,7 +251,7 @@ void zfcp_fc_incoming_els(struct zfcp_fsf_req *fsf_req)
 		(struct fsf_status_read_buffer *) fsf_req->data;
 	unsigned int els_type = status_buffer->payload.data[0];
 
-	zfcp_dbf_san_incoming_els(fsf_req);
+	zfcp_dbf_san_in_els("fciels1", fsf_req);
 	if (els_type == ELS_PLOGI)
 		zfcp_fc_incoming_plogi(fsf_req);
 	else if (els_type == ELS_LOGO)
@@ -360,7 +360,7 @@ void zfcp_fc_port_did_lookup(struct work_struct *work)
 	ret = zfcp_fc_ns_gid_pn(port);
 	if (ret) {
 		/* could not issue gid_pn for some reason */
-		zfcp_erp_adapter_reopen(port->adapter, 0, "fcgpn_1", NULL);
+		zfcp_erp_adapter_reopen(port->adapter, 0, "fcgpn_1");
 		goto out;
 	}
 
@@ -369,7 +369,7 @@ void zfcp_fc_port_did_lookup(struct work_struct *work)
 		goto out;
 	}
 
-	zfcp_erp_port_reopen(port, 0, "fcgpn_3", NULL);
+	zfcp_erp_port_reopen(port, 0, "fcgpn_3");
 out:
 	put_device(&port->dev);
 }
@@ -426,7 +426,7 @@ static void zfcp_fc_adisc_handler(void *data)
 	if (adisc->els.status) {
 		/* request rejected or timed out */
 		zfcp_erp_port_forced_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
-					    "fcadh_1", NULL);
+					    "fcadh_1");
 		goto out;
 	}
 
@@ -436,7 +436,7 @@ static void zfcp_fc_adisc_handler(void *data)
 	if ((port->wwpn != adisc_resp->adisc_wwpn) ||
 	    !(atomic_read(&port->status) & ZFCP_STATUS_COMMON_OPEN)) {
 		zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
-				     "fcadh_2", NULL);
+				     "fcadh_2");
 		goto out;
 	}
 
@@ -507,7 +507,7 @@ void zfcp_fc_link_test_work(struct work_struct *work)
 
 	/* send of ADISC was not possible */
 	atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
-	zfcp_erp_port_forced_reopen(port, 0, "fcltwk1", NULL);
+	zfcp_erp_port_forced_reopen(port, 0, "fcltwk1");
 
 out:
 	put_device(&port->dev);
@@ -659,7 +659,7 @@ static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft,
 		port = zfcp_port_enqueue(adapter, acc->fp_wwpn,
 					 ZFCP_STATUS_COMMON_NOESC, d_id);
 		if (!IS_ERR(port))
-			zfcp_erp_port_reopen(port, 0, "fcegpf1", NULL);
+			zfcp_erp_port_reopen(port, 0, "fcegpf1");
 		else if (PTR_ERR(port) != -EEXIST)
 			ret = PTR_ERR(port);
 	}
@@ -671,7 +671,7 @@ static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft,
 	write_unlock_irqrestore(&adapter->port_list_lock, flags);
 
 	list_for_each_entry_safe(port, tmp, &remove_lh, list) {
-		zfcp_erp_port_shutdown(port, 0, "fcegpf2", NULL);
+		zfcp_erp_port_shutdown(port, 0, "fcegpf2");
 		zfcp_device_unregister(&port->dev, &zfcp_sysfs_port_attrs);
 	}
 

+ 58 - 55
drivers/s390/scsi/zfcp_fsf.c

@@ -23,7 +23,7 @@ static void zfcp_fsf_request_timeout_handler(unsigned long data)
 	struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
 	zfcp_qdio_siosl(adapter);
 	zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
-				"fsrth_1", NULL);
+				"fsrth_1");
 }
 
 static void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req,
@@ -65,7 +65,7 @@ static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req)
 {
 	dev_err(&req->adapter->ccw_device->dev, "FCP device not "
 		"operational because of an unsupported FC class\n");
-	zfcp_erp_adapter_shutdown(req->adapter, 0, "fscns_1", req);
+	zfcp_erp_adapter_shutdown(req->adapter, 0, "fscns_1");
 	req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 }
 
@@ -98,7 +98,7 @@ static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req)
 	read_lock_irqsave(&adapter->port_list_lock, flags);
 	list_for_each_entry(port, &adapter->port_list, list)
 		if (port->d_id == d_id) {
-			zfcp_erp_port_reopen(port, 0, "fssrpc1", req);
+			zfcp_erp_port_reopen(port, 0, "fssrpc1");
 			break;
 		}
 	read_unlock_irqrestore(&adapter->port_list_lock, flags);
@@ -211,13 +211,13 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
 	struct fsf_status_read_buffer *sr_buf = req->data;
 
 	if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
-		zfcp_dbf_hba_fsf_unsol("dism", adapter->dbf, sr_buf);
+		zfcp_dbf_hba_fsf_uss("fssrh_1", req);
 		mempool_free(sr_buf, adapter->pool.status_read_data);
 		zfcp_fsf_req_free(req);
 		return;
 	}
 
-	zfcp_dbf_hba_fsf_unsol("read", adapter->dbf, sr_buf);
+	zfcp_dbf_hba_fsf_uss("fssrh_2", req);
 
 	switch (sr_buf->status_type) {
 	case FSF_STATUS_READ_PORT_CLOSED:
@@ -232,7 +232,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
 		dev_warn(&adapter->ccw_device->dev,
 			 "The error threshold for checksum statistics "
 			 "has been exceeded\n");
-		zfcp_dbf_hba_berr(adapter->dbf, req);
+		zfcp_dbf_hba_bit_err("fssrh_3", req);
 		break;
 	case FSF_STATUS_READ_LINK_DOWN:
 		zfcp_fsf_status_read_link_down(req);
@@ -247,7 +247,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
 		zfcp_erp_adapter_reopen(adapter,
 					ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
 					ZFCP_STATUS_COMMON_ERP_FAILED,
-					"fssrh_2", req);
+					"fssrh_2");
 		zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKUP, 0);
 
 		break;
@@ -287,7 +287,7 @@ static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req)
 			"The FCP adapter reported a problem "
 			"that cannot be recovered\n");
 		zfcp_qdio_siosl(req->adapter);
-		zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfsqe1", req);
+		zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfsqe1");
 		break;
 	}
 	/* all non-return stats set FSFREQ_ERROR*/
@@ -304,7 +304,7 @@ static void zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *req)
 		dev_err(&req->adapter->ccw_device->dev,
 			"The FCP adapter does not recognize the command 0x%x\n",
 			req->qtcb->header.fsf_command);
-		zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfse_1", req);
+		zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfse_1");
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_ADAPTER_STATUS_AVAILABLE:
@@ -335,17 +335,17 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
 			"QTCB version 0x%x not supported by FCP adapter "
 			"(0x%x to 0x%x)\n", FSF_QTCB_CURRENT_VERSION,
 			psq->word[0], psq->word[1]);
-		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_1", req);
+		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_1");
 		break;
 	case FSF_PROT_ERROR_STATE:
 	case FSF_PROT_SEQ_NUMB_ERROR:
-		zfcp_erp_adapter_reopen(adapter, 0, "fspse_2", req);
+		zfcp_erp_adapter_reopen(adapter, 0, "fspse_2");
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_PROT_UNSUPP_QTCB_TYPE:
 		dev_err(&adapter->ccw_device->dev,
 			"The QTCB type is not supported by the FCP adapter\n");
-		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_3", req);
+		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_3");
 		break;
 	case FSF_PROT_HOST_CONNECTION_INITIALIZING:
 		atomic_set_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
@@ -355,12 +355,12 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
 		dev_err(&adapter->ccw_device->dev,
 			"0x%Lx is an ambiguous request identifier\n",
 			(unsigned long long)qtcb->bottom.support.req_handle);
-		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_4", req);
+		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_4");
 		break;
 	case FSF_PROT_LINK_DOWN:
 		zfcp_fsf_link_down_info_eval(req, &psq->link_down_info);
 		/* go through reopen to flush pending requests */
-		zfcp_erp_adapter_reopen(adapter, 0, "fspse_6", req);
+		zfcp_erp_adapter_reopen(adapter, 0, "fspse_6");
 		break;
 	case FSF_PROT_REEST_QUEUE:
 		/* All ports should be marked as ready to run again */
@@ -369,14 +369,14 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
 		zfcp_erp_adapter_reopen(adapter,
 					ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
 					ZFCP_STATUS_COMMON_ERP_FAILED,
-					"fspse_8", req);
+					"fspse_8");
 		break;
 	default:
 		dev_err(&adapter->ccw_device->dev,
 			"0x%x is not a valid transfer protocol status\n",
 			qtcb->prefix.prot_status);
 		zfcp_qdio_siosl(adapter);
-		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_9", req);
+		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_9");
 	}
 	req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 }
@@ -482,7 +482,7 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
 		dev_err(&adapter->ccw_device->dev,
 			"Unknown or unsupported arbitrated loop "
 			"fibre channel topology detected\n");
-		zfcp_erp_adapter_shutdown(adapter, 0, "fsece_1", req);
+		zfcp_erp_adapter_shutdown(adapter, 0, "fsece_1");
 		return -EIO;
 	}
 
@@ -518,7 +518,7 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
 				"FCP adapter maximum QTCB size (%d bytes) "
 				"is too small\n",
 				bottom->max_qtcb_size);
-			zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh1", req);
+			zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh1");
 			return;
 		}
 		atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
@@ -536,7 +536,7 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
 			&qtcb->header.fsf_status_qual.link_down_info);
 		break;
 	default:
-		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh3", req);
+		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh3");
 		return;
 	}
 
@@ -552,14 +552,14 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
 		dev_err(&adapter->ccw_device->dev,
 			"The FCP adapter only supports newer "
 			"control block versions\n");
-		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh4", req);
+		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh4");
 		return;
 	}
 	if (FSF_QTCB_CURRENT_VERSION > bottom->high_qtcb_version) {
 		dev_err(&adapter->ccw_device->dev,
 			"The FCP adapter only supports older "
 			"control block versions\n");
-		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh5", req);
+		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh5");
 	}
 }
 
@@ -700,7 +700,7 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
 		del_timer(&req->timer);
 		/* lookup request again, list might have changed */
 		zfcp_reqlist_find_rm(adapter->req_list, req_id);
-		zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1", req);
+		zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1");
 		return -EIO;
 	}
 
@@ -754,10 +754,11 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
 	goto out;
 
 failed_req_send:
+	req->data = NULL;
 	mempool_free(sr_buf, adapter->pool.status_read_data);
 failed_buf:
+	zfcp_dbf_hba_fsf_uss("fssr__1", req);
 	zfcp_fsf_req_free(req);
-	zfcp_dbf_hba_fsf_unsol("fail", adapter->dbf, NULL);
 out:
 	spin_unlock_irq(&qdio->req_q_lock);
 	return retval;
@@ -776,14 +777,13 @@ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
 	case FSF_PORT_HANDLE_NOT_VALID:
 		if (fsq->word[0] == fsq->word[1]) {
 			zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0,
-						"fsafch1", req);
+						"fsafch1");
 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		}
 		break;
 	case FSF_LUN_HANDLE_NOT_VALID:
 		if (fsq->word[0] == fsq->word[1]) {
-			zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fsafch2",
-					     req);
+			zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fsafch2");
 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		}
 		break;
@@ -794,14 +794,13 @@ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
 		zfcp_erp_set_port_status(zfcp_sdev->port,
 					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
 		zfcp_erp_port_reopen(zfcp_sdev->port,
-				     ZFCP_STATUS_COMMON_ERP_FAILED, "fsafch3",
-				     req);
+				     ZFCP_STATUS_COMMON_ERP_FAILED, "fsafch3");
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_LUN_BOXED:
 		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED);
 		zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
-				    "fsafch4", req);
+				    "fsafch4");
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
                 break;
 	case FSF_ADAPTER_STATUS_AVAILABLE:
@@ -882,7 +881,7 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
 
 	switch (header->fsf_status) {
         case FSF_GOOD:
-		zfcp_dbf_san_ct_response(req);
+		zfcp_dbf_san_res("fsscth1", req);
 		ct->status = 0;
 		break;
         case FSF_SERVICE_CLASS_NOT_SUPPORTED:
@@ -902,7 +901,7 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_PORT_HANDLE_NOT_VALID:
-		zfcp_erp_adapter_reopen(adapter, 0, "fsscth1", req);
+		zfcp_erp_adapter_reopen(adapter, 0, "fsscth1");
 		/* fall through */
 	case FSF_GENERIC_COMMAND_REJECTED:
 	case FSF_PAYLOAD_SIZE_MISMATCH:
@@ -1025,7 +1024,7 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
 	req->qtcb->header.port_handle = wka_port->handle;
 	req->data = ct;
 
-	zfcp_dbf_san_ct_request(req, wka_port->d_id);
+	zfcp_dbf_san_req("fssct_1", req, wka_port->d_id);
 
 	ret = zfcp_fsf_req_send(req);
 	if (ret)
@@ -1053,7 +1052,7 @@ static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
 
 	switch (header->fsf_status) {
 	case FSF_GOOD:
-		zfcp_dbf_san_els_response(req);
+		zfcp_dbf_san_res("fsselh1", req);
 		send_els->status = 0;
 		break;
 	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
@@ -1127,7 +1126,7 @@ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
 	req->handler = zfcp_fsf_send_els_handler;
 	req->data = els;
 
-	zfcp_dbf_san_els_request(req);
+	zfcp_dbf_san_req("fssels1", req, d_id);
 
 	ret = zfcp_fsf_req_send(req);
 	if (ret)
@@ -1448,7 +1447,7 @@ static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req)
 
 	switch (req->qtcb->header.fsf_status) {
 	case FSF_PORT_HANDLE_NOT_VALID:
-		zfcp_erp_adapter_reopen(port->adapter, 0, "fscph_1", req);
+		zfcp_erp_adapter_reopen(port->adapter, 0, "fscph_1");
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_ADAPTER_STATUS_AVAILABLE:
@@ -1580,7 +1579,7 @@ static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
 
 	if (req->qtcb->header.fsf_status == FSF_PORT_HANDLE_NOT_VALID) {
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-		zfcp_erp_adapter_reopen(wka_port->adapter, 0, "fscwph1", req);
+		zfcp_erp_adapter_reopen(wka_port->adapter, 0, "fscwph1");
 	}
 
 	wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
@@ -1638,7 +1637,7 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
 
 	switch (header->fsf_status) {
 	case FSF_PORT_HANDLE_NOT_VALID:
-		zfcp_erp_adapter_reopen(port->adapter, 0, "fscpph1", req);
+		zfcp_erp_adapter_reopen(port->adapter, 0, "fscpph1");
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_ACCESS_DENIED:
@@ -1654,7 +1653,7 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
 						  &sdev_to_zfcp(sdev)->status);
 		zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ACCESS_BOXED);
 		zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
-				     "fscpph2", req);
+				     "fscpph2");
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_ADAPTER_STATUS_AVAILABLE:
@@ -1743,7 +1742,7 @@ static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
 	switch (header->fsf_status) {
 
 	case FSF_PORT_HANDLE_NOT_VALID:
-		zfcp_erp_adapter_reopen(adapter, 0, "fsouh_1", req);
+		zfcp_erp_adapter_reopen(adapter, 0, "fsouh_1");
 		/* fall through */
 	case FSF_LUN_ALREADY_OPEN:
 		break;
@@ -1755,8 +1754,7 @@ static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
 		zfcp_erp_set_port_status(zfcp_sdev->port,
 					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
 		zfcp_erp_port_reopen(zfcp_sdev->port,
-				     ZFCP_STATUS_COMMON_ERP_FAILED, "fsouh_2",
-				     req);
+				     ZFCP_STATUS_COMMON_ERP_FAILED, "fsouh_2");
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_LUN_SHARING_VIOLATION:
@@ -1852,20 +1850,18 @@ static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req)
 
 	switch (req->qtcb->header.fsf_status) {
 	case FSF_PORT_HANDLE_NOT_VALID:
-		zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fscuh_1",
-					req);
+		zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fscuh_1");
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_LUN_HANDLE_NOT_VALID:
-		zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fscuh_2", req);
+		zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fscuh_2");
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_PORT_BOXED:
 		zfcp_erp_set_port_status(zfcp_sdev->port,
 					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
 		zfcp_erp_port_reopen(zfcp_sdev->port,
-				     ZFCP_STATUS_COMMON_ERP_FAILED, "fscuh_3",
-				     req);
+				     ZFCP_STATUS_COMMON_ERP_FAILED, "fscuh_3");
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_ADAPTER_STATUS_AVAILABLE:
@@ -2002,13 +1998,12 @@ static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req)
 	switch (header->fsf_status) {
 	case FSF_HANDLE_MISMATCH:
 	case FSF_PORT_HANDLE_NOT_VALID:
-		zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fssfch1",
-					req);
+		zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fssfch1");
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_FCPLUN_NOT_VALID:
 	case FSF_LUN_HANDLE_NOT_VALID:
-		zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fssfch2", req);
+		zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fssfch2");
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
@@ -2026,7 +2021,7 @@ static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req)
 			(unsigned long long)zfcp_scsi_dev_lun(sdev),
 			(unsigned long long)zfcp_sdev->port->wwpn);
 		zfcp_erp_adapter_shutdown(zfcp_sdev->port->adapter, 0,
-					  "fssfch3", req);
+					  "fssfch3");
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_CMND_LENGTH_NOT_VALID:
@@ -2037,21 +2032,20 @@ static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req)
 			(unsigned long long)zfcp_scsi_dev_lun(sdev),
 			(unsigned long long)zfcp_sdev->port->wwpn);
 		zfcp_erp_adapter_shutdown(zfcp_sdev->port->adapter, 0,
-					  "fssfch4", req);
+					  "fssfch4");
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_PORT_BOXED:
 		zfcp_erp_set_port_status(zfcp_sdev->port,
 					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
 		zfcp_erp_port_reopen(zfcp_sdev->port,
-				     ZFCP_STATUS_COMMON_ERP_FAILED, "fssfch5",
-				     req);
+				     ZFCP_STATUS_COMMON_ERP_FAILED, "fssfch5");
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_LUN_BOXED:
 		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED);
 		zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
-				    "fssfch6", req);
+				    "fssfch6");
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_ADAPTER_STATUS_AVAILABLE:
@@ -2104,7 +2098,7 @@ static void zfcp_fsf_fcp_cmnd_handler(struct zfcp_fsf_req *req)
 
 skip_fsfstatus:
 	zfcp_fsf_req_trace(req, scpnt);
-	zfcp_dbf_scsi_result(req->adapter->dbf, scpnt, req);
+	zfcp_dbf_scsi_result(scpnt, req);
 
 	scpnt->host_scribble = NULL;
 	(scpnt->scsi_done) (scpnt);
@@ -2420,3 +2414,12 @@ void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
 			break;
 	}
 }
+
+struct zfcp_fsf_req *zfcp_fsf_get_req(struct zfcp_qdio *qdio,
+				      struct qdio_buffer *sbal)
+{
+	struct qdio_buffer_element *sbale = &sbal->element[0];
+	u64 req_id = (unsigned long) sbale->addr;
+
+	return zfcp_reqlist_find(qdio->adapter->req_list, req_id);
+}

+ 5 - 5
drivers/s390/scsi/zfcp_qdio.c

@@ -41,7 +41,7 @@ static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id,
 		zfcp_qdio_siosl(adapter);
 	zfcp_erp_adapter_reopen(adapter,
 				ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
-				ZFCP_STATUS_COMMON_ERP_FAILED, id, NULL);
+				ZFCP_STATUS_COMMON_ERP_FAILED, id);
 }
 
 static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt)
@@ -74,7 +74,6 @@ static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
 	struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
 
 	if (unlikely(qdio_err)) {
-		zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, idx, count);
 		zfcp_qdio_handler_error(qdio, "qdireq1", qdio_err);
 		return;
 	}
@@ -97,7 +96,6 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
 	int sbal_idx, sbal_no;
 
 	if (unlikely(qdio_err)) {
-		zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, idx, count);
 		zfcp_qdio_handler_error(qdio, "qdires1", qdio_err);
 		return;
 	}
@@ -116,7 +114,7 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
 	 * put SBALs back to response queue
 	 */
 	if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, idx, count))
-		zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdires2", NULL);
+		zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdires2");
 }
 
 static struct qdio_buffer_element *
@@ -236,7 +234,7 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
 	if (!ret) {
 		atomic_inc(&qdio->req_q_full);
 		/* assume hanging outbound queue, try queue recovery */
-		zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1", NULL);
+		zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1");
 	}
 
 	spin_lock_irq(&qdio->req_q_lock);
@@ -309,6 +307,7 @@ static int zfcp_qdio_allocate(struct zfcp_qdio *qdio)
 		return -ENOMEM;
 
 	zfcp_qdio_setup_init_data(&init_data, qdio);
+	init_waitqueue_head(&qdio->req_q_wq);
 
 	return qdio_allocate(&init_data);
 }
@@ -393,6 +392,7 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio)
 	/* set index of first avalable SBALS / number of available SBALS */
 	qdio->req_q_idx = 0;
 	atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q);
+	atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);
 
 	return 0;
 

+ 23 - 20
drivers/s390/scsi/zfcp_scsi.c

@@ -30,6 +30,10 @@ module_param_named(dif, enable_dif, bool, 0600);
 MODULE_PARM_DESC(dif, "Enable DIF/DIX data integrity support");
 #endif
 
+static bool allow_lun_scan = 1;
+module_param(allow_lun_scan, bool, 0600);
+MODULE_PARM_DESC(allow_lun_scan, "For NPIV, scan and attach all storage LUNs");
+
 static int zfcp_scsi_change_queue_depth(struct scsi_device *sdev, int depth,
 					int reason)
 {
@@ -68,11 +72,8 @@ static int zfcp_scsi_slave_configure(struct scsi_device *sdp)
 
 static void zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result)
 {
-	struct zfcp_adapter *adapter =
-		(struct zfcp_adapter *) scpnt->device->host->hostdata[0];
-
 	set_host_byte(scpnt, result);
-	zfcp_dbf_scsi_fail_send(adapter->dbf, scpnt);
+	zfcp_dbf_scsi_fail_send(scpnt);
 	scpnt->scsi_done(scpnt);
 }
 
@@ -80,7 +81,6 @@ static
 int zfcp_scsi_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scpnt)
 {
 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
-	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
 	struct fc_rport *rport = starget_to_rport(scsi_target(scpnt->device));
 	int    status, scsi_result, ret;
 
@@ -91,7 +91,7 @@ int zfcp_scsi_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scpnt)
 	scsi_result = fc_remote_port_chkready(rport);
 	if (unlikely(scsi_result)) {
 		scpnt->result = scsi_result;
-		zfcp_dbf_scsi_fail_send(adapter->dbf, scpnt);
+		zfcp_dbf_scsi_fail_send(scpnt);
 		scpnt->scsi_done(scpnt);
 		return 0;
 	}
@@ -134,6 +134,7 @@ static int zfcp_scsi_slave_alloc(struct scsi_device *sdev)
 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
 	struct zfcp_port *port;
 	struct zfcp_unit *unit;
+	int npiv = adapter->connection_features & FSF_FEATURE_NPIV_MODE;
 
 	port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
 	if (!port)
@@ -143,7 +144,7 @@ static int zfcp_scsi_slave_alloc(struct scsi_device *sdev)
 	if (unit)
 		put_device(&unit->dev);
 
-	if (!unit && !(adapter->connection_features & FSF_FEATURE_NPIV_MODE)) {
+	if (!unit && !(allow_lun_scan && npiv)) {
 		put_device(&port->dev);
 		return -ENXIO;
 	}
@@ -158,7 +159,7 @@ static int zfcp_scsi_slave_alloc(struct scsi_device *sdev)
 	spin_lock_init(&zfcp_sdev->latencies.lock);
 
 	zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_RUNNING);
-	zfcp_erp_lun_reopen(sdev, 0, "scsla_1", NULL);
+	zfcp_erp_lun_reopen(sdev, 0, "scsla_1");
 	zfcp_erp_wait(port->adapter);
 
 	return 0;
@@ -182,8 +183,7 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
 	old_req = zfcp_reqlist_find(adapter->req_list, old_reqid);
 	if (!old_req) {
 		write_unlock_irqrestore(&adapter->abort_lock, flags);
-		zfcp_dbf_scsi_abort("lte1", adapter->dbf, scpnt, NULL,
-				    old_reqid);
+		zfcp_dbf_scsi_abort("abrt_or", scpnt, NULL);
 		return FAILED; /* completion could be in progress */
 	}
 	old_req->data = NULL;
@@ -198,29 +198,32 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
 
 		zfcp_erp_wait(adapter);
 		ret = fc_block_scsi_eh(scpnt);
-		if (ret)
+		if (ret) {
+			zfcp_dbf_scsi_abort("abrt_bl", scpnt, NULL);
 			return ret;
+		}
 		if (!(atomic_read(&adapter->status) &
 		      ZFCP_STATUS_COMMON_RUNNING)) {
-			zfcp_dbf_scsi_abort("nres", adapter->dbf, scpnt, NULL,
-					    old_reqid);
+			zfcp_dbf_scsi_abort("abrt_ru", scpnt, NULL);
 			return SUCCESS;
 		}
 	}
-	if (!abrt_req)
+	if (!abrt_req) {
+		zfcp_dbf_scsi_abort("abrt_ar", scpnt, NULL);
 		return FAILED;
+	}
 
 	wait_for_completion(&abrt_req->completion);
 
 	if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED)
-		dbf_tag = "okay";
+		dbf_tag = "abrt_ok";
 	else if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED)
-		dbf_tag = "lte2";
+		dbf_tag = "abrt_nn";
 	else {
-		dbf_tag = "fail";
+		dbf_tag = "abrt_fa";
 		retval = FAILED;
 	}
-	zfcp_dbf_scsi_abort(dbf_tag, adapter->dbf, scpnt, abrt_req, old_reqid);
+	zfcp_dbf_scsi_abort(dbf_tag, scpnt, abrt_req);
 	zfcp_fsf_req_free(abrt_req);
 	return retval;
 }
@@ -280,7 +283,7 @@ static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
 	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
 	int ret;
 
-	zfcp_erp_adapter_reopen(adapter, 0, "schrh_1", scpnt);
+	zfcp_erp_adapter_reopen(adapter, 0, "schrh_1");
 	zfcp_erp_wait(adapter);
 	ret = fc_block_scsi_eh(scpnt);
 	if (ret)
@@ -518,7 +521,7 @@ static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport)
 	port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
 
 	if (port) {
-		zfcp_erp_port_forced_reopen(port, 0, "sctrpi1", NULL);
+		zfcp_erp_port_forced_reopen(port, 0, "sctrpi1");
 		put_device(&port->dev);
 	}
 }
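
The new allow_lun_scan parameter relaxes slave_alloc: previously a LUN attached only if a zfcp unit had been configured or the adapter ran in NPIV mode; now automatic attachment additionally requires allow_lun_scan (on by default), while explicitly configured units always attach. A standalone sketch of that acceptance rule, with stand-in names (attach_allowed, has_unit, allow_scan):

#include <stdbool.h>
#include <stdio.h>

static bool attach_allowed(bool has_unit, bool npiv, bool allow_scan)
{
	/* configured units always attach; otherwise the adapter must
	 * run NPIV and automatic LUN scanning must be enabled */
	return has_unit || (allow_scan && npiv);
}

int main(void)
{
	printf("%d\n", attach_allowed(false, true,  true));	/* 1 */
	printf("%d\n", attach_allowed(false, true,  false));	/* 0 */
	printf("%d\n", attach_allowed(true,  false, false));	/* 1 */
	return 0;
}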

+ 4 - 5
drivers/s390/scsi/zfcp_sysfs.c

@@ -105,8 +105,7 @@ static ssize_t zfcp_sysfs_port_failed_store(struct device *dev,
 		return -EINVAL;
 
 	zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_RUNNING);
-	zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, "sypfai2",
-			     NULL);
+	zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, "sypfai2");
 	zfcp_erp_wait(port->adapter);
 
 	return count;
@@ -148,7 +147,7 @@ static ssize_t zfcp_sysfs_unit_failed_store(struct device *dev,
 	if (sdev) {
 		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_RUNNING);
 		zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
-				    "syufai2", NULL);
+				    "syufai2");
 		zfcp_erp_wait(unit->port->adapter);
 	} else
 		zfcp_unit_scsi_scan(unit);
@@ -198,7 +197,7 @@ static ssize_t zfcp_sysfs_adapter_failed_store(struct device *dev,
 
 	zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING);
 	zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
-				"syafai2", NULL);
+				"syafai2");
 	zfcp_erp_wait(adapter);
 out:
 	zfcp_ccw_adapter_put(adapter);
@@ -256,7 +255,7 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
 
 	put_device(&port->dev);
 
-	zfcp_erp_port_shutdown(port, 0, "syprs_1", NULL);
+	zfcp_erp_port_shutdown(port, 0, "syprs_1");
 	zfcp_device_unregister(&port->dev, &zfcp_sysfs_port_attrs);
  out:
 	zfcp_ccw_adapter_put(adapter);

+ 1 - 2
drivers/scsi/arcmsr/arcmsr_hba.c

@@ -1171,9 +1171,8 @@ static int arcmsr_build_ccb(struct AdapterControlBlock *acb,
 	arcmsr_cdb->msgPages = arccdbsize/0x100 + (arccdbsize % 0x100 ? 1 : 0);
 	if ( arccdbsize > 256)
 		arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
-	if (pcmd->cmnd[0]|WRITE_6 || pcmd->cmnd[0]|WRITE_10 || pcmd->cmnd[0]|WRITE_12 ){
+	if (pcmd->sc_data_direction == DMA_TO_DEVICE)
 		arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
-	}
 	ccb->arc_cdb_size = arccdbsize;
 	return SUCCESS;
 }
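
The arcmsr change above fixes a write-detection test that used bitwise OR instead of a comparison: WRITE_6, WRITE_10 and WRITE_12 are the non-zero opcodes 0x0a, 0x2a and 0xaa, so pcmd->cmnd[0] | WRITE_6 can never be zero and the WRITE flag was set for every command, reads included. The replacement keys off the DMA data direction instead. A small standalone demo of the difference (opcode values as defined in <scsi/scsi.h>):

#include <stdio.h>

#define WRITE_6		0x0a
#define WRITE_10	0x2a
#define WRITE_12	0xaa

int main(void)
{
	unsigned char opcode = 0x28;	/* READ_10, i.e. not a write */

	/* bitwise OR with a non-zero constant is never zero, so the
	 * old "is this a write?" check fired for reads as well */
	if (opcode | WRITE_6 || opcode | WRITE_10 || opcode | WRITE_12)
		printf("old check: treated as write\n");

	/* an opcode comparison (or, as in the fix, checking the DMA
	 * direction) only matches actual write commands */
	if (opcode == WRITE_6 || opcode == WRITE_10 || opcode == WRITE_12)
		printf("never printed for READ_10\n");
	return 0;
}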

+ 3 - 2
drivers/scsi/be2iscsi/be_main.c

@@ -3785,7 +3785,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
 	dma_addr_t paddr;
 
 	io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool,
-					  GFP_KERNEL, &paddr);
+					  GFP_ATOMIC, &paddr);
 	if (!io_task->cmd_bhs)
 		return -ENOMEM;
 	io_task->bhs_pa.u.a64.address = paddr;
@@ -3914,7 +3914,8 @@ static void beiscsi_cleanup_task(struct iscsi_task *task)
 			io_task->psgl_handle = NULL;
 		}
 	} else {
-		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN)
+		if (task->hdr &&
+		   ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN))
 			return;
 		if (io_task->psgl_handle) {
 			spin_lock(&phba->mgmt_sgl_lock);

+ 1 - 3
drivers/scsi/bfa/Makefile

@@ -3,6 +3,4 @@ obj-$(CONFIG_SCSI_BFA_FC) := bfa.o
 bfa-y := bfad.o bfad_im.o bfad_attr.o bfad_debugfs.o
 bfa-y += bfa_ioc.o bfa_ioc_cb.o bfa_ioc_ct.o bfa_hw_cb.o bfa_hw_ct.o
 bfa-y += bfa_fcs.o bfa_fcs_lport.o bfa_fcs_rport.o bfa_fcs_fcpim.o bfa_fcbuild.o
-bfa-y += bfa_port.o bfa_fcpim.o bfa_core.o bfa_drv.o bfa_svc.o
-
-ccflags-y := -DBFA_PERF_BUILD
+bfa-y += bfa_port.o bfa_fcpim.o bfa_core.o bfa_svc.o

+ 3 - 33
drivers/scsi/bfa/bfa.h

@@ -17,7 +17,7 @@
 #ifndef __BFA_H__
 #define __BFA_H__
 
-#include "bfa_os_inc.h"
+#include "bfad_drv.h"
 #include "bfa_cs.h"
 #include "bfa_plog.h"
 #include "bfa_defs_svc.h"
@@ -33,7 +33,6 @@ typedef void    (*bfa_cb_cbfn_t) (void *cbarg, bfa_boolean_t complete);
  * Interrupt message handlers
  */
 void bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m);
-void bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func);
 
 /*
  * Request and response queue related defines
@@ -121,8 +120,8 @@ bfa_reqq_winit(struct bfa_reqq_wait_s *wqe, void (*qresume) (void *cbarg),
 									\
 		struct list_head *waitq = bfa_reqq(__bfa, __reqq);      \
 									\
-		bfa_assert(((__reqq) < BFI_IOC_MAX_CQS));      \
-		bfa_assert((__wqe)->qresume && (__wqe)->cbarg);      \
+		WARN_ON(((__reqq) >= BFI_IOC_MAX_CQS));			\
+		WARN_ON(!((__wqe)->qresume && (__wqe)->cbarg));		\
 									\
 		list_add_tail(&(__wqe)->qe, waitq);      \
 	} while (0)
@@ -297,7 +296,6 @@ void bfa_iocfc_attach(struct bfa_s *bfa, void *bfad,
 		      struct bfa_iocfc_cfg_s *cfg,
 		      struct bfa_meminfo_s *meminfo,
 		      struct bfa_pcidev_s *pcidev);
-void bfa_iocfc_detach(struct bfa_s *bfa);
 void bfa_iocfc_init(struct bfa_s *bfa);
 void bfa_iocfc_start(struct bfa_s *bfa);
 void bfa_iocfc_stop(struct bfa_s *bfa);
@@ -333,12 +331,9 @@ void bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *vecmap, u32 *nvecs,
 			   u32 *maxvec);
 void bfa_hwct_msix_get_rme_range(struct bfa_s *bfa, u32 *start,
 				 u32 *end);
-void bfa_com_port_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi);
 void bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns);
 wwn_t bfa_iocfc_get_pwwn(struct bfa_s *bfa);
 wwn_t bfa_iocfc_get_nwwn(struct bfa_s *bfa);
-void bfa_iocfc_get_pbc_boot_cfg(struct bfa_s *bfa,
-				struct bfa_boot_pbc_s *pbcfg);
 int bfa_iocfc_get_pbc_vports(struct bfa_s *bfa,
 				struct bfi_pbc_vport_s *pbc_vport);
 
@@ -386,19 +381,11 @@ void bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg,
 void bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
 		struct bfa_meminfo_s *meminfo,
 		struct bfa_pcidev_s *pcidev);
-void bfa_init_trc(struct bfa_s *bfa, struct bfa_trc_mod_s *trcmod);
-void bfa_init_plog(struct bfa_s *bfa, struct bfa_plog_s *plog);
 void bfa_detach(struct bfa_s *bfa);
-void bfa_init(struct bfa_s *bfa);
-void bfa_start(struct bfa_s *bfa);
-void bfa_stop(struct bfa_s *bfa);
-void bfa_attach_fcs(struct bfa_s *bfa);
 void bfa_cb_init(void *bfad, bfa_status_t status);
 void bfa_cb_updateq(void *bfad, bfa_status_t status);
 
 bfa_boolean_t bfa_intx(struct bfa_s *bfa);
-void bfa_intx_disable(struct bfa_s *bfa);
-void bfa_intx_enable(struct bfa_s *bfa);
 void bfa_isr_enable(struct bfa_s *bfa);
 void bfa_isr_disable(struct bfa_s *bfa);
 
@@ -408,31 +395,14 @@ void bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q);
 
 typedef void (*bfa_cb_ioc_t) (void *cbarg, enum bfa_status status);
 void bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr);
-void bfa_get_attr(struct bfa_s *bfa, struct bfa_ioc_attr_s *ioc_attr);
 
-void bfa_adapter_get_attr(struct bfa_s *bfa,
-			  struct bfa_adapter_attr_s *ad_attr);
-u64 bfa_adapter_get_id(struct bfa_s *bfa);
 
 bfa_status_t bfa_iocfc_israttr_set(struct bfa_s *bfa,
 				   struct bfa_iocfc_intr_attr_s *attr);
 
 void bfa_iocfc_enable(struct bfa_s *bfa);
 void bfa_iocfc_disable(struct bfa_s *bfa);
-void bfa_chip_reset(struct bfa_s *bfa);
-void bfa_timer_tick(struct bfa_s *bfa);
 #define bfa_timer_start(_bfa, _timer, _timercb, _arg, _timeout)		\
 	bfa_timer_begin(&(_bfa)->timer_mod, _timer, _timercb, _arg, _timeout)
 
-/*
- * BFA debug API functions
- */
-bfa_status_t bfa_debug_fwtrc(struct bfa_s *bfa, void *trcdata, int *trclen);
-bfa_status_t bfa_debug_fwsave(struct bfa_s *bfa, void *trcdata, int *trclen);
-bfa_status_t bfa_debug_fwcore(struct bfa_s *bfa, void *buf,
-			      u32 *offset, int *buflen);
-void bfa_debug_fwsave_clear(struct bfa_s *bfa);
-bfa_status_t bfa_fw_stats_get(struct bfa_s *bfa, void *data);
-bfa_status_t bfa_fw_stats_clear(struct bfa_s *bfa);
-
 #endif /* __BFA_H__ */

+ 0 - 169
drivers/scsi/bfa/bfa_cb_ioim.h

@@ -1,169 +0,0 @@
-/*
- * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- *
- * Linux driver for Brocade Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- */
-
-#ifndef __BFA_HCB_IOIM_H__
-#define __BFA_HCB_IOIM_H__
-
-#include "bfa_os_inc.h"
-/*
- * task attribute values in FCP-2 FCP_CMND IU
- */
-#define SIMPLE_Q    0
-#define HEAD_OF_Q   1
-#define ORDERED_Q   2
-#define ACA_Q	    4
-#define UNTAGGED    5
-
-static inline lun_t
-bfad_int_to_lun(u32 luno)
-{
-	union {
-		u16	scsi_lun[4];
-		lun_t		bfa_lun;
-	} lun;
-
-	lun.bfa_lun     = 0;
-	lun.scsi_lun[0] = cpu_to_be16(luno);
-
-	return lun.bfa_lun;
-}
-
-/*
- * Get LUN for the I/O request
- */
-#define bfa_cb_ioim_get_lun(__dio)	\
-	bfad_int_to_lun(((struct scsi_cmnd *)__dio)->device->lun)
-
-/*
- * Get CDB for the I/O request
- */
-static inline u8 *
-bfa_cb_ioim_get_cdb(struct bfad_ioim_s *dio)
-{
-	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
-
-	return (u8 *) cmnd->cmnd;
-}
-
-/*
- * Get I/O direction (read/write) for the I/O request
- */
-static inline enum fcp_iodir
-bfa_cb_ioim_get_iodir(struct bfad_ioim_s *dio)
-{
-	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
-	enum dma_data_direction dmadir;
-
-	dmadir = cmnd->sc_data_direction;
-	if (dmadir == DMA_TO_DEVICE)
-		return FCP_IODIR_WRITE;
-	else if (dmadir == DMA_FROM_DEVICE)
-		return FCP_IODIR_READ;
-	else
-		return FCP_IODIR_NONE;
-}
-
-/*
- * Get IO size in bytes for the I/O request
- */
-static inline u32
-bfa_cb_ioim_get_size(struct bfad_ioim_s *dio)
-{
-	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
-
-	return scsi_bufflen(cmnd);
-}
-
-/*
- * Get timeout for the I/O request
- */
-static inline u8
-bfa_cb_ioim_get_timeout(struct bfad_ioim_s *dio)
-{
-	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
-	/*
-	 * TBD: need a timeout for scsi passthru
-	 */
-	if (cmnd->device->host == NULL)
-		return 4;
-
-	return 0;
-}
-
-/*
- * Get Command Reference Number for the I/O request. 0 if none.
- */
-static inline u8
-bfa_cb_ioim_get_crn(struct bfad_ioim_s *dio)
-{
-	return 0;
-}
-
-/*
- * Get SAM-3 priority for the I/O request. 0 is default.
- */
-static inline u8
-bfa_cb_ioim_get_priority(struct bfad_ioim_s *dio)
-{
-	return 0;
-}
-
-/*
- * Get task attributes for the I/O request. Default is FCP_TASK_ATTR_SIMPLE(0).
- */
-static inline u8
-bfa_cb_ioim_get_taskattr(struct bfad_ioim_s *dio)
-{
-	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
-	u8	task_attr = UNTAGGED;
-
-	if (cmnd->device->tagged_supported) {
-		switch (cmnd->tag) {
-		case HEAD_OF_QUEUE_TAG:
-			task_attr = HEAD_OF_Q;
-			break;
-		case ORDERED_QUEUE_TAG:
-			task_attr = ORDERED_Q;
-			break;
-		default:
-			task_attr = SIMPLE_Q;
-			break;
-		}
-	}
-
-	return task_attr;
-}
-
-/*
- * Get CDB length in bytes for the I/O request. Default is FCP_CMND_CDB_LEN(16).
- */
-static inline u8
-bfa_cb_ioim_get_cdblen(struct bfad_ioim_s *dio)
-{
-	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
-
-	return cmnd->cmd_len;
-}
-
-/*
- * Assign queue to be used for the I/O request. This value depends on whether
- * the driver wants to use the queues via any specific algorithm. Currently,
- * this is not supported.
- */
-#define bfa_cb_ioim_get_reqq(__dio) BFA_FALSE
-
-#endif /* __BFA_HCB_IOIM_H__ */

+ 104 - 269
drivers/scsi/bfa/bfa_core.c

@@ -15,12 +15,99 @@
  * General Public License for more details.
  */
 
+#include "bfad_drv.h"
 #include "bfa_modules.h"
 #include "bfi_ctreg.h"
-#include "bfad_drv.h"
 
 BFA_TRC_FILE(HAL, CORE);
 
+/*
+ * BFA module list terminated by NULL
+ */
+static struct bfa_module_s *hal_mods[] = {
+	&hal_mod_sgpg,
+	&hal_mod_fcport,
+	&hal_mod_fcxp,
+	&hal_mod_lps,
+	&hal_mod_uf,
+	&hal_mod_rport,
+	&hal_mod_fcpim,
+	NULL
+};
+
+/*
+ * Message handlers for various modules.
+ */
+static bfa_isr_func_t  bfa_isrs[BFI_MC_MAX] = {
+	bfa_isr_unhandled,	/* NONE */
+	bfa_isr_unhandled,	/* BFI_MC_IOC */
+	bfa_isr_unhandled,	/* BFI_MC_DIAG */
+	bfa_isr_unhandled,	/* BFI_MC_FLASH */
+	bfa_isr_unhandled,	/* BFI_MC_CEE */
+	bfa_fcport_isr,		/* BFI_MC_FCPORT */
+	bfa_isr_unhandled,	/* BFI_MC_IOCFC */
+	bfa_isr_unhandled,	/* BFI_MC_LL */
+	bfa_uf_isr,		/* BFI_MC_UF */
+	bfa_fcxp_isr,		/* BFI_MC_FCXP */
+	bfa_lps_isr,		/* BFI_MC_LPS */
+	bfa_rport_isr,		/* BFI_MC_RPORT */
+	bfa_itnim_isr,		/* BFI_MC_ITNIM */
+	bfa_isr_unhandled,	/* BFI_MC_IOIM_READ */
+	bfa_isr_unhandled,	/* BFI_MC_IOIM_WRITE */
+	bfa_isr_unhandled,	/* BFI_MC_IOIM_IO */
+	bfa_ioim_isr,		/* BFI_MC_IOIM */
+	bfa_ioim_good_comp_isr,	/* BFI_MC_IOIM_IOCOM */
+	bfa_tskim_isr,		/* BFI_MC_TSKIM */
+	bfa_isr_unhandled,	/* BFI_MC_SBOOT */
+	bfa_isr_unhandled,	/* BFI_MC_IPFC */
+	bfa_isr_unhandled,	/* BFI_MC_PORT */
+	bfa_isr_unhandled,	/* --------- */
+	bfa_isr_unhandled,	/* --------- */
+	bfa_isr_unhandled,	/* --------- */
+	bfa_isr_unhandled,	/* --------- */
+	bfa_isr_unhandled,	/* --------- */
+	bfa_isr_unhandled,	/* --------- */
+	bfa_isr_unhandled,	/* --------- */
+	bfa_isr_unhandled,	/* --------- */
+	bfa_isr_unhandled,	/* --------- */
+	bfa_isr_unhandled,	/* --------- */
+};
+/*
+ * Message handlers for mailbox command classes
+ */
+static bfa_ioc_mbox_mcfunc_t  bfa_mbox_isrs[BFI_MC_MAX] = {
+	NULL,
+	NULL,		/* BFI_MC_IOC   */
+	NULL,		/* BFI_MC_DIAG  */
+	NULL,		/* BFI_MC_FLASH */
+	NULL,		/* BFI_MC_CEE   */
+	NULL,		/* BFI_MC_PORT  */
+	bfa_iocfc_isr,	/* BFI_MC_IOCFC */
+	NULL,
+};
+
+
+
+static void
+bfa_com_port_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi)
+{
+	struct bfa_port_s	*port = &bfa->modules.port;
+	u32			dm_len;
+	u8			*dm_kva;
+	u64			dm_pa;
+
+	dm_len = bfa_port_meminfo();
+	dm_kva = bfa_meminfo_dma_virt(mi);
+	dm_pa  = bfa_meminfo_dma_phys(mi);
+
+	memset(port, 0, sizeof(struct bfa_port_s));
+	bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod);
+	bfa_port_mem_claim(port, dm_kva, dm_pa);
+
+	bfa_meminfo_dma_virt(mi) = dm_kva + dm_len;
+	bfa_meminfo_dma_phys(mi) = dm_pa + dm_len;
+}
+
 /*
  * BFA IOC FC related definitions
  */
@@ -66,18 +153,6 @@ static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn;
 /*
  * BFA Interrupt handling functions
  */
-static void
-bfa_msix_errint(struct bfa_s *bfa, u32 intr)
-{
-	bfa_ioc_error_isr(&bfa->ioc);
-}
-
-static void
-bfa_msix_lpu(struct bfa_s *bfa)
-{
-	bfa_ioc_mbox_isr(&bfa->ioc);
-}
-
 static void
 bfa_reqq_resume(struct bfa_s *bfa, int qid)
 {
@@ -104,9 +179,6 @@ bfa_msix_all(struct bfa_s *bfa, int vec)
 	bfa_intx(bfa);
 }
 
-/*
- *  hal_intr_api
- */
 bfa_boolean_t
 bfa_intx(struct bfa_s *bfa)
 {
@@ -150,18 +222,6 @@ bfa_intx(struct bfa_s *bfa)
 	return BFA_TRUE;
 }
 
-void
-bfa_intx_enable(struct bfa_s *bfa)
-{
-	writel(bfa->iocfc.intr_mask, bfa->iocfc.bfa_regs.intr_mask);
-}
-
-void
-bfa_intx_disable(struct bfa_s *bfa)
-{
-	writel(-1L, bfa->iocfc.bfa_regs.intr_mask);
-}
-
 void
 bfa_isr_enable(struct bfa_s *bfa)
 {
@@ -225,7 +285,7 @@ bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m)
 	bfa_trc(bfa, m->mhdr.msg_class);
 	bfa_trc(bfa, m->mhdr.msg_id);
 	bfa_trc(bfa, m->mhdr.mtag.i2htok);
-	bfa_assert(0);
+	WARN_ON(1);
 	bfa_trc_stop(bfa->trcmod);
 }
 
@@ -236,8 +296,6 @@ bfa_msix_rspq(struct bfa_s *bfa, int qid)
 	u32 pi, ci;
 	struct list_head *waitq;
 
-	bfa_trc_fp(bfa, qid);
-
 	qid &= (BFI_IOC_MAX_CQS - 1);
 
 	bfa->iocfc.hwif.hw_rspq_ack(bfa, qid);
@@ -245,16 +303,10 @@ bfa_msix_rspq(struct bfa_s *bfa, int qid)
 	ci = bfa_rspq_ci(bfa, qid);
 	pi = bfa_rspq_pi(bfa, qid);
 
-	bfa_trc_fp(bfa, ci);
-	bfa_trc_fp(bfa, pi);
-
 	if (bfa->rme_process) {
 		while (ci != pi) {
 			m = bfa_rspq_elem(bfa, qid, ci);
-			bfa_assert_fp(m->mhdr.msg_class < BFI_MC_MAX);
-
 			bfa_isrs[m->mhdr.msg_class] (bfa, m);
-
 			CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems);
 		}
 	}
@@ -282,7 +334,7 @@ bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
 	intr = readl(bfa->iocfc.bfa_regs.intr_status);
 
 	if (intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1))
-		bfa_msix_lpu(bfa);
+		bfa_ioc_mbox_isr(&bfa->ioc);
 
 	intr &= (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
 		__HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS | __HFN_INT_LL_HALT);
@@ -313,22 +365,16 @@ bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
 		}
 
 		writel(intr, bfa->iocfc.bfa_regs.intr_status);
-		bfa_msix_errint(bfa, intr);
+		bfa_ioc_error_isr(&bfa->ioc);
 	}
 }
 
-void
-bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func)
-{
-	bfa_isrs[mc] = isr_func;
-}
-
 /*
  * BFA IOC FC related functions
  */
 
 /*
- *  hal_ioc_pvt BFA IOC private functions
+ *  BFA IOC private functions
  */
 
 static void
@@ -379,7 +425,7 @@ bfa_iocfc_send_cfg(void *bfa_arg)
 	struct bfa_iocfc_cfg_s	*cfg = &iocfc->cfg;
 	int		i;
 
-	bfa_assert(cfg->fwcfg.num_cqs <= BFI_IOC_MAX_CQS);
+	WARN_ON(cfg->fwcfg.num_cqs > BFI_IOC_MAX_CQS);
 	bfa_trc(bfa, cfg->fwcfg.num_cqs);
 
 	bfa_iocfc_reset_queues(bfa);
@@ -488,8 +534,8 @@ bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg,
 	 * First allocate dma memory for IOC.
 	 */
 	bfa_ioc_mem_claim(&bfa->ioc, dm_kva, dm_pa);
-	dm_kva += bfa_ioc_meminfo();
-	dm_pa  += bfa_ioc_meminfo();
+	dm_kva += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);
+	dm_pa  += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);
 
 	/*
 	 * Claim DMA-able memory for the request/response queues and for shadow
@@ -552,7 +598,7 @@ bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg,
 	bfa_meminfo_dma_virt(meminfo) = dm_kva;
 	bfa_meminfo_dma_phys(meminfo) = dm_pa;
 
-	dbgsz = bfa_ioc_debug_trcsz(bfa_auto_recover);
+	dbgsz = (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
 	if (dbgsz > 0) {
 		bfa_ioc_debug_memclaim(&bfa->ioc, bfa_meminfo_kva(meminfo));
 		bfa_meminfo_kva(meminfo) += dbgsz;
@@ -699,7 +745,7 @@ bfa_iocfc_disable_cbfn(void *bfa_arg)
 		bfa_cb_queue(bfa, &bfa->iocfc.stop_hcb_qe, bfa_iocfc_stop_cb,
 			     bfa);
 	else {
-		bfa_assert(bfa->iocfc.action == BFA_IOCFC_ACT_DISABLE);
+		WARN_ON(bfa->iocfc.action != BFA_IOCFC_ACT_DISABLE);
 		bfa_cb_queue(bfa, &bfa->iocfc.dis_hcb_qe, bfa_iocfc_disable_cb,
 			     bfa);
 	}
@@ -735,9 +781,6 @@ bfa_iocfc_reset_cbfn(void *bfa_arg)
 	bfa_isr_enable(bfa);
 }
 
-/*
- *  hal_ioc_public
- */
 
 /*
  * Query IOC memory requirement information.
@@ -747,11 +790,11 @@ bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
 		  u32 *dm_len)
 {
 	/* dma memory for IOC */
-	*dm_len += bfa_ioc_meminfo();
+	*dm_len += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);
 
 	bfa_iocfc_fw_cfg_sz(cfg, dm_len);
 	bfa_iocfc_cqs_sz(cfg, dm_len);
-	*km_len += bfa_ioc_debug_trcsz(bfa_auto_recover);
+	*km_len += (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
 }
 
 /*
@@ -783,22 +826,13 @@ bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
 
 	bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev);
 	bfa_iocfc_mem_claim(bfa, cfg, meminfo);
-	bfa_timer_init(&bfa->timer_mod);
+	INIT_LIST_HEAD(&bfa->timer_mod.timer_q);
 
 	INIT_LIST_HEAD(&bfa->comp_q);
 	for (i = 0; i < BFI_IOC_MAX_CQS; i++)
 		INIT_LIST_HEAD(&bfa->reqq_waitq[i]);
 }
 
-/*
- * Query IOC memory requirement information.
- */
-void
-bfa_iocfc_detach(struct bfa_s *bfa)
-{
-	bfa_ioc_detach(&bfa->ioc);
-}
-
 /*
  * Query IOC memory requirement information.
  */
@@ -852,22 +886,10 @@ bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m)
 		iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK);
 		break;
 	default:
-		bfa_assert(0);
+		WARN_ON(1);
 	}
 }
 
-void
-bfa_adapter_get_attr(struct bfa_s *bfa, struct bfa_adapter_attr_s *ad_attr)
-{
-	bfa_ioc_get_adapter_attr(&bfa->ioc, ad_attr);
-}
-
-u64
-bfa_adapter_get_id(struct bfa_s *bfa)
-{
-	return bfa_ioc_get_adid(&bfa->ioc);
-}
-
 void
 bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr)
 {
@@ -976,18 +998,6 @@ bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns)
 	memcpy(wwns, cfgrsp->bootwwns.wwn, sizeof(cfgrsp->bootwwns.wwn));
 }
 
-void
-bfa_iocfc_get_pbc_boot_cfg(struct bfa_s *bfa, struct bfa_boot_pbc_s *pbcfg)
-{
-	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
-	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
-
-	pbcfg->enable = cfgrsp->pbc_cfg.boot_enabled;
-	pbcfg->nbluns = cfgrsp->pbc_cfg.nbluns;
-	pbcfg->speed = cfgrsp->pbc_cfg.port_speed;
-	memcpy(pbcfg->pblun, cfgrsp->pbc_cfg.blun, sizeof(pbcfg->pblun));
-}
-
 int
 bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport)
 {
@@ -998,9 +1008,6 @@ bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport)
 	return cfgrsp->pbc_cfg.nvports;
 }
 
-/*
- *  hal_api
- */
 
 /*
  * Use this function query the memory requirement of the BFA library.
@@ -1036,7 +1043,7 @@ bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo)
 	int		i;
 	u32	km_len = 0, dm_len = 0;
 
-	bfa_assert((cfg != NULL) && (meminfo != NULL));
+	WARN_ON((cfg == NULL) || (meminfo == NULL));
 
 	memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s));
 	meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_type =
@@ -1090,7 +1097,7 @@ bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
 
 	bfa->fcs = BFA_FALSE;
 
-	bfa_assert((cfg != NULL) && (meminfo != NULL));
+	WARN_ON((cfg == NULL) || (meminfo == NULL));
 
 	/*
 	 * initialize all memory pointers for iterative allocation
@@ -1129,79 +1136,7 @@ bfa_detach(struct bfa_s *bfa)
 
 	for (i = 0; hal_mods[i]; i++)
 		hal_mods[i]->detach(bfa);
-
-	bfa_iocfc_detach(bfa);
-}
-
-
-void
-bfa_init_trc(struct bfa_s *bfa, struct bfa_trc_mod_s *trcmod)
-{
-	bfa->trcmod = trcmod;
-}
-
-void
-bfa_init_plog(struct bfa_s *bfa, struct bfa_plog_s *plog)
-{
-	bfa->plog = plog;
-}
-
-/*
- * Initialize IOC.
- *
- * This function will return immediately, when the IOC initialization is
- * completed, the bfa_cb_init() will be called.
- *
- * @param[in]	bfa	instance
- *
- * @return void
- *
- * Special Considerations:
- *
- * @note
- * When this function returns, the driver should register the interrupt service
- * routine(s) and enable the device interrupts. If this is not done,
- * bfa_cb_init() will never get called
- */
-void
-bfa_init(struct bfa_s *bfa)
-{
-	bfa_iocfc_init(bfa);
-}
-
-/*
- * Use this function initiate the IOC configuration setup. This function
- * will return immediately.
- *
- * @param[in]	bfa	instance
- *
- * @return None
- */
-void
-bfa_start(struct bfa_s *bfa)
-{
-	bfa_iocfc_start(bfa);
-}
-
-/*
- * Use this function quiese the IOC. This function will return immediately,
- * when the IOC is actually stopped, the bfad->comp will be set.
- *
- * @param[in]bfa - pointer to bfa_t.
- *
- * @return None
- *
- * Special Considerations:
- * bfad->comp can be set before or after bfa_stop() returns.
- *
- * @note
- * In case of any failure, we could handle it automatically by doing a
- * reset and then succeed the bfa_stop() call.
- */
-void
-bfa_stop(struct bfa_s *bfa)
-{
-	bfa_iocfc_stop(bfa);
+	bfa_ioc_detach(&bfa->ioc);
 }
 
 void
@@ -1237,20 +1172,6 @@ bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q)
 	}
 }
 
-void
-bfa_attach_fcs(struct bfa_s *bfa)
-{
-	bfa->fcs = BFA_TRUE;
-}
-
-/*
- * Periodic timer heart beat from driver
- */
-void
-bfa_timer_tick(struct bfa_s *bfa)
-{
-	bfa_timer_beat(&bfa->timer_mod);
-}
 
 /*
  * Return the list of PCI vendor/device id lists supported by this
@@ -1321,89 +1242,3 @@ bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg)
 	cfg->drvcfg.num_rspq_elems = BFA_RSPQ_NELEMS_MIN;
 	cfg->drvcfg.min_cfg	   = BFA_TRUE;
 }
-
-void
-bfa_get_attr(struct bfa_s *bfa, struct bfa_ioc_attr_s *ioc_attr)
-{
-	bfa_ioc_get_attr(&bfa->ioc, ioc_attr);
-}
-
-/*
- * Retrieve firmware trace information on IOC failure.
- */
-bfa_status_t
-bfa_debug_fwsave(struct bfa_s *bfa, void *trcdata, int *trclen)
-{
-	return bfa_ioc_debug_fwsave(&bfa->ioc, trcdata, trclen);
-}
-
-/*
- * Clear the saved firmware trace information of an IOC.
- */
-void
-bfa_debug_fwsave_clear(struct bfa_s *bfa)
-{
-	bfa_ioc_debug_fwsave_clear(&bfa->ioc);
-}
-
-/*
- * Fetch firmware trace data.
- *
- * @param[in]		bfa			BFA instance
- * @param[out]		trcdata		Firmware trace buffer
- * @param[in,out]	trclen		Firmware trace buffer len
- *
- * @retval BFA_STATUS_OK			Firmware trace is fetched.
- * @retval BFA_STATUS_INPROGRESS	Firmware trace fetch is in progress.
- */
-bfa_status_t
-bfa_debug_fwtrc(struct bfa_s *bfa, void *trcdata, int *trclen)
-{
-	return bfa_ioc_debug_fwtrc(&bfa->ioc, trcdata, trclen);
-}
-
-/*
- * Dump firmware memory.
- *
- * @param[in]		bfa		BFA instance
- * @param[out]		buf		buffer for dump
- * @param[in,out]	offset		smem offset to start read
- * @param[in,out]	buflen		length of buffer
- *
- * @retval BFA_STATUS_OK		Firmware memory is dumped.
- * @retval BFA_STATUS_INPROGRESS	Firmware memory dump is in progress.
- */
-bfa_status_t
-bfa_debug_fwcore(struct bfa_s *bfa, void *buf, u32 *offset, int *buflen)
-{
-	return bfa_ioc_debug_fwcore(&bfa->ioc, buf, offset, buflen);
-}
-/*
- * Reset hw semaphore & usage cnt regs and initialize.
- */
-void
-bfa_chip_reset(struct bfa_s *bfa)
-{
-	bfa_ioc_ownership_reset(&bfa->ioc);
-	bfa_ioc_pll_init(&bfa->ioc);
-}
-
-/*
- * Fetch firmware statistics data.
- *
- * @param[in]		bfa		BFA instance
- * @param[out]		data		Firmware stats buffer
- *
- * @retval BFA_STATUS_OK		Firmware trace is fetched.
- */
-bfa_status_t
-bfa_fw_stats_get(struct bfa_s *bfa, void *data)
-{
-	return bfa_ioc_fw_stats_get(&bfa->ioc, data);
-}
-
-bfa_status_t
-bfa_fw_stats_clear(struct bfa_s *bfa)
-{
-	return bfa_ioc_fw_stats_clear(&bfa->ioc);
-}
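
The bfa_core.c hunks above convert the driver's private bfa_assert() into the kernel's WARN_ON(). The two fire on opposite senses of their argument, which is why every converted call site inverts its condition (for example, bfa_assert(num_cqs <= BFI_IOC_MAX_CQS) becomes WARN_ON(num_cqs > BFI_IOC_MAX_CQS)). A minimal user-space sketch of the pattern, with report() standing in for the kernel's warning machinery (neither macro below is the real kernel implementation):

#include <stdio.h>

static void report(const char *expr) { printf("triggered: %s\n", expr); }

/* The old driver macro complained when its condition was FALSE ... */
#define OLD_BFA_ASSERT(cond)	do { if (!(cond)) report(#cond); } while (0)
/* ... whereas WARN_ON() complains when its condition is TRUE. */
#define DEMO_WARN_ON(cond)	do { if (cond) report(#cond); } while (0)

int main(void)
{
	int num_cqs = 9, max_cqs = 8;	/* deliberately out of range */

	OLD_BFA_ASSERT(num_cqs <= max_cqs);	/* old style */
	DEMO_WARN_ON(num_cqs > max_cqs);	/* equivalent converted form */
	return 0;
}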

+ 49 - 47
drivers/scsi/bfa/bfa_cs.h

@@ -22,7 +22,7 @@
 #ifndef __BFA_CS_H__
 #define __BFA_CS_H__
 
-#include "bfa_os_inc.h"
+#include "bfad_drv.h"
 
 /*
  * BFA TRC
@@ -32,12 +32,20 @@
 #define BFA_TRC_MAX	(4 * 1024)
 #endif
 
+#define BFA_TRC_TS(_trcm)                               \
+	({                                              \
+		struct timeval tv;                      \
+							\
+		do_gettimeofday(&tv);                   \
+		(tv.tv_sec*1000000+tv.tv_usec);         \
+	})
+
 #ifndef BFA_TRC_TS
 #define BFA_TRC_TS(_trcm)	((_trcm)->ticks++)
 #endif
 
 struct bfa_trc_s {
-#ifdef __BIGENDIAN
+#ifdef __BIG_ENDIAN
 	u16	fileno;
 	u16	line;
 #else
@@ -99,13 +107,6 @@ bfa_trc_stop(struct bfa_trc_mod_s *trcm)
 	trcm->stopped = 1;
 }
 
-#ifdef FWTRC
-extern void dc_flush(void *data);
-#else
-#define dc_flush(data)
-#endif
-
-
 static inline void
 __bfa_trc(struct bfa_trc_mod_s *trcm, int fileno, int line, u64 data)
 {
@@ -119,12 +120,10 @@ __bfa_trc(struct bfa_trc_mod_s *trcm, int fileno, int line, u64 data)
 	trc->line = (u16) line;
 	trc->data.u64 = data;
 	trc->timestamp = BFA_TRC_TS(trcm);
-	dc_flush(trc);
 
 	trcm->tail = (trcm->tail + 1) & (BFA_TRC_MAX - 1);
 	if (trcm->tail == trcm->head)
 		trcm->head = (trcm->head + 1) & (BFA_TRC_MAX - 1);
-	dc_flush(trcm);
 }
 
 
@@ -141,42 +140,18 @@ __bfa_trc32(struct bfa_trc_mod_s *trcm, int fileno, int line, u32 data)
 	trc->line = (u16) line;
 	trc->data.u32.u32 = data;
 	trc->timestamp = BFA_TRC_TS(trcm);
-	dc_flush(trc);
 
 	trcm->tail = (trcm->tail + 1) & (BFA_TRC_MAX - 1);
 	if (trcm->tail == trcm->head)
 		trcm->head = (trcm->head + 1) & (BFA_TRC_MAX - 1);
-	dc_flush(trcm);
 }
 
-#ifndef BFA_PERF_BUILD
-#define bfa_trc_fp(_trcp, _data)	bfa_trc(_trcp, _data)
-#else
-#define bfa_trc_fp(_trcp, _data)
-#endif
-
-/*
- * @ BFA LOG interfaces
- */
-#define bfa_assert(__cond)	do {					\
-	if (!(__cond)) {						\
-		printk(KERN_ERR "assert(%s) failed at %s:%d\n",         \
-		#__cond, __FILE__, __LINE__);				\
-	}								\
-} while (0)
-
 #define bfa_sm_fault(__mod, __event)	do {				\
 	bfa_trc(__mod, (((u32)0xDEAD << 16) | __event));		\
 	printk(KERN_ERR	"Assertion failure: %s:%d: %d",			\
 		__FILE__, __LINE__, (__event));				\
 } while (0)
 
-#ifndef BFA_PERF_BUILD
-#define bfa_assert_fp(__cond)	bfa_assert(__cond)
-#else
-#define bfa_assert_fp(__cond)
-#endif
-
 /* BFA queue definitions */
 #define bfa_q_first(_q) ((void *)(((struct list_head *) (_q))->next))
 #define bfa_q_next(_qe) (((struct list_head *) (_qe))->next)
@@ -199,7 +174,6 @@ __bfa_trc32(struct bfa_trc_mod_s *trcm, int fileno, int line, u32 data)
 		bfa_q_prev(bfa_q_next(*((struct list_head **) _qe))) =	\
 				(struct list_head *) (_q);		\
 		bfa_q_next(_q) = bfa_q_next(*((struct list_head **) _qe));\
-		BFA_Q_DBG_INIT(*((struct list_head **) _qe));		\
 	} else {							\
 		*((struct list_head **) (_qe)) = (struct list_head *) NULL;\
 	}								\
@@ -214,7 +188,6 @@ __bfa_trc32(struct bfa_trc_mod_s *trcm, int fileno, int line, u32 data)
 		bfa_q_next(bfa_q_prev(*((struct list_head **) _qe))) =	\
 			(struct list_head *) (_q);			\
 		bfa_q_prev(_q) = bfa_q_prev(*(struct list_head **) _qe);\
-		BFA_Q_DBG_INIT(*((struct list_head **) _qe));		\
 	} else {							\
 		*((struct list_head **) (_qe)) = (struct list_head *) NULL;\
 	}								\
@@ -236,16 +209,6 @@ bfa_q_is_on_q_func(struct list_head *q, struct list_head *qe)
 	return 0;
 }
 
-/*
- * #ifdef BFA_DEBUG (Using bfa_assert to check for debug_build is not
- * consistent across modules)
- */
-#ifndef BFA_PERF_BUILD
-#define BFA_Q_DBG_INIT(_qe) bfa_q_qe_init(_qe)
-#else
-#define BFA_Q_DBG_INIT(_qe)
-#endif
-
 #define bfa_q_is_on_q(_q, _qe)      \
 	bfa_q_is_on_q_func(_q, (struct list_head *)(_qe))
 
@@ -361,4 +324,43 @@ bfa_wc_wait(struct bfa_wc_s *wc)
 	bfa_wc_down(wc);
 }
 
+static inline void
+wwn2str(char *wwn_str, u64 wwn)
+{
+	union {
+		u64 wwn;
+		u8 byte[8];
+	} w;
+
+	w.wwn = wwn;
+	sprintf(wwn_str, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x", w.byte[0],
+		w.byte[1], w.byte[2], w.byte[3], w.byte[4], w.byte[5],
+		w.byte[6], w.byte[7]);
+}
+
+static inline void
+fcid2str(char *fcid_str, u32 fcid)
+{
+	union {
+		u32 fcid;
+		u8 byte[4];
+	} f;
+
+	f.fcid = fcid;
+	sprintf(fcid_str, "%02x:%02x:%02x", f.byte[1], f.byte[2], f.byte[3]);
+}
+
+#define bfa_swap_3b(_x)				\
+	((((_x) & 0xff) << 16) |		\
+	((_x) & 0x00ff00) |			\
+	(((_x) & 0xff0000) >> 16))
+
+#ifndef __BIG_ENDIAN
+#define bfa_hton3b(_x)  bfa_swap_3b(_x)
+#else
+#define bfa_hton3b(_x)  (_x)
+#endif
+
+#define bfa_ntoh3b(_x)  bfa_hton3b(_x)
+
 #endif /* __BFA_CS_H__ */
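
bfa_cs.h now carries the wwn2str()/fcid2str() formatters and the 3-byte byte-order helpers shown above. A user-space usage sketch follows, rebuilt with stdint types so it compiles outside the kernel; the driver versions use u64/u32/u8, and as in the original union trick the formatted output follows host byte order.

#include <stdio.h>
#include <stdint.h>

/* Same swap as bfa_swap_3b() above, renamed to mark it as a copy. */
#define demo_swap_3b(_x)			\
	((((_x) & 0xff) << 16) |		\
	((_x) & 0x00ff00) |			\
	(((_x) & 0xff0000) >> 16))

int main(void)
{
	union { uint64_t wwn; uint8_t byte[8]; } w = { .wwn = 0x100000051e0f2830ULL };
	char wwn_str[24];

	/* Same formatting as wwn2str(): eight colon-separated bytes. */
	sprintf(wwn_str, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
		w.byte[0], w.byte[1], w.byte[2], w.byte[3],
		w.byte[4], w.byte[5], w.byte[6], w.byte[7]);
	printf("wwn -> %s\n", wwn_str);

	/* bfa_hton3b() reduces to this swap on little-endian hosts:
	 * a 24-bit FC address such as 0x123456 becomes 0x563412. */
	printf("3b  -> 0x%06x\n", (unsigned)demo_swap_3b(0x123456u));
	return 0;
}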

+ 3 - 3
drivers/scsi/bfa/bfa_defs.h

@@ -19,7 +19,7 @@
 #define __BFA_DEFS_H__
 
 #include "bfa_fc.h"
-#include "bfa_os_inc.h"
+#include "bfad_drv.h"
 
 #define BFA_MFG_SERIALNUM_SIZE                  11
 #define STRSZ(_n)                               (((_n) + 4) & ~3)
@@ -446,8 +446,8 @@ enum bfa_boot_bootopt {
  * Boot lun information.
  */
 struct bfa_boot_bootlun_s {
-	wwn_t   pwwn;   /*  port wwn of target */
-	lun_t   lun;    /*  64-bit lun */
+	wwn_t   pwwn;		/*  port wwn of target */
+	struct scsi_lun   lun;  /*  64-bit lun */
 };
 #pragma pack()
 

+ 4 - 4
drivers/scsi/bfa/bfa_defs_svc.h

@@ -34,8 +34,8 @@
 struct bfa_iocfc_intr_attr_s {
 	u8		coalesce;	/*  enable/disable coalescing */
 	u8		rsvd[3];
-	u16	latency;	/*  latency in microseconds   */
-	u16	delay;		/*  delay in microseconds     */
+	__be16	latency;	/*  latency in microseconds   */
+	__be16	delay;		/*  delay in microseconds     */
 };
 
 /*
@@ -743,7 +743,7 @@ struct bfa_port_cfg_s {
 	u8	 qos_enabled;	/*  qos enabled or not		*/
 	u8	 cfg_hardalpa;	/*  is hard alpa configured	*/
 	u8	 hardalpa;	/*  configured hard alpa	*/
-	u16 maxfrsize;	/*  maximum frame size		*/
+	__be16	 maxfrsize;	/*  maximum frame size		*/
 	u8	 rx_bbcredit;	/*  receive buffer credits	*/
 	u8	 tx_bbcredit;	/*  transmit buffer credits	*/
 	u8	 ratelimit;	/*  ratelimit enabled or not	*/
@@ -843,7 +843,7 @@ struct bfa_fcport_fcf_s {
 	u8	 fka_disabled;   /*  FKA is disabled	  */
 	u8	 maxsz_verified; /*  FCoE max size verified   */
 	u8	 fc_map[3];      /*  FC map		   */
-	u16	vlan;	   /*  FCoE vlan tag/priority   */
+	__be16	 vlan;	   /*  FCoE vlan tag/priority   */
 	u32	fka_adv_per;    /*  FIP  ka advert. period   */
 	mac_t	   mac;	    /*  FCF mac		  */
 };
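
The u16 to __be16 changes in bfa_defs_svc.h mark wire-format fields with sparse's big-endian bitwise type, so a build with endianness checking enabled can flag assignments that skip cpu_to_be16()/be16_to_cpu(). A small user-space sketch of the discipline being enforced; the struct and field names here are made up, and htons()/ntohs() stand in for the kernel helpers:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

struct demo_intr_attr {
	uint16_t latency_be;	/* stored big-endian, like the __be16 field */
};

int main(void)
{
	struct demo_intr_attr a;
	uint16_t usecs = 200;

	a.latency_be = htons(usecs);	/* convert on store ... */
	printf("latency: %u microseconds\n", ntohs(a.latency_be));	/* ... and on load */
	return 0;
}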

+ 0 - 107
drivers/scsi/bfa/bfa_drv.c

@@ -1,107 +0,0 @@
-/*
- * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- *
- * Linux driver for Brocade Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- */
-
-#include "bfa_modules.h"
-
-/*
- * BFA module list terminated by NULL
- */
-struct bfa_module_s *hal_mods[] = {
-	&hal_mod_sgpg,
-	&hal_mod_fcport,
-	&hal_mod_fcxp,
-	&hal_mod_lps,
-	&hal_mod_uf,
-	&hal_mod_rport,
-	&hal_mod_fcpim,
-	NULL
-};
-
-/*
- * Message handlers for various modules.
- */
-bfa_isr_func_t  bfa_isrs[BFI_MC_MAX] = {
-	bfa_isr_unhandled,	/* NONE */
-	bfa_isr_unhandled,	/* BFI_MC_IOC */
-	bfa_isr_unhandled,	/* BFI_MC_DIAG */
-	bfa_isr_unhandled,	/* BFI_MC_FLASH */
-	bfa_isr_unhandled,	/* BFI_MC_CEE */
-	bfa_fcport_isr,		/* BFI_MC_FCPORT */
-	bfa_isr_unhandled,	/* BFI_MC_IOCFC */
-	bfa_isr_unhandled,	/* BFI_MC_LL */
-	bfa_uf_isr,		/* BFI_MC_UF */
-	bfa_fcxp_isr,		/* BFI_MC_FCXP */
-	bfa_lps_isr,		/* BFI_MC_LPS */
-	bfa_rport_isr,		/* BFI_MC_RPORT */
-	bfa_itnim_isr,		/* BFI_MC_ITNIM */
-	bfa_isr_unhandled,	/* BFI_MC_IOIM_READ */
-	bfa_isr_unhandled,	/* BFI_MC_IOIM_WRITE */
-	bfa_isr_unhandled,	/* BFI_MC_IOIM_IO */
-	bfa_ioim_isr,		/* BFI_MC_IOIM */
-	bfa_ioim_good_comp_isr,	/* BFI_MC_IOIM_IOCOM */
-	bfa_tskim_isr,		/* BFI_MC_TSKIM */
-	bfa_isr_unhandled,	/* BFI_MC_SBOOT */
-	bfa_isr_unhandled,	/* BFI_MC_IPFC */
-	bfa_isr_unhandled,	/* BFI_MC_PORT */
-	bfa_isr_unhandled,	/* --------- */
-	bfa_isr_unhandled,	/* --------- */
-	bfa_isr_unhandled,	/* --------- */
-	bfa_isr_unhandled,	/* --------- */
-	bfa_isr_unhandled,	/* --------- */
-	bfa_isr_unhandled,	/* --------- */
-	bfa_isr_unhandled,	/* --------- */
-	bfa_isr_unhandled,	/* --------- */
-	bfa_isr_unhandled,	/* --------- */
-	bfa_isr_unhandled,	/* --------- */
-};
-
-
-/*
- * Message handlers for mailbox command classes
- */
-bfa_ioc_mbox_mcfunc_t  bfa_mbox_isrs[BFI_MC_MAX] = {
-	NULL,
-	NULL,			/* BFI_MC_IOC   */
-	NULL,			/* BFI_MC_DIAG  */
-	NULL,		/* BFI_MC_FLASH */
-	NULL,			/* BFI_MC_CEE   */
-	NULL,			/* BFI_MC_PORT  */
-	bfa_iocfc_isr,		/* BFI_MC_IOCFC */
-	NULL,
-};
-
-
-
-void
-bfa_com_port_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi)
-{
-	struct bfa_port_s	*port = &bfa->modules.port;
-	u32		dm_len;
-	u8			*dm_kva;
-	u64		dm_pa;
-
-	dm_len = bfa_port_meminfo();
-	dm_kva = bfa_meminfo_dma_virt(mi);
-	dm_pa  = bfa_meminfo_dma_phys(mi);
-
-	memset(port, 0, sizeof(struct bfa_port_s));
-	bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod);
-	bfa_port_mem_claim(port, dm_kva, dm_pa);
-
-	bfa_meminfo_dma_virt(mi) = dm_kva + dm_len;
-	bfa_meminfo_dma_phys(mi) = dm_pa + dm_len;
-}

These changes were hidden because the diff is too large
+ 151 - 457
drivers/scsi/bfa/bfa_fc.h


+ 52 - 57
drivers/scsi/bfa/bfa_fcbuild.c

@@ -18,16 +18,16 @@
  * fcbuild.c - FC link service frame building and parsing routines
  */
 
-#include "bfa_os_inc.h"
+#include "bfad_drv.h"
 #include "bfa_fcbuild.h"
 
 /*
  * static build functions
  */
 static void     fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
-				 u16 ox_id);
+				 __be16 ox_id);
 static void     fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
-				 u16 ox_id);
+				 __be16 ox_id);
 static struct fchs_s fc_els_req_tmpl;
 static struct fchs_s fc_els_rsp_tmpl;
 static struct fchs_s fc_bls_req_tmpl;
@@ -48,7 +48,7 @@ fcbuild_init(void)
 	fc_els_req_tmpl.cat_info = FC_CAT_LD_REQUEST;
 	fc_els_req_tmpl.type = FC_TYPE_ELS;
 	fc_els_req_tmpl.f_ctl =
-		bfa_os_hton3b(FCTL_SEQ_INI | FCTL_FS_EXCH | FCTL_END_SEQ |
+		bfa_hton3b(FCTL_SEQ_INI | FCTL_FS_EXCH | FCTL_END_SEQ |
 			      FCTL_SI_XFER);
 	fc_els_req_tmpl.rx_id = FC_RXID_ANY;
 
@@ -59,7 +59,7 @@ fcbuild_init(void)
 	fc_els_rsp_tmpl.cat_info = FC_CAT_LD_REPLY;
 	fc_els_rsp_tmpl.type = FC_TYPE_ELS;
 	fc_els_rsp_tmpl.f_ctl =
-		bfa_os_hton3b(FCTL_EC_RESP | FCTL_SEQ_INI | FCTL_LS_EXCH |
+		bfa_hton3b(FCTL_EC_RESP | FCTL_SEQ_INI | FCTL_LS_EXCH |
 			      FCTL_END_SEQ | FCTL_SI_XFER);
 	fc_els_rsp_tmpl.rx_id = FC_RXID_ANY;
 
@@ -68,7 +68,7 @@ fcbuild_init(void)
 	 */
 	fc_bls_req_tmpl.routing = FC_RTG_BASIC_LINK;
 	fc_bls_req_tmpl.type = FC_TYPE_BLS;
-	fc_bls_req_tmpl.f_ctl = bfa_os_hton3b(FCTL_END_SEQ | FCTL_SI_XFER);
+	fc_bls_req_tmpl.f_ctl = bfa_hton3b(FCTL_END_SEQ | FCTL_SI_XFER);
 	fc_bls_req_tmpl.rx_id = FC_RXID_ANY;
 
 	/*
@@ -78,7 +78,7 @@ fcbuild_init(void)
 	fc_bls_rsp_tmpl.cat_info = FC_CAT_BA_ACC;
 	fc_bls_rsp_tmpl.type = FC_TYPE_BLS;
 	fc_bls_rsp_tmpl.f_ctl =
-		bfa_os_hton3b(FCTL_EC_RESP | FCTL_SEQ_INI | FCTL_LS_EXCH |
+		bfa_hton3b(FCTL_EC_RESP | FCTL_SEQ_INI | FCTL_LS_EXCH |
 			      FCTL_END_SEQ | FCTL_SI_XFER);
 	fc_bls_rsp_tmpl.rx_id = FC_RXID_ANY;
 
@@ -129,7 +129,7 @@ fcbuild_init(void)
 	fcp_fchs_tmpl.cat_info = FC_CAT_UNSOLICIT_CMD;
 	fcp_fchs_tmpl.type = FC_TYPE_FCP;
 	fcp_fchs_tmpl.f_ctl =
-		bfa_os_hton3b(FCTL_FS_EXCH | FCTL_END_SEQ | FCTL_SI_XFER);
+		bfa_hton3b(FCTL_FS_EXCH | FCTL_END_SEQ | FCTL_SI_XFER);
 	fcp_fchs_tmpl.seq_id = 1;
 	fcp_fchs_tmpl.rx_id = FC_RXID_ANY;
 }
@@ -143,7 +143,7 @@ fc_gs_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u32 ox_id)
 	fchs->cat_info = FC_CAT_UNSOLICIT_CTRL;
 	fchs->type = FC_TYPE_SERVICES;
 	fchs->f_ctl =
-		bfa_os_hton3b(FCTL_SEQ_INI | FCTL_FS_EXCH | FCTL_END_SEQ |
+		bfa_hton3b(FCTL_SEQ_INI | FCTL_FS_EXCH | FCTL_END_SEQ |
 			      FCTL_SI_XFER);
 	fchs->rx_id = FC_RXID_ANY;
 	fchs->d_id = (d_id);
@@ -157,7 +157,7 @@ fc_gs_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u32 ox_id)
 }
 
 void
-fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id)
+fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id)
 {
 	memcpy(fchs, &fc_els_req_tmpl, sizeof(struct fchs_s));
 	fchs->d_id = (d_id);
@@ -166,7 +166,7 @@ fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id)
 }
 
 static void
-fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id)
+fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id)
 {
 	memcpy(fchs, &fc_els_rsp_tmpl, sizeof(struct fchs_s));
 	fchs->d_id = d_id;
@@ -196,7 +196,7 @@ fc_els_rsp_parse(struct fchs_s *fchs, int len)
 }
 
 static void
-fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id)
+fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id)
 {
 	memcpy(fchs, &fc_bls_rsp_tmpl, sizeof(struct fchs_s));
 	fchs->d_id = d_id;
@@ -206,7 +206,7 @@ fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id)
 
 static          u16
 fc_plogi_x_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
-		 u16 ox_id, wwn_t port_name, wwn_t node_name,
+		 __be16 ox_id, wwn_t port_name, wwn_t node_name,
 		 u16 pdu_size, u8 els_code)
 {
 	struct fc_logi_s *plogi = (struct fc_logi_s *) (pld);
@@ -232,8 +232,8 @@ fc_flogi_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
 		u16 ox_id, wwn_t port_name, wwn_t node_name, u16 pdu_size,
 	       u8 set_npiv, u8 set_auth, u16 local_bb_credits)
 {
-	u32        d_id = bfa_os_hton3b(FC_FABRIC_PORT);
-	u32	*vvl_info;
+	u32        d_id = bfa_hton3b(FC_FABRIC_PORT);
+	__be32	*vvl_info;
 
 	memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s));
 
@@ -267,7 +267,7 @@ fc_flogi_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
 
 u16
 fc_flogi_acc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
-		   u16 ox_id, wwn_t port_name, wwn_t node_name,
+		   __be16 ox_id, wwn_t port_name, wwn_t node_name,
 		   u16 pdu_size, u16 local_bb_credits)
 {
 	u32        d_id = 0;
@@ -289,7 +289,7 @@ u16
 fc_fdisc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
 		u16 ox_id, wwn_t port_name, wwn_t node_name, u16 pdu_size)
 {
-	u32        d_id = bfa_os_hton3b(FC_FABRIC_PORT);
+	u32        d_id = bfa_hton3b(FC_FABRIC_PORT);
 
 	memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s));
 
@@ -392,7 +392,7 @@ fc_prli_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
 
 u16
 fc_prli_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
-		  u16 ox_id, enum bfa_lport_role role)
+		  __be16 ox_id, enum bfa_lport_role role)
 {
 	struct fc_prli_s *prli = (struct fc_prli_s *) (pld);
 
@@ -456,9 +456,9 @@ fc_logo_build(struct fchs_s *fchs, struct fc_logo_s *logo, u32 d_id, u32 s_id,
 	return sizeof(struct fc_logo_s);
 }
 
-static          u16
+static u16
 fc_adisc_x_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id,
-		 u32 s_id, u16 ox_id, wwn_t port_name,
+		 u32 s_id, __be16 ox_id, wwn_t port_name,
 		 wwn_t node_name, u8 els_code)
 {
 	memset(adisc, '\0', sizeof(struct fc_adisc_s));
@@ -480,7 +480,7 @@ fc_adisc_x_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id,
 
 u16
 fc_adisc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id,
-		u32 s_id, u16 ox_id, wwn_t port_name, wwn_t node_name)
+		u32 s_id, __be16 ox_id, wwn_t port_name, wwn_t node_name)
 {
 	return fc_adisc_x_build(fchs, adisc, d_id, s_id, ox_id, port_name,
 				node_name, FC_ELS_ADISC);
@@ -488,7 +488,7 @@ fc_adisc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id,
 
 u16
 fc_adisc_acc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id,
-		   u32 s_id, u16 ox_id, wwn_t port_name,
+		   u32 s_id, __be16 ox_id, wwn_t port_name,
 		   wwn_t node_name)
 {
 	return fc_adisc_x_build(fchs, adisc, d_id, s_id, ox_id, port_name,
@@ -592,7 +592,7 @@ fc_rrq_build(struct fchs_s *fchs, struct fc_rrq_s *rrq, u32 d_id, u32 s_id,
 
 u16
 fc_logo_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
-		  u16 ox_id)
+		  __be16 ox_id)
 {
 	struct fc_els_cmd_s *acc = pld;
 
@@ -606,7 +606,7 @@ fc_logo_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
 
 u16
 fc_ls_rjt_build(struct fchs_s *fchs, struct fc_ls_rjt_s *ls_rjt, u32 d_id,
-		u32 s_id, u16 ox_id, u8 reason_code,
+		u32 s_id, __be16 ox_id, u8 reason_code,
 		u8 reason_code_expl)
 {
 	fc_els_rsp_build(fchs, d_id, s_id, ox_id);
@@ -622,7 +622,7 @@ fc_ls_rjt_build(struct fchs_s *fchs, struct fc_ls_rjt_s *ls_rjt, u32 d_id,
 
 u16
 fc_ba_acc_build(struct fchs_s *fchs, struct fc_ba_acc_s *ba_acc, u32 d_id,
-		u32 s_id, u16 ox_id, u16 rx_id)
+		u32 s_id, __be16 ox_id, u16 rx_id)
 {
 	fc_bls_rsp_build(fchs, d_id, s_id, ox_id);
 
@@ -638,7 +638,7 @@ fc_ba_acc_build(struct fchs_s *fchs, struct fc_ba_acc_s *ba_acc, u32 d_id,
 
 u16
 fc_ls_acc_build(struct fchs_s *fchs, struct fc_els_cmd_s *els_cmd, u32 d_id,
-		u32 s_id, u16 ox_id)
+		u32 s_id, __be16 ox_id)
 {
 	fc_els_rsp_build(fchs, d_id, s_id, ox_id);
 	memset(els_cmd, 0, sizeof(struct fc_els_cmd_s));
@@ -666,7 +666,7 @@ fc_logout_params_pages(struct fchs_s *fc_frame, u8 els_code)
 
 u16
 fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc,
-		u32 d_id, u32 s_id, u16 ox_id, int num_pages)
+		u32 d_id, u32 s_id, __be16 ox_id, int num_pages)
 {
 	int             page;
 
@@ -690,7 +690,7 @@ fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc,
 
 u16
 fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc, u32 d_id,
-		  u32 s_id, u16 ox_id, int num_pages)
+		  u32 s_id, __be16 ox_id, int num_pages)
 {
 	int             page;
 
@@ -728,7 +728,7 @@ fc_rnid_build(struct fchs_s *fchs, struct fc_rnid_cmd_s *rnid, u32 d_id,
 
 u16
 fc_rnid_acc_build(struct fchs_s *fchs, struct fc_rnid_acc_s *rnid_acc, u32 d_id,
-		  u32 s_id, u16 ox_id, u32 data_format,
+		  u32 s_id, __be16 ox_id, u32 data_format,
 		  struct fc_rnid_common_id_data_s *common_id_data,
 		  struct fc_rnid_general_topology_data_s *gen_topo_data)
 {
@@ -770,10 +770,10 @@ u16
 fc_rpsc2_build(struct fchs_s *fchs, struct fc_rpsc2_cmd_s *rpsc2, u32 d_id,
 		u32 s_id, u32 *pid_list, u16 npids)
 {
-	u32 dctlr_id = FC_DOMAIN_CTRLR(bfa_os_hton3b(d_id));
+	u32 dctlr_id = FC_DOMAIN_CTRLR(bfa_hton3b(d_id));
 	int i = 0;
 
-	fc_els_req_build(fchs, bfa_os_hton3b(dctlr_id), s_id, 0);
+	fc_els_req_build(fchs, bfa_hton3b(dctlr_id), s_id, 0);
 
 	memset(rpsc2, 0, sizeof(struct fc_rpsc2_cmd_s));
 
@@ -788,7 +788,7 @@ fc_rpsc2_build(struct fchs_s *fchs, struct fc_rpsc2_cmd_s *rpsc2, u32 d_id,
 
 u16
 fc_rpsc_acc_build(struct fchs_s *fchs, struct fc_rpsc_acc_s *rpsc_acc,
-		u32 d_id, u32 s_id, u16 ox_id,
+		u32 d_id, u32 s_id, __be16 ox_id,
 		  struct fc_rpsc_speed_info_s *oper_speed)
 {
 	memset(rpsc_acc, 0, sizeof(struct fc_rpsc_acc_s));
@@ -807,11 +807,6 @@ fc_rpsc_acc_build(struct fchs_s *fchs, struct fc_rpsc_acc_s *rpsc_acc,
 	return sizeof(struct fc_rpsc_acc_s);
 }
 
-/*
- * TBD -
- * . get rid of unnecessary memsets
- */
-
 u16
 fc_logo_rsp_parse(struct fchs_s *fchs, int len)
 {
@@ -995,7 +990,7 @@ fc_rrq_rsp_parse(struct fchs_s *fchs, int len)
 }
 
 u16
-fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
+fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id,
 		u32 reason_code, u32 reason_expl)
 {
 	struct fc_ba_rjt_s *ba_rjt = (struct fc_ba_rjt_s *) (fchs + 1);
@@ -1045,7 +1040,7 @@ fc_gidpn_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
 {
 	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
 	struct fcgs_gidpn_req_s *gidpn = (struct fcgs_gidpn_req_s *)(cthdr + 1);
-	u32        d_id = bfa_os_hton3b(FC_NAME_SERVER);
+	u32        d_id = bfa_hton3b(FC_NAME_SERVER);
 
 	fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
 	fc_gs_cthdr_build(cthdr, s_id, GS_GID_PN);
@@ -1061,7 +1056,7 @@ fc_gpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
 {
 	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
 	fcgs_gpnid_req_t *gpnid = (fcgs_gpnid_req_t *) (cthdr + 1);
-	u32        d_id = bfa_os_hton3b(FC_NAME_SERVER);
+	u32        d_id = bfa_hton3b(FC_NAME_SERVER);
 
 	fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
 	fc_gs_cthdr_build(cthdr, s_id, GS_GPN_ID);
@@ -1077,7 +1072,7 @@ fc_gnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
 {
 	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
 	fcgs_gnnid_req_t *gnnid = (fcgs_gnnid_req_t *) (cthdr + 1);
-	u32        d_id = bfa_os_hton3b(FC_NAME_SERVER);
+	u32        d_id = bfa_hton3b(FC_NAME_SERVER);
 
 	fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
 	fc_gs_cthdr_build(cthdr, s_id, GS_GNN_ID);
@@ -1104,7 +1099,7 @@ u16
 fc_scr_build(struct fchs_s *fchs, struct fc_scr_s *scr,
 		u8 set_br_reg, u32 s_id, u16 ox_id)
 {
-	u32        d_id = bfa_os_hton3b(FC_FABRIC_CONTROLLER);
+	u32        d_id = bfa_hton3b(FC_FABRIC_CONTROLLER);
 
 	fc_els_req_build(fchs, d_id, s_id, ox_id);
 
@@ -1121,7 +1116,7 @@ u16
 fc_rscn_build(struct fchs_s *fchs, struct fc_rscn_pl_s *rscn,
 		u32 s_id, u16 ox_id)
 {
-	u32        d_id = bfa_os_hton3b(FC_FABRIC_CONTROLLER);
+	u32        d_id = bfa_hton3b(FC_FABRIC_CONTROLLER);
 	u16        payldlen;
 
 	fc_els_req_build(fchs, d_id, s_id, ox_id);
@@ -1143,7 +1138,7 @@ fc_rftid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
 {
 	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
 	struct fcgs_rftid_req_s *rftid = (struct fcgs_rftid_req_s *)(cthdr + 1);
-	u32        type_value, d_id = bfa_os_hton3b(FC_NAME_SERVER);
+	u32        type_value, d_id = bfa_hton3b(FC_NAME_SERVER);
 	u8         index;
 
 	fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
@@ -1167,7 +1162,7 @@ fc_rftid_build_sol(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
 {
 	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
 	struct fcgs_rftid_req_s *rftid = (struct fcgs_rftid_req_s *)(cthdr + 1);
-	u32        d_id = bfa_os_hton3b(FC_NAME_SERVER);
+	u32        d_id = bfa_hton3b(FC_NAME_SERVER);
 
 	fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
 	fc_gs_cthdr_build(cthdr, s_id, GS_RFT_ID);
@@ -1187,7 +1182,7 @@ fc_rffid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
 {
 	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
 	struct fcgs_rffid_req_s *rffid = (struct fcgs_rffid_req_s *)(cthdr + 1);
-	u32         d_id = bfa_os_hton3b(FC_NAME_SERVER);
+	u32         d_id = bfa_hton3b(FC_NAME_SERVER);
 
 	fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
 	fc_gs_cthdr_build(cthdr, s_id, GS_RFF_ID);
@@ -1209,7 +1204,7 @@ fc_rspnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
 	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
 	struct fcgs_rspnid_req_s *rspnid =
 			(struct fcgs_rspnid_req_s *)(cthdr + 1);
-	u32        d_id = bfa_os_hton3b(FC_NAME_SERVER);
+	u32        d_id = bfa_hton3b(FC_NAME_SERVER);
 
 	fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
 	fc_gs_cthdr_build(cthdr, s_id, GS_RSPN_ID);
@@ -1229,7 +1224,7 @@ fc_gid_ft_build(struct fchs_s *fchs, void *pyld, u32 s_id, u8 fc4_type)
 
 	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
 	struct fcgs_gidft_req_s *gidft = (struct fcgs_gidft_req_s *)(cthdr + 1);
-	u32        d_id = bfa_os_hton3b(FC_NAME_SERVER);
+	u32        d_id = bfa_hton3b(FC_NAME_SERVER);
 
 	fc_gs_fchdr_build(fchs, d_id, s_id, 0);
 
@@ -1249,7 +1244,7 @@ fc_rpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
 {
 	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
 	struct fcgs_rpnid_req_s *rpnid = (struct fcgs_rpnid_req_s *)(cthdr + 1);
-	u32        d_id = bfa_os_hton3b(FC_NAME_SERVER);
+	u32        d_id = bfa_hton3b(FC_NAME_SERVER);
 
 	fc_gs_fchdr_build(fchs, d_id, s_id, 0);
 	fc_gs_cthdr_build(cthdr, s_id, GS_RPN_ID);
@@ -1267,7 +1262,7 @@ fc_rnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
 {
 	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
 	struct fcgs_rnnid_req_s *rnnid = (struct fcgs_rnnid_req_s *)(cthdr + 1);
-	u32        d_id = bfa_os_hton3b(FC_NAME_SERVER);
+	u32        d_id = bfa_hton3b(FC_NAME_SERVER);
 
 	fc_gs_fchdr_build(fchs, d_id, s_id, 0);
 	fc_gs_cthdr_build(cthdr, s_id, GS_RNN_ID);
@@ -1286,7 +1281,7 @@ fc_rcsid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
 	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
 	struct fcgs_rcsid_req_s *rcsid =
 			(struct fcgs_rcsid_req_s *) (cthdr + 1);
-	u32        d_id = bfa_os_hton3b(FC_NAME_SERVER);
+	u32        d_id = bfa_hton3b(FC_NAME_SERVER);
 
 	fc_gs_fchdr_build(fchs, d_id, s_id, 0);
 	fc_gs_cthdr_build(cthdr, s_id, GS_RCS_ID);
@@ -1304,7 +1299,7 @@ fc_rptid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
 {
 	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
 	struct fcgs_rptid_req_s *rptid = (struct fcgs_rptid_req_s *)(cthdr + 1);
-	u32        d_id = bfa_os_hton3b(FC_NAME_SERVER);
+	u32        d_id = bfa_hton3b(FC_NAME_SERVER);
 
 	fc_gs_fchdr_build(fchs, d_id, s_id, 0);
 	fc_gs_cthdr_build(cthdr, s_id, GS_RPT_ID);
@@ -1321,7 +1316,7 @@ fc_ganxt_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id)
 {
 	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
 	struct fcgs_ganxt_req_s *ganxt = (struct fcgs_ganxt_req_s *)(cthdr + 1);
-	u32        d_id = bfa_os_hton3b(FC_NAME_SERVER);
+	u32        d_id = bfa_hton3b(FC_NAME_SERVER);
 
 	fc_gs_fchdr_build(fchs, d_id, s_id, 0);
 	fc_gs_cthdr_build(cthdr, s_id, GS_GA_NXT);
@@ -1341,7 +1336,7 @@ fc_fdmi_reqhdr_build(struct fchs_s *fchs, void *pyld, u32 s_id,
 {
 
 	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
-	u32        d_id = bfa_os_hton3b(FC_MGMT_SERVER);
+	u32        d_id = bfa_hton3b(FC_MGMT_SERVER);
 
 	fc_gs_fchdr_build(fchs, d_id, s_id, 0);
 	fc_gs_fdmi_cthdr_build(cthdr, s_id, cmd_code);
@@ -1356,7 +1351,7 @@ void
 fc_get_fc4type_bitmask(u8 fc4_type, u8 *bit_mask)
 {
 	u8         index;
-	u32       *ptr = (u32 *) bit_mask;
+	__be32       *ptr = (__be32 *) bit_mask;
 	u32        type_value;
 
 	/*
@@ -1377,7 +1372,7 @@ fc_gmal_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn)
 {
 	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
 	fcgs_gmal_req_t *gmal = (fcgs_gmal_req_t *) (cthdr + 1);
-	u32        d_id = bfa_os_hton3b(FC_MGMT_SERVER);
+	u32        d_id = bfa_hton3b(FC_MGMT_SERVER);
 
 	fc_gs_fchdr_build(fchs, d_id, s_id, 0);
 	fc_gs_ms_cthdr_build(cthdr, s_id, GS_FC_GMAL_CMD,
@@ -1397,7 +1392,7 @@ fc_gfn_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn)
 {
 	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
 	fcgs_gfn_req_t *gfn = (fcgs_gfn_req_t *) (cthdr + 1);
-	u32        d_id = bfa_os_hton3b(FC_MGMT_SERVER);
+	u32        d_id = bfa_hton3b(FC_MGMT_SERVER);
 
 	fc_gs_fchdr_build(fchs, d_id, s_id, 0);
 	fc_gs_ms_cthdr_build(cthdr, s_id, GS_FC_GFN_CMD,

+ 15 - 15
drivers/scsi/bfa/bfa_fcbuild.h

@@ -21,7 +21,7 @@
 #ifndef __FCBUILD_H__
 #define __FCBUILD_H__
 
-#include "bfa_os_inc.h"
+#include "bfad_drv.h"
 #include "bfa_fc.h"
 #include "bfa_defs_fcs.h"
 
@@ -138,7 +138,7 @@ u16        fc_fdisc_build(struct fchs_s *buf, struct fc_logi_s *flogi, u32 s_id,
 			       u16 pdu_size);
 
 u16        fc_flogi_acc_build(struct fchs_s *fchs, struct fc_logi_s *flogi,
-				   u32 s_id, u16 ox_id,
+				   u32 s_id, __be16 ox_id,
 				   wwn_t port_name, wwn_t node_name,
 				   u16 pdu_size,
 				   u16 local_bb_credits);
@@ -186,7 +186,7 @@ u16        fc_plogi_acc_build(struct fchs_s *fchs, void *pld, u32 d_id,
 				   u16 pdu_size);
 
 u16        fc_adisc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc,
-			u32 d_id, u32 s_id, u16 ox_id, wwn_t port_name,
+			u32 d_id, u32 s_id, __be16 ox_id, wwn_t port_name,
 			       wwn_t node_name);
 
 enum fc_parse_status fc_adisc_parse(struct fchs_s *fchs, void *pld,
@@ -196,20 +196,20 @@ enum fc_parse_status fc_adisc_rsp_parse(struct fc_adisc_s *adisc, int len,
 				 wwn_t port_name, wwn_t node_name);
 
 u16        fc_adisc_acc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc,
-				   u32 d_id, u32 s_id, u16 ox_id,
+				   u32 d_id, u32 s_id, __be16 ox_id,
 				   wwn_t port_name, wwn_t node_name);
 u16        fc_ls_rjt_build(struct fchs_s *fchs, struct fc_ls_rjt_s *ls_rjt,
-				u32 d_id, u32 s_id, u16 ox_id,
+				u32 d_id, u32 s_id, __be16 ox_id,
 				u8 reason_code, u8 reason_code_expl);
 u16        fc_ls_acc_build(struct fchs_s *fchs, struct fc_els_cmd_s *els_cmd,
-				u32 d_id, u32 s_id, u16 ox_id);
+				u32 d_id, u32 s_id, __be16 ox_id);
 u16        fc_prli_build(struct fchs_s *fchs, void *pld, u32 d_id,
 			      u32 s_id, u16 ox_id);
 
 enum fc_parse_status fc_prli_rsp_parse(struct fc_prli_s *prli, int len);
 
 u16        fc_prli_acc_build(struct fchs_s *fchs, void *pld, u32 d_id,
-				  u32 s_id, u16 ox_id,
+				  u32 s_id, __be16 ox_id,
 				  enum bfa_lport_role role);
 
 u16        fc_rnid_build(struct fchs_s *fchs, struct fc_rnid_cmd_s *rnid,
@@ -218,7 +218,7 @@ u16        fc_rnid_build(struct fchs_s *fchs, struct fc_rnid_cmd_s *rnid,
 
 u16        fc_rnid_acc_build(struct fchs_s *fchs,
 			struct fc_rnid_acc_s *rnid_acc, u32 d_id, u32 s_id,
-			u16 ox_id, u32 data_format,
+			__be16 ox_id, u32 data_format,
 			struct fc_rnid_common_id_data_s *common_id_data,
 			struct fc_rnid_general_topology_data_s *gen_topo_data);
 
@@ -228,7 +228,7 @@ u16        fc_rpsc_build(struct fchs_s *fchs, struct fc_rpsc_cmd_s *rpsc,
 			      u32 d_id, u32 s_id, u16 ox_id);
 u16        fc_rpsc_acc_build(struct fchs_s *fchs,
 			struct fc_rpsc_acc_s *rpsc_acc, u32 d_id, u32 s_id,
-			u16 ox_id, struct fc_rpsc_speed_info_s *oper_speed);
+			__be16 ox_id, struct fc_rpsc_speed_info_s *oper_speed);
 u16        fc_gid_ft_build(struct fchs_s *fchs, void *pld, u32 s_id,
 				u8 fc4_type);
 
@@ -251,7 +251,7 @@ u16        fc_logo_build(struct fchs_s *fchs, struct fc_logo_s *logo, u32 d_id,
 			      u32 s_id, u16 ox_id, wwn_t port_name);
 
 u16        fc_logo_acc_build(struct fchs_s *fchs, void *pld, u32 d_id,
-				  u32 s_id, u16 ox_id);
+				  u32 s_id, __be16 ox_id);
 
 u16        fc_fdmi_reqhdr_build(struct fchs_s *fchs, void *pyld, u32 s_id,
 				     u16 cmd_code);
@@ -261,7 +261,7 @@ u16	fc_gfn_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn);
 void		fc_get_fc4type_bitmask(u8 fc4_type, u8 *bit_mask);
 
 void		fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
-					 u16 ox_id);
+					 __be16 ox_id);
 
 enum fc_parse_status	fc_els_rsp_parse(struct fchs_s *fchs, int len);
 
@@ -274,15 +274,15 @@ enum fc_parse_status	fc_pdisc_parse(struct fchs_s *fchs, wwn_t node_name,
 					wwn_t port_name);
 
 u16 fc_ba_acc_build(struct fchs_s *fchs, struct fc_ba_acc_s *ba_acc, u32 d_id,
-		u32 s_id, u16 ox_id, u16 rx_id);
+		u32 s_id, __be16 ox_id, u16 rx_id);
 
 int fc_logout_params_pages(struct fchs_s *fc_frame, u8 els_code);
 
 u16 fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc,
-		u32 d_id, u32 s_id, u16 ox_id, int num_pages);
+		u32 d_id, u32 s_id, __be16 ox_id, int num_pages);
 
 u16 fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc,
-		u32 d_id, u32 s_id, u16 ox_id, int num_pages);
+		u32 d_id, u32 s_id, __be16 ox_id, int num_pages);
 
 u16 fc_logo_rsp_parse(struct fchs_s *fchs, int len);
 
@@ -304,7 +304,7 @@ u16 fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
 u16 fc_tprlo_rsp_parse(struct fchs_s *fchs, int len);
 
 u16 fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
-		u16 ox_id, u32 reason_code, u32 reason_expl);
+		__be16 ox_id, u32 reason_code, u32 reason_expl);
 
 u16 fc_gnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
 		u32 port_id);

These changes were hidden because the diff is too large
+ 120 - 422
drivers/scsi/bfa/bfa_fcpim.c


+ 71 - 117
drivers/scsi/bfa/bfa_fcpim.h

@@ -41,7 +41,7 @@
 	(__itnim->ioprofile.iocomps[__index]++)
 
 #define BFA_IOIM_RETRY_TAG_OFFSET 11
-#define BFA_IOIM_RETRY_TAG_MASK 0x07ff /* 2K IOs */
+#define BFA_IOIM_IOTAG_MASK 0x07ff /* 2K IOs */
 #define BFA_IOIM_RETRY_MAX 7
 
 /* Buckets are are 512 bytes to 2MB */
@@ -94,12 +94,12 @@ struct bfa_fcpim_mod_s {
 	struct list_head	ioim_resfree_q; /*  IOs waiting for f/w */
 	struct list_head	ioim_comp_q;	/*  IO global comp Q	*/
 	struct list_head	tskim_free_q;
-	u32		ios_active;	/*  current active IOs	*/
-	u32		delay_comp;
+	u32			ios_active;	/*  current active IOs	*/
+	u32			delay_comp;
 	struct bfa_fcpim_del_itn_stats_s del_itn_stats;
 	bfa_boolean_t		ioredirect;
 	bfa_boolean_t		io_profile;
-	u32		io_profile_start_time;
+	u32			io_profile_start_time;
 	bfa_fcpim_profile_t     profile_comp;
 	bfa_fcpim_profile_t     profile_start;
 };
@@ -114,25 +114,24 @@ struct bfa_ioim_s {
 	struct bfa_fcpim_mod_s	*fcpim;		/*  parent fcpim module */
 	struct bfa_itnim_s	*itnim;		/*  i-t-n nexus for this IO  */
 	struct bfad_ioim_s	*dio;		/*  driver IO handle	*/
-	u16		iotag;		/*  FWI IO tag	*/
-	u16		abort_tag;	/*  unqiue abort request tag */
-	u16		nsges;		/*  number of SG elements */
-	u16		nsgpgs;		/*  number of SG pages	*/
+	u16			iotag;		/*  FWI IO tag	*/
+	u16			abort_tag;	/*  unqiue abort request tag */
+	u16			nsges;		/*  number of SG elements */
+	u16			nsgpgs;		/*  number of SG pages	*/
 	struct bfa_sgpg_s	*sgpg;		/*  first SG page	*/
 	struct list_head	sgpg_q;		/*  allocated SG pages	*/
 	struct bfa_cb_qe_s	hcb_qe;		/*  bfa callback qelem	*/
 	bfa_cb_cbfn_t		io_cbfn;	/*  IO completion handler */
-	struct bfa_ioim_sp_s *iosp;		/*  slow-path IO handling */
-	u8		reqq;		/*  Request queue for I/O */
-	u64 start_time;			/*  IO's Profile start val */
+	struct bfa_ioim_sp_s	*iosp;		/*  slow-path IO handling */
+	u8			reqq;		/*  Request queue for I/O */
+	u64			start_time;	/*  IO's Profile start val */
 };
 
-
 struct bfa_ioim_sp_s {
 	struct bfi_msg_s	comp_rspmsg;	/*  IO comp f/w response */
 	u8			*snsinfo;	/*  sense info for this IO   */
-	struct bfa_sgpg_wqe_s sgpg_wqe;	/*  waitq elem for sgpg	*/
-	struct bfa_reqq_wait_s reqq_wait;	/*  to wait for room in reqq */
+	struct bfa_sgpg_wqe_s	sgpg_wqe;	/*  waitq elem for sgpg	*/
+	struct bfa_reqq_wait_s	reqq_wait;	/*  to wait for room in reqq */
 	bfa_boolean_t		abort_explicit;	/*  aborted by OS	*/
 	struct bfa_tskim_s	*tskim;		/*  Relevant TM cmd	*/
 };
@@ -143,35 +142,34 @@ struct bfa_ioim_sp_s {
 struct bfa_tskim_s {
 	struct list_head	qe;
 	bfa_sm_t		sm;
-	struct bfa_s	*bfa;	/*  BFA module  */
+	struct bfa_s		*bfa;	/*  BFA module  */
 	struct bfa_fcpim_mod_s  *fcpim;	/*  parent fcpim module	*/
 	struct bfa_itnim_s	*itnim;	/*  i-t-n nexus for this IO  */
-	struct bfad_tskim_s	*dtsk;   /*  driver task mgmt cmnd	*/
-	bfa_boolean_t	notify;	/*  notify itnim on TM comp  */
-	lun_t	lun;	/*  lun if applicable	*/
-	enum fcp_tm_cmnd	tm_cmnd;	/*  task management command  */
-	u16	tsk_tag;	/*  FWI IO tag	*/
-	u8	tsecs;	/*  timeout in seconds	*/
+	struct bfad_tskim_s	*dtsk;  /*  driver task mgmt cmnd	*/
+	bfa_boolean_t		notify;	/*  notify itnim on TM comp  */
+	struct scsi_lun		lun;	/*  lun if applicable	*/
+	enum fcp_tm_cmnd	tm_cmnd; /*  task management command  */
+	u16			tsk_tag; /*  FWI IO tag	*/
+	u8			tsecs;	/*  timeout in seconds	*/
 	struct bfa_reqq_wait_s  reqq_wait;   /*  to wait for room in reqq */
 	struct list_head	io_q;	/*  queue of affected IOs	*/
-	struct bfa_wc_s	wc;	/*  waiting counter	*/
+	struct bfa_wc_s		wc;	/*  waiting counter	*/
 	struct bfa_cb_qe_s	hcb_qe;	/*  bfa callback qelem	*/
 	enum bfi_tskim_status   tsk_status;  /*  TM status	*/
 };
 
-
 /*
  * BFA i-t-n (initiator mode)
  */
 struct bfa_itnim_s {
-	struct list_head	qe;		/*  queue element	*/
-	bfa_sm_t	  sm;		/*  i-t-n im BFA state machine  */
-	struct bfa_s	*bfa;		/*  bfa instance	*/
-	struct bfa_rport_s *rport;	/*  bfa rport	*/
-	void	*ditn;		/*  driver i-t-n structure	*/
+	struct list_head	qe;	/*  queue element	*/
+	bfa_sm_t		sm;	/*  i-t-n im BFA state machine  */
+	struct bfa_s		*bfa;	/*  bfa instance	*/
+	struct bfa_rport_s	*rport;	/*  bfa rport	*/
+	void			*ditn;	/*  driver i-t-n structure	*/
 	struct bfi_mhdr_s	mhdr;	/*  pre-built mhdr	*/
-	u8	msg_no;		/*  itnim/rport firmware handle */
-	u8	reqq;		/*  CQ for requests	*/
+	u8			msg_no;	/*  itnim/rport firmware handle */
+	u8			reqq;	/*  CQ for requests	*/
 	struct bfa_cb_qe_s	hcb_qe;	/*  bfa callback qelem	*/
 	struct list_head pending_q;	/*  queue of pending IO requests */
 	struct list_head io_q;		/*  queue of active IO requests */
@@ -181,19 +179,19 @@ struct bfa_itnim_s {
 	bfa_boolean_t   seq_rec;	/*  SQER supported	*/
 	bfa_boolean_t   is_online;	/*  itnim is ONLINE for IO	*/
 	bfa_boolean_t   iotov_active;	/*  IO TOV timer is active	 */
-	struct bfa_wc_s	wc;	/*  waiting counter	*/
-	struct bfa_timer_s timer;	/*  pending IO TOV		 */
+	struct bfa_wc_s	wc;		/*  waiting counter	*/
+	struct bfa_timer_s timer;	/*  pending IO TOV	 */
 	struct bfa_reqq_wait_s reqq_wait; /*  to wait for room in reqq */
 	struct bfa_fcpim_mod_s *fcpim;	/*  fcpim module	*/
 	struct bfa_itnim_iostats_s	stats;
 	struct bfa_itnim_ioprofile_s  ioprofile;
 };
 
-
 #define bfa_itnim_is_online(_itnim) ((_itnim)->is_online)
 #define BFA_FCPIM_MOD(_hal) (&(_hal)->modules.fcpim_mod)
+#define BFA_IOIM_TAG_2_ID(_iotag)	((_iotag) & BFA_IOIM_IOTAG_MASK)
 #define BFA_IOIM_FROM_TAG(_fcpim, _iotag)	\
-	(&fcpim->ioim_arr[(_iotag & BFA_IOIM_RETRY_TAG_MASK)])
+	(&fcpim->ioim_arr[(_iotag & BFA_IOIM_IOTAG_MASK)])
 #define BFA_TSKIM_FROM_TAG(_fcpim, _tmtag)	\
 	(&fcpim->tskim_arr[_tmtag & (fcpim->num_tskim_reqs - 1)])
 
@@ -201,26 +199,26 @@ struct bfa_itnim_s {
 	(_bfa->modules.fcpim_mod.io_profile_start_time)
 #define bfa_fcpim_get_io_profile(_bfa)	\
 	(_bfa->modules.fcpim_mod.io_profile)
+#define bfa_ioim_update_iotag(__ioim) do {				\
+	uint16_t k = (__ioim)->iotag >> BFA_IOIM_RETRY_TAG_OFFSET;	\
+	k++; (__ioim)->iotag &= BFA_IOIM_IOTAG_MASK;			\
+	(__ioim)->iotag |= k << BFA_IOIM_RETRY_TAG_OFFSET;		\
+} while (0)
 
 static inline bfa_boolean_t
-bfa_ioim_get_iotag(struct bfa_ioim_s *ioim)
+bfa_ioim_maxretry_reached(struct bfa_ioim_s *ioim)
 {
-	u16 k = ioim->iotag;
-
-	k >>= BFA_IOIM_RETRY_TAG_OFFSET; k++;
-
-	if (k > BFA_IOIM_RETRY_MAX)
+	uint16_t k = ioim->iotag >> BFA_IOIM_RETRY_TAG_OFFSET;
+	if (k < BFA_IOIM_RETRY_MAX)
 		return BFA_FALSE;
-	ioim->iotag &= BFA_IOIM_RETRY_TAG_MASK;
-	ioim->iotag |= k<<BFA_IOIM_RETRY_TAG_OFFSET;
 	return BFA_TRUE;
 }
+
 /*
  * function prototypes
  */
 void	bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim,
 					struct bfa_meminfo_s *minfo);
-void	bfa_ioim_detach(struct bfa_fcpim_mod_s *fcpim);
 void	bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
 void	bfa_ioim_good_comp_isr(struct bfa_s *bfa,
 					struct bfi_msg_s *msg);
@@ -232,7 +230,6 @@ void	bfa_ioim_tov(struct bfa_ioim_s *ioim);
 
 void	bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim,
 					struct bfa_meminfo_s *minfo);
-void	bfa_tskim_detach(struct bfa_fcpim_mod_s *fcpim);
 void	bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
 void	bfa_tskim_iodone(struct bfa_tskim_s *tskim);
 void	bfa_tskim_iocdisable(struct bfa_tskim_s *tskim);
@@ -248,32 +245,14 @@ void	bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
 void	bfa_itnim_iodone(struct bfa_itnim_s *itnim);
 void	bfa_itnim_tskdone(struct bfa_itnim_s *itnim);
 bfa_boolean_t   bfa_itnim_hold_io(struct bfa_itnim_s *itnim);
-void bfa_ioim_profile_comp(struct bfa_ioim_s *ioim);
-void bfa_ioim_profile_start(struct bfa_ioim_s *ioim);
-
 
 /*
  * bfa fcpim module API functions
  */
-void		bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov);
+void	bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov);
 u16	bfa_fcpim_path_tov_get(struct bfa_s *bfa);
-void		bfa_fcpim_qdepth_set(struct bfa_s *bfa, u16 q_depth);
 u16	bfa_fcpim_qdepth_get(struct bfa_s *bfa);
-bfa_status_t bfa_fcpim_get_modstats(struct bfa_s *bfa,
-	 struct bfa_itnim_iostats_s *modstats);
-bfa_status_t bfa_fcpim_port_iostats(struct bfa_s *bfa,
-		struct bfa_itnim_iostats_s *stats, u8 lp_tag);
-bfa_status_t bfa_fcpim_get_del_itn_stats(struct bfa_s *bfa,
-	 struct bfa_fcpim_del_itn_stats_s *modstats);
-bfa_status_t bfa_fcpim_port_clear_iostats(struct bfa_s *bfa, u8 lp_tag);
-void bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *fcpim_stats,
-		struct bfa_itnim_iostats_s *itnim_stats);
-bfa_status_t bfa_fcpim_clr_modstats(struct bfa_s *bfa);
-void		bfa_fcpim_set_ioredirect(struct bfa_s *bfa,
-				bfa_boolean_t state);
-void		bfa_fcpim_update_ioredirect(struct bfa_s *bfa);
-bfa_status_t bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time);
-bfa_status_t bfa_fcpim_profile_off(struct bfa_s *bfa);
+
 #define bfa_fcpim_ioredirect_enabled(__bfa)				\
 	(((struct bfa_fcpim_mod_s *)(BFA_FCPIM_MOD(__bfa)))->ioredirect)
 
@@ -291,48 +270,33 @@ bfa_status_t bfa_fcpim_profile_off(struct bfa_s *bfa);
  * bfa itnim API functions
  */
 struct bfa_itnim_s *bfa_itnim_create(struct bfa_s *bfa,
-					struct bfa_rport_s *rport, void *itnim);
-void		bfa_itnim_delete(struct bfa_itnim_s *itnim);
-void		bfa_itnim_online(struct bfa_itnim_s *itnim,
-				 bfa_boolean_t seq_rec);
-void		bfa_itnim_offline(struct bfa_itnim_s *itnim);
-void		bfa_itnim_get_stats(struct bfa_itnim_s *itnim,
-			struct bfa_itnim_iostats_s *stats);
-void		bfa_itnim_clear_stats(struct bfa_itnim_s *itnim);
-bfa_status_t	bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
-		struct bfa_itnim_ioprofile_s *ioprofile);
+		struct bfa_rport_s *rport, void *itnim);
+void bfa_itnim_delete(struct bfa_itnim_s *itnim);
+void bfa_itnim_online(struct bfa_itnim_s *itnim, bfa_boolean_t seq_rec);
+void bfa_itnim_offline(struct bfa_itnim_s *itnim);
+void bfa_itnim_clear_stats(struct bfa_itnim_s *itnim);
+bfa_status_t bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
+			struct bfa_itnim_ioprofile_s *ioprofile);
+
 #define bfa_itnim_get_reqq(__ioim) (((struct bfa_ioim_s *)__ioim)->itnim->reqq)
 
 /*
- *	BFA completion callback for bfa_itnim_online().
- *
- * @param[in]		itnim		FCS or driver itnim instance
- *
- * return None
+ * BFA completion callback for bfa_itnim_online().
  */
 void	bfa_cb_itnim_online(void *itnim);
 
 /*
- *	BFA completion callback for bfa_itnim_offline().
- *
- * @param[in]		itnim		FCS or driver itnim instance
- *
- * return None
+ * BFA completion callback for bfa_itnim_offline().
  */
 void	bfa_cb_itnim_offline(void *itnim);
 void	bfa_cb_itnim_tov_begin(void *itnim);
 void	bfa_cb_itnim_tov(void *itnim);
 
 /*
- *	BFA notification to FCS/driver for second level error recovery.
- *
+ * BFA notification to FCS/driver for second level error recovery.
 * At least one I/O request has timed out and the target is unresponsive to
  * repeated abort requests. Second level error recovery should be initiated
  * by starting implicit logout and recovery procedures.
- *
- * @param[in]		itnim		FCS or driver itnim instance
- *
- * return None
  */
 void	bfa_cb_itnim_sler(void *itnim);
 
@@ -349,10 +313,8 @@ void		bfa_ioim_start(struct bfa_ioim_s *ioim);
 bfa_status_t	bfa_ioim_abort(struct bfa_ioim_s *ioim);
 void		bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim,
 				      bfa_boolean_t iotov);
-
-
 /*
- *	I/O completion notification.
+ * I/O completion notification.
  *
  * @param[in]		dio			driver IO structure
  * @param[in]		io_status		IO completion status
@@ -363,39 +325,31 @@ void		bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim,
  *
  * @return None
  */
-void	bfa_cb_ioim_done(void *bfad, struct bfad_ioim_s *dio,
-				  enum bfi_ioim_status io_status,
-				  u8 scsi_status, int sns_len,
-				  u8 *sns_info, s32 residue);
+void bfa_cb_ioim_done(void *bfad, struct bfad_ioim_s *dio,
+			enum bfi_ioim_status io_status,
+			u8 scsi_status, int sns_len,
+			u8 *sns_info, s32 residue);
 
 /*
- *	I/O good completion notification.
- *
- * @param[in]		dio			driver IO structure
- *
- * @return None
+ * I/O good completion notification.
  */
-void	bfa_cb_ioim_good_comp(void *bfad, struct bfad_ioim_s *dio);
+void bfa_cb_ioim_good_comp(void *bfad, struct bfad_ioim_s *dio);
 
 /*
- *	I/O abort completion notification
- *
- * @param[in]		dio			driver IO that was aborted
- *
- * @return None
+ * I/O abort completion notification
  */
-void	bfa_cb_ioim_abort(void *bfad, struct bfad_ioim_s *dio);
+void bfa_cb_ioim_abort(void *bfad, struct bfad_ioim_s *dio);
 
 /*
  * bfa tskim API functions
  */
-struct bfa_tskim_s	*bfa_tskim_alloc(struct bfa_s *bfa,
-					struct bfad_tskim_s *dtsk);
-void		bfa_tskim_free(struct bfa_tskim_s *tskim);
-void		bfa_tskim_start(struct bfa_tskim_s *tskim,
-				struct bfa_itnim_s *itnim, lun_t lun,
-				enum fcp_tm_cmnd tm, u8 t_secs);
-void		bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk,
-				  enum bfi_tskim_status tsk_status);
+struct bfa_tskim_s *bfa_tskim_alloc(struct bfa_s *bfa,
+			struct bfad_tskim_s *dtsk);
+void bfa_tskim_free(struct bfa_tskim_s *tskim);
+void bfa_tskim_start(struct bfa_tskim_s *tskim,
+			struct bfa_itnim_s *itnim, struct scsi_lun lun,
+			enum fcp_tm_cmnd tm, u8 t_secs);
+void bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk,
+			enum bfi_tskim_status tsk_status);
 
 #endif /* __BFA_FCPIM_H__ */
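
The bfa_fcpim.h hunks above fold retry bookkeeping into the I/O tag itself: the low 11 bits (BFA_IOIM_IOTAG_MASK, 2K I/Os) index the ioim array, and the bits above BFA_IOIM_RETRY_TAG_OFFSET hold the retry count that bfa_ioim_update_iotag() bumps and bfa_ioim_maxretry_reached() checks. A minimal stand-alone sketch of that encoding, using hypothetical names rather than the driver's macros:

#include <stdint.h>
#include <stdbool.h>

#define IOTAG_MASK       0x07ff   /* low 11 bits: index into the 2K I/O slots */
#define RETRY_TAG_OFFSET 11       /* retry count lives above the slot index   */
#define RETRY_MAX        7

/* Bump the retry count kept in the upper bits; the slot index is untouched. */
static inline uint16_t update_iotag(uint16_t iotag)
{
	uint16_t retries = (uint16_t)(iotag >> RETRY_TAG_OFFSET) + 1;

	return (uint16_t)((iotag & IOTAG_MASK) | (retries << RETRY_TAG_OFFSET));
}

static inline bool maxretry_reached(uint16_t iotag)
{
	return (iotag >> RETRY_TAG_OFFSET) >= RETRY_MAX;
}

/* Array lookup always uses the masked slot index, as BFA_IOIM_FROM_TAG does. */
static inline uint16_t iotag_to_id(uint16_t iotag)
{
	return iotag & IOTAG_MASK;
}

Splitting the tag this way lets a retried I/O carry a fresh tag value toward firmware while BFA_IOIM_FROM_TAG() still resolves to the same bfa_ioim_s slot.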

+ 34 - 172
drivers/scsi/bfa/bfa_fcs.c

@@ -19,9 +19,9 @@
  *  bfa_fcs.c BFA FCS main
  */
 
+#include "bfad_drv.h"
 #include "bfa_fcs.h"
 #include "bfa_fcbuild.h"
-#include "bfad_drv.h"
 
 BFA_TRC_FILE(FCS, FCS);
 
@@ -76,7 +76,7 @@ bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad,
 	fcs->bfad = bfad;
 	fcs->min_cfg = min_cfg;
 
-	bfa_attach_fcs(bfa);
+	bfa->fcs = BFA_TRUE;
 	fcbuild_init();
 
 	for (i = 0; i < sizeof(fcs_modules) / sizeof(fcs_modules[0]); i++) {
@@ -110,14 +110,6 @@ bfa_fcs_init(struct bfa_fcs_s *fcs)
 	}
 }
 
-/*
- * Start FCS operations.
- */
-void
-bfa_fcs_start(struct bfa_fcs_s *fcs)
-{
-	bfa_fcs_fabric_modstart(fcs);
-}
 
 /*
  *	brief
@@ -138,22 +130,6 @@ bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs,
 	bfa_fcs_fabric_psymb_init(&fcs->fabric);
 }
 
-/*
- *	brief
- *		FCS FDMI Driver Parameter Initialization
- *
- *	param[in]		fcs		FCS instance
- *	param[in]		fdmi_enable	TRUE/FALSE
- *
- *	return None
- */
-void
-bfa_fcs_set_fdmi_param(struct bfa_fcs_s *fcs, bfa_boolean_t fdmi_enable)
-{
-
-	fcs->fdmi_enabled = fdmi_enable;
-
-}
 /*
  *	brief
  *		FCS instance cleanup and exit.
@@ -184,18 +160,6 @@ bfa_fcs_exit(struct bfa_fcs_s *fcs)
 }
 
 
-void
-bfa_fcs_trc_init(struct bfa_fcs_s *fcs, struct bfa_trc_mod_s *trcmod)
-{
-	fcs->trcmod = trcmod;
-}
-
-void
-bfa_fcs_modexit_comp(struct bfa_fcs_s *fcs)
-{
-	bfa_wc_down(&fcs->wc);
-}
-
 /*
  * Fabric module implementation.
  */
@@ -232,31 +196,6 @@ static void bfa_fcs_fabric_flogiacc_comp(void *fcsarg,
 					 u32 rsp_len,
 					 u32 resid_len,
 					 struct fchs_s *rspfchs);
-/*
- *  fcs_fabric_sm fabric state machine functions
- */
-
-/*
- * Fabric state machine events
- */
-enum bfa_fcs_fabric_event {
-	BFA_FCS_FABRIC_SM_CREATE	= 1,	/*  create from driver	      */
-	BFA_FCS_FABRIC_SM_DELETE	= 2,	/*  delete from driver	      */
-	BFA_FCS_FABRIC_SM_LINK_DOWN	= 3,	/*  link down from port      */
-	BFA_FCS_FABRIC_SM_LINK_UP	= 4,	/*  link up from port	      */
-	BFA_FCS_FABRIC_SM_CONT_OP	= 5,	/*  flogi/auth continue op   */
-	BFA_FCS_FABRIC_SM_RETRY_OP	= 6,	/*  flogi/auth retry op      */
-	BFA_FCS_FABRIC_SM_NO_FABRIC	= 7,	/*  from flogi/auth	      */
-	BFA_FCS_FABRIC_SM_PERF_EVFP	= 8,	/*  from flogi/auth	      */
-	BFA_FCS_FABRIC_SM_ISOLATE	= 9,	/*  from EVFP processing     */
-	BFA_FCS_FABRIC_SM_NO_TAGGING	= 10,	/*  no VFT tagging from EVFP */
-	BFA_FCS_FABRIC_SM_DELAYED	= 11,	/*  timeout delay event      */
-	BFA_FCS_FABRIC_SM_AUTH_FAILED	= 12,	/*  auth failed	      */
-	BFA_FCS_FABRIC_SM_AUTH_SUCCESS	= 13,	/*  auth successful	      */
-	BFA_FCS_FABRIC_SM_DELCOMP	= 14,	/*  all vports deleted event */
-	BFA_FCS_FABRIC_SM_LOOPBACK	= 15,	/*  Received our own FLOGI   */
-	BFA_FCS_FABRIC_SM_START		= 16,	/*  from driver	      */
-};
 
 static void	bfa_fcs_fabric_sm_uninit(struct bfa_fcs_fabric_s *fabric,
 					 enum bfa_fcs_fabric_event event);
@@ -270,14 +209,8 @@ static void	bfa_fcs_fabric_sm_flogi_retry(struct bfa_fcs_fabric_s *fabric,
 					      enum bfa_fcs_fabric_event event);
 static void	bfa_fcs_fabric_sm_auth(struct bfa_fcs_fabric_s *fabric,
 				       enum bfa_fcs_fabric_event event);
-static void	bfa_fcs_fabric_sm_auth_failed(struct bfa_fcs_fabric_s *fabric,
-					      enum bfa_fcs_fabric_event event);
-static void	bfa_fcs_fabric_sm_loopback(struct bfa_fcs_fabric_s *fabric,
-					   enum bfa_fcs_fabric_event event);
 static void	bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric,
 					   enum bfa_fcs_fabric_event event);
-static void	bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric,
-					 enum bfa_fcs_fabric_event event);
 static void	bfa_fcs_fabric_sm_evfp(struct bfa_fcs_fabric_s *fabric,
 				       enum bfa_fcs_fabric_event event);
 static void	bfa_fcs_fabric_sm_evfp_done(struct bfa_fcs_fabric_s *fabric,
@@ -337,7 +270,7 @@ bfa_fcs_fabric_sm_created(struct bfa_fcs_fabric_s *fabric,
 
 	case BFA_FCS_FABRIC_SM_DELETE:
 		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit);
-		bfa_fcs_modexit_comp(fabric->fcs);
+		bfa_wc_down(&fabric->fcs->wc);
 		break;
 
 	default:
@@ -410,7 +343,7 @@ bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric,
 
 	case BFA_FCS_FABRIC_SM_LOOPBACK:
 		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_loopback);
-		bfa_lps_discard(fabric->lps);
+		bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE);
 		bfa_fcs_fabric_set_opertype(fabric);
 		break;
 
@@ -424,12 +357,12 @@ bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric,
 
 	case BFA_FCS_FABRIC_SM_LINK_DOWN:
 		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
-		bfa_lps_discard(fabric->lps);
+		bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE);
 		break;
 
 	case BFA_FCS_FABRIC_SM_DELETE:
 		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
-		bfa_lps_discard(fabric->lps);
+		bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE);
 		bfa_fcs_fabric_delete(fabric);
 		break;
 
@@ -481,7 +414,7 @@ bfa_fcs_fabric_sm_auth(struct bfa_fcs_fabric_s *fabric,
 	switch (event) {
 	case BFA_FCS_FABRIC_SM_AUTH_FAILED:
 		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth_failed);
-		bfa_lps_discard(fabric->lps);
+		bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE);
 		break;
 
 	case BFA_FCS_FABRIC_SM_AUTH_SUCCESS:
@@ -495,7 +428,7 @@ bfa_fcs_fabric_sm_auth(struct bfa_fcs_fabric_s *fabric,
 
 	case BFA_FCS_FABRIC_SM_LINK_DOWN:
 		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
-		bfa_lps_discard(fabric->lps);
+		bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE);
 		break;
 
 	case BFA_FCS_FABRIC_SM_DELETE:
@@ -511,7 +444,7 @@ bfa_fcs_fabric_sm_auth(struct bfa_fcs_fabric_s *fabric,
 /*
  *   Authentication failed
  */
-static void
+void
 bfa_fcs_fabric_sm_auth_failed(struct bfa_fcs_fabric_s *fabric,
 			      enum bfa_fcs_fabric_event event)
 {
@@ -537,7 +470,7 @@ bfa_fcs_fabric_sm_auth_failed(struct bfa_fcs_fabric_s *fabric,
 /*
  *   Port is in loopback mode.
  */
-static void
+void
 bfa_fcs_fabric_sm_loopback(struct bfa_fcs_fabric_s *fabric,
 			   enum bfa_fcs_fabric_event event)
 {
@@ -573,7 +506,7 @@ bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric,
 	switch (event) {
 	case BFA_FCS_FABRIC_SM_LINK_DOWN:
 		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
-		bfa_lps_discard(fabric->lps);
+		bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE);
 		bfa_fcs_fabric_notify_offline(fabric);
 		break;
 
@@ -596,7 +529,7 @@ bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric,
 /*
  *   Fabric is online - normal operating state.
  */
-static void
+void
 bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric,
 			 enum bfa_fcs_fabric_event event)
 {
@@ -606,7 +539,7 @@ bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric,
 	switch (event) {
 	case BFA_FCS_FABRIC_SM_LINK_DOWN:
 		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
-		bfa_lps_discard(fabric->lps);
+		bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE);
 		bfa_fcs_fabric_notify_offline(fabric);
 		break;
 
@@ -617,7 +550,7 @@ bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric,
 
 	case BFA_FCS_FABRIC_SM_AUTH_FAILED:
 		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth_failed);
-		bfa_lps_discard(fabric->lps);
+		bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE);
 		break;
 
 	case BFA_FCS_FABRIC_SM_AUTH_SUCCESS:
@@ -697,7 +630,7 @@ bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric,
 	switch (event) {
 	case BFA_FCS_FABRIC_SM_DELCOMP:
 		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit);
-		bfa_fcs_modexit_comp(fabric->fcs);
+		bfa_wc_down(&fabric->fcs->wc);
 		break;
 
 	case BFA_FCS_FABRIC_SM_LINK_UP:
@@ -724,8 +657,8 @@ bfa_fcs_fabric_init(struct bfa_fcs_fabric_s *fabric)
 	struct bfa_lport_cfg_s *port_cfg = &fabric->bport.port_cfg;
 
 	port_cfg->roles = BFA_LPORT_ROLE_FCP_IM;
-	port_cfg->nwwn = bfa_ioc_get_nwwn(&fabric->fcs->bfa->ioc);
-	port_cfg->pwwn = bfa_ioc_get_pwwn(&fabric->fcs->bfa->ioc);
+	port_cfg->nwwn = fabric->fcs->bfa->ioc.attr->nwwn;
+	port_cfg->pwwn = fabric->fcs->bfa->ioc.attr->pwwn;
 }
 
 /*
@@ -813,7 +746,7 @@ bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status)
 		return;
 
 	case BFA_STATUS_EPROTOCOL:
-		switch (bfa_lps_get_extstatus(fabric->lps)) {
+		switch (fabric->lps->ext_status) {
 		case BFA_EPROTO_BAD_ACCEPT:
 			fabric->stats.flogi_acc_err++;
 			break;
@@ -840,26 +773,26 @@ bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status)
 		return;
 	}
 
-	fabric->bb_credit = bfa_lps_get_peer_bbcredit(fabric->lps);
+	fabric->bb_credit = fabric->lps->pr_bbcred;
 	bfa_trc(fabric->fcs, fabric->bb_credit);
 
-	if (!bfa_lps_is_brcd_fabric(fabric->lps))
-		fabric->fabric_name =  bfa_lps_get_peer_nwwn(fabric->lps);
+	if (!(fabric->lps->brcd_switch))
+		fabric->fabric_name =  fabric->lps->pr_nwwn;
 
 	/*
 	 * Check port type. It should be 1 = F-port.
 	 */
-	if (bfa_lps_is_fport(fabric->lps)) {
-		fabric->bport.pid = bfa_lps_get_pid(fabric->lps);
-		fabric->is_npiv = bfa_lps_is_npiv_en(fabric->lps);
-		fabric->is_auth = bfa_lps_is_authreq(fabric->lps);
+	if (fabric->lps->fport) {
+		fabric->bport.pid = fabric->lps->lp_pid;
+		fabric->is_npiv = fabric->lps->npiv_en;
+		fabric->is_auth = fabric->lps->auth_req;
 		bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_CONT_OP);
 	} else {
 		/*
 		 * Nport-2-Nport direct attached
 		 */
 		fabric->bport.port_topo.pn2n.rem_port_wwn =
-			bfa_lps_get_peer_pwwn(fabric->lps);
+			fabric->lps->pr_pwwn;
 		bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_NO_FABRIC);
 	}
 
@@ -987,7 +920,7 @@ bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs)
 	INIT_LIST_HEAD(&fabric->vport_q);
 	INIT_LIST_HEAD(&fabric->vf_q);
 	fabric->lps = bfa_lps_alloc(fcs->bfa);
-	bfa_assert(fabric->lps);
+	WARN_ON(!fabric->lps);
 
 	/*
 	 * Initialize fabric delete completion handler. Fabric deletion is
@@ -1038,31 +971,6 @@ bfa_fcs_fabric_modstart(struct bfa_fcs_s *fcs)
 	bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_START);
 }
 
-/*
- *   Suspend fabric activity as part of driver suspend.
- */
-void
-bfa_fcs_fabric_modsusp(struct bfa_fcs_s *fcs)
-{
-}
-
-bfa_boolean_t
-bfa_fcs_fabric_is_loopback(struct bfa_fcs_fabric_s *fabric)
-{
-	return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_loopback);
-}
-
-bfa_boolean_t
-bfa_fcs_fabric_is_auth_failed(struct bfa_fcs_fabric_s *fabric)
-{
-	return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_auth_failed);
-}
-
-enum bfa_port_type
-bfa_fcs_fabric_port_type(struct bfa_fcs_fabric_s *fabric)
-{
-	return fabric->oper_type;
-}
 
 /*
  *   Link up notification from BFA physical port module.
@@ -1123,40 +1031,6 @@ bfa_fcs_fabric_delvport(struct bfa_fcs_fabric_s *fabric,
 	bfa_wc_down(&fabric->wc);
 }
 
-/*
- *   Base port is deleted.
- */
-void
-bfa_fcs_fabric_port_delete_comp(struct bfa_fcs_fabric_s *fabric)
-{
-	bfa_wc_down(&fabric->wc);
-}
-
-
-/*
- *    Check if fabric is online.
- *
- *   param[in] fabric - Fabric instance. This can be a base fabric or vf.
- *
- *   @return  TRUE/FALSE
- */
-int
-bfa_fcs_fabric_is_online(struct bfa_fcs_fabric_s *fabric)
-{
-	return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_online);
-}
-
-/*
- *	brief
- *
- */
-bfa_status_t
-bfa_fcs_fabric_addvf(struct bfa_fcs_fabric_s *vf, struct bfa_fcs_s *fcs,
-		     struct bfa_lport_cfg_s *port_cfg, struct bfad_vf_s *vf_drv)
-{
-	bfa_sm_set_state(vf, bfa_fcs_fabric_sm_uninit);
-	return BFA_STATUS_OK;
-}
 
 /*
 * Look up a vport within a fabric given its pwwn
@@ -1176,18 +1050,6 @@ bfa_fcs_fabric_vport_lookup(struct bfa_fcs_fabric_s *fabric, wwn_t pwwn)
 	return NULL;
 }
 
-/*
- *    In a given fabric, return the number of lports.
- *
- *   param[in] fabric - Fabric instance. This can be a base fabric or vf.
- *
- *   @return : 1 or more.
- */
-u16
-bfa_fcs_fabric_vport_count(struct bfa_fcs_fabric_s *fabric)
-{
-	return fabric->num_vports;
-}
 
 /*
  *  Get OUI of the attached switch.
@@ -1207,7 +1069,7 @@ bfa_fcs_fabric_get_switch_oui(struct bfa_fcs_fabric_s *fabric)
 	u8 *tmp;
 	u16 oui;
 
-	fab_nwwn = bfa_lps_get_peer_nwwn(fabric->lps);
+	fab_nwwn = fabric->lps->pr_nwwn;
 
 	tmp = (u8 *)&fab_nwwn;
 	oui = (tmp[3] << 8) | tmp[4];
@@ -1235,7 +1097,7 @@ bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
 	 * external loopback cable is in place. Our own FLOGI frames are
 	 * sometimes looped back when switch port gets temporarily bypassed.
 	 */
-	if ((pid == bfa_os_ntoh3b(FC_FABRIC_PORT)) &&
+	if ((pid == bfa_ntoh3b(FC_FABRIC_PORT)) &&
 	    (els_cmd->els_code == FC_ELS_FLOGI) &&
 	    (flogi->port_name == bfa_fcs_lport_get_pwwn(&fabric->bport))) {
 		bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LOOPBACK);
@@ -1245,7 +1107,7 @@ bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
 	/*
 	 * FLOGI/EVFP exchanges should be consumed by base fabric.
 	 */
-	if (fchs->d_id == bfa_os_hton3b(FC_FABRIC_PORT)) {
+	if (fchs->d_id == bfa_hton3b(FC_FABRIC_PORT)) {
 		bfa_trc(fabric->fcs, pid);
 		bfa_fcs_fabric_process_uf(fabric, fchs, len);
 		return;
@@ -1358,13 +1220,13 @@ bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric)
 		return;
 
 	reqlen = fc_flogi_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
-				    bfa_os_hton3b(FC_FABRIC_PORT),
+				    bfa_hton3b(FC_FABRIC_PORT),
 				    n2n_port->reply_oxid, pcfg->pwwn,
 				    pcfg->nwwn,
 				    bfa_fcport_get_maxfrsize(bfa),
 				    bfa_fcport_get_rx_bbcredit(bfa));
 
-	bfa_fcxp_send(fcxp, NULL, fabric->vf_id, bfa_lps_get_tag(fabric->lps),
+	bfa_fcxp_send(fcxp, NULL, fabric->vf_id, fabric->lps->lp_tag,
 		      BFA_FALSE, FC_CLASS_3,
 		      reqlen, &fchs, bfa_fcs_fabric_flogiacc_comp, fabric,
 		      FC_MAX_PDUSZ, 0);
@@ -1455,7 +1317,7 @@ bfa_fcs_port_event_handler(void *cbarg, enum bfa_port_linkstate event)
 		break;
 
 	default:
-		bfa_assert(0);
+		WARN_ON(1);
 	}
 }
 
@@ -1502,7 +1364,7 @@ bfa_fcs_uf_recv(void *cbarg, struct bfa_uf_s *uf)
 		 * drop frame if vfid is unknown
 		 */
 		if (!fabric) {
-			bfa_assert(0);
+			WARN_ON(1);
 			bfa_stats(fcs, uf.vfid_unknown);
 			bfa_uf_free(uf);
 			return;
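
A pattern running through the whole bfa_fcs.c hunk set: one-line wrappers such as bfa_lps_discard() and bfa_fcs_modexit_comp() are dropped in favour of posting the event or doing the work directly (bfa_sm_send_event(..., BFA_LPS_SM_OFFLINE), bfa_wc_down(&...->wc)). The underlying state machines keep the current state as a function pointer and dispatch events through it. A self-contained sketch of that style, with invented names rather than the driver's bfa_cs.h helpers:

#include <stdio.h>

struct fsm;
typedef void (*fsm_state_t)(struct fsm *sm, int event);

struct fsm {
	fsm_state_t state;      /* current state is just a function pointer */
};

enum { EV_LINK_UP = 1, EV_LINK_DOWN = 2 };

static void sm_online(struct fsm *sm, int event);
static void sm_offline(struct fsm *sm, int event);

#define sm_set_state(_sm, _st)   ((_sm)->state = (_st))
#define sm_send_event(_sm, _ev)  ((_sm)->state((_sm), (_ev)))
#define sm_cmp_state(_sm, _st)   ((_sm)->state == (_st))

static void sm_offline(struct fsm *sm, int event)
{
	if (event == EV_LINK_UP)
		sm_set_state(sm, sm_online);   /* offline -> online */
}

static void sm_online(struct fsm *sm, int event)
{
	if (event == EV_LINK_DOWN)
		sm_set_state(sm, sm_offline);  /* online -> offline */
}

int main(void)
{
	struct fsm fabric = { .state = sm_offline };

	sm_send_event(&fabric, EV_LINK_UP);
	printf("online: %d\n", sm_cmp_state(&fabric, sm_online));
	sm_send_event(&fabric, EV_LINK_DOWN);
	printf("online: %d\n", sm_cmp_state(&fabric, sm_online));
	return 0;
}

Posting events directly also explains the bfa_sm_cmp_state() calls that replace bfa_fcs_fabric_is_online()/is_loopback(): the "is in state X" helpers reduce to a pointer comparison against the state function.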

+ 81 - 43
drivers/scsi/bfa/bfa_fcs.h

@@ -26,6 +26,22 @@
 
 #define BFA_FCS_OS_STR_LEN		64
 
+/*
+ *  lps_pvt BFA LPS private functions
+ */
+
+enum bfa_lps_event {
+	BFA_LPS_SM_LOGIN	= 1,	/* login request from user      */
+	BFA_LPS_SM_LOGOUT	= 2,	/* logout request from user     */
+	BFA_LPS_SM_FWRSP	= 3,	/* f/w response to login/logout */
+	BFA_LPS_SM_RESUME	= 4,	/* space present in reqq queue  */
+	BFA_LPS_SM_DELETE	= 5,	/* lps delete from user         */
+	BFA_LPS_SM_OFFLINE	= 6,	/* Link is offline              */
+	BFA_LPS_SM_RX_CVL	= 7,	/* Rx clear virtual link        */
+	BFA_LPS_SM_SET_N2N_PID  = 8,	/* Set assigned PID for n2n */
+};
+
+
 /*
  * !!! Only append to the enums defined here to avoid any versioning
  * !!! needed between trace utility and driver version
@@ -41,13 +57,12 @@ enum {
 struct bfa_fcs_s;
 
 #define __fcs_min_cfg(__fcs)       ((__fcs)->min_cfg)
-void bfa_fcs_modexit_comp(struct bfa_fcs_s *fcs);
 
 #define BFA_FCS_BRCD_SWITCH_OUI  0x051e
 #define N2N_LOCAL_PID	    0x010000
 #define N2N_REMOTE_PID		0x020000
 #define	BFA_FCS_RETRY_TIMEOUT 2000
-#define BFA_FCS_PID_IS_WKA(pid)  ((bfa_os_ntoh3b(pid) > 0xFFF000) ?  1 : 0)
+#define BFA_FCS_PID_IS_WKA(pid)  ((bfa_ntoh3b(pid) > 0xFFF000) ?  1 : 0)
 
 
 
@@ -109,7 +124,7 @@ struct bfa_fcs_lport_loop_s {
 
 struct bfa_fcs_lport_n2n_s {
 	u32        rsvd;
-	u16        reply_oxid;	/*  ox_id from the req flogi to be
+	__be16     reply_oxid;	/*  ox_id from the req flogi to be
 					 *used in flogi acc */
 	wwn_t           rem_port_wwn;	/*  Attached port's wwn */
 };
@@ -316,8 +331,6 @@ void            bfa_fcs_lport_add_rport(struct bfa_fcs_lport_s *port,
 				       struct bfa_fcs_rport_s *rport);
 void            bfa_fcs_lport_del_rport(struct bfa_fcs_lport_s *port,
 				       struct bfa_fcs_rport_s *rport);
-void bfa_fcs_lport_modinit(struct bfa_fcs_s *fcs);
-void bfa_fcs_lport_modexit(struct bfa_fcs_s *fcs);
 void            bfa_fcs_lport_ns_init(struct bfa_fcs_lport_s *vport);
 void            bfa_fcs_lport_ns_offline(struct bfa_fcs_lport_s *vport);
 void            bfa_fcs_lport_ns_online(struct bfa_fcs_lport_s *vport);
@@ -359,9 +372,6 @@ bfa_status_t bfa_fcs_vport_start(struct bfa_fcs_vport_s *vport);
 bfa_status_t bfa_fcs_vport_stop(struct bfa_fcs_vport_s *vport);
 void bfa_fcs_vport_get_attr(struct bfa_fcs_vport_s *vport,
 			    struct bfa_vport_attr_s *vport_attr);
-void bfa_fcs_vport_get_stats(struct bfa_fcs_vport_s *vport,
-			     struct bfa_vport_stats_s *vport_stats);
-void bfa_fcs_vport_clr_stats(struct bfa_fcs_vport_s *vport);
 struct bfa_fcs_vport_s *bfa_fcs_vport_lookup(struct bfa_fcs_s *fcs,
 					     u16 vf_id, wwn_t vpwwn);
 void bfa_fcs_vport_cleanup(struct bfa_fcs_vport_s *vport);
@@ -406,7 +416,7 @@ struct bfa_fcs_rport_s {
 	struct bfad_rport_s	*rp_drv;	/*  driver peer instance */
 	u32	pid;	/*  port ID of rport */
 	u16	maxfrsize;	/*  maximum frame size */
-	u16	reply_oxid;	/*  OX_ID of inbound requests */
+	__be16	reply_oxid;	/*  OX_ID of inbound requests */
 	enum fc_cos	fc_cos;	/*  FC classes of service supp */
 	bfa_boolean_t	cisc;	/*  CISC capable device */
 	bfa_boolean_t	prlo;	/*  processing prlo or LOGO */
@@ -437,32 +447,18 @@ bfa_fcs_rport_get_halrport(struct bfa_fcs_rport_s *rport)
 /*
  * bfa fcs rport API functions
  */
-bfa_status_t bfa_fcs_rport_add(struct bfa_fcs_lport_s *port, wwn_t *pwwn,
-			       struct bfa_fcs_rport_s *rport,
-			       struct bfad_rport_s *rport_drv);
-bfa_status_t bfa_fcs_rport_remove(struct bfa_fcs_rport_s *rport);
-void bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport,
-			    struct bfa_rport_attr_s *attr);
-void bfa_fcs_rport_get_stats(struct bfa_fcs_rport_s *rport,
-			     struct bfa_rport_stats_s *stats);
-void bfa_fcs_rport_clear_stats(struct bfa_fcs_rport_s *rport);
 struct bfa_fcs_rport_s *bfa_fcs_rport_lookup(struct bfa_fcs_lport_s *port,
 					     wwn_t rpwwn);
 struct bfa_fcs_rport_s *bfa_fcs_rport_lookup_by_nwwn(
 	struct bfa_fcs_lport_s *port, wwn_t rnwwn);
 void bfa_fcs_rport_set_del_timeout(u8 rport_tmo);
 
-void bfa_fcs_rport_set_speed(struct bfa_fcs_rport_s *rport,
-			     enum bfa_port_speed speed);
 void bfa_fcs_rport_uf_recv(struct bfa_fcs_rport_s *rport,
 	 struct fchs_s *fchs, u16 len);
 void bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport);
 
 struct bfa_fcs_rport_s *bfa_fcs_rport_create(struct bfa_fcs_lport_s *port,
 	 u32 pid);
-void bfa_fcs_rport_delete(struct bfa_fcs_rport_s *rport);
-void bfa_fcs_rport_online(struct bfa_fcs_rport_s *rport);
-void bfa_fcs_rport_offline(struct bfa_fcs_rport_s *rport);
 void bfa_fcs_rport_start(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs,
 			 struct fc_logi_s *plogi_rsp);
 void bfa_fcs_rport_plogi_create(struct bfa_fcs_lport_s *port,
@@ -470,10 +466,8 @@ void bfa_fcs_rport_plogi_create(struct bfa_fcs_lport_s *port,
 				struct fc_logi_s *plogi);
 void bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs,
 			 struct fc_logi_s *plogi);
-void bfa_fcs_rport_logo_imp(struct bfa_fcs_rport_s *rport);
-void bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, u16 ox_id);
+void bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, __be16 ox_id);
 
-void bfa_fcs_rport_itnim_ack(struct bfa_fcs_rport_s *rport);
 void bfa_fcs_rport_itntm_ack(struct bfa_fcs_rport_s *rport);
 void bfa_fcs_rport_fcptm_offline_done(struct bfa_fcs_rport_s *rport);
 int  bfa_fcs_rport_get_state(struct bfa_fcs_rport_s *rport);
@@ -618,7 +612,7 @@ struct bfa_fcs_fdmi_hba_attr_s {
 	u8         option_rom_ver[BFA_VERSION_LEN];
 	u8         fw_version[8];
 	u8         os_name[256];
-	u32        max_ct_pyld;
+	__be32        max_ct_pyld;
 };
 
 /*
@@ -626,9 +620,9 @@ struct bfa_fcs_fdmi_hba_attr_s {
  */
 struct bfa_fcs_fdmi_port_attr_s {
 	u8         supp_fc4_types[32];	/* supported FC4 types */
-	u32        supp_speed;	/* supported speed */
-	u32        curr_speed;	/* current Speed */
-	u32        max_frm_size;	/* max frame size */
+	__be32        supp_speed;	/* supported speed */
+	__be32        curr_speed;	/* current Speed */
+	__be32        max_frm_size;	/* max frame size */
 	u8         os_device_name[256];	/* OS device Name */
 	u8         host_name[256];	/* host name */
 };
@@ -663,6 +657,57 @@ struct bfa_fcs_s {
 	struct bfa_wc_s		wc;	/*  waiting counter */
 };
 
+/*
+ *  fcs_fabric_sm fabric state machine functions
+ */
+
+/*
+ * Fabric state machine events
+ */
+enum bfa_fcs_fabric_event {
+	BFA_FCS_FABRIC_SM_CREATE        = 1,    /*  create from driver        */
+	BFA_FCS_FABRIC_SM_DELETE        = 2,    /*  delete from driver        */
+	BFA_FCS_FABRIC_SM_LINK_DOWN     = 3,    /*  link down from port      */
+	BFA_FCS_FABRIC_SM_LINK_UP       = 4,    /*  link up from port         */
+	BFA_FCS_FABRIC_SM_CONT_OP       = 5,    /*  flogi/auth continue op   */
+	BFA_FCS_FABRIC_SM_RETRY_OP      = 6,    /*  flogi/auth retry op      */
+	BFA_FCS_FABRIC_SM_NO_FABRIC     = 7,    /*  from flogi/auth           */
+	BFA_FCS_FABRIC_SM_PERF_EVFP     = 8,    /*  from flogi/auth           */
+	BFA_FCS_FABRIC_SM_ISOLATE       = 9,    /*  from EVFP processing     */
+	BFA_FCS_FABRIC_SM_NO_TAGGING    = 10,   /*  no VFT tagging from EVFP */
+	BFA_FCS_FABRIC_SM_DELAYED       = 11,   /*  timeout delay event      */
+	BFA_FCS_FABRIC_SM_AUTH_FAILED   = 12,   /*  auth failed       */
+	BFA_FCS_FABRIC_SM_AUTH_SUCCESS  = 13,   /*  auth successful           */
+	BFA_FCS_FABRIC_SM_DELCOMP       = 14,   /*  all vports deleted event */
+	BFA_FCS_FABRIC_SM_LOOPBACK      = 15,   /*  Received our own FLOGI   */
+	BFA_FCS_FABRIC_SM_START         = 16,   /*  from driver       */
+};
+
+/*
+ *  fcs_rport_sm FCS rport state machine events
+ */
+
+enum rport_event {
+	RPSM_EVENT_PLOGI_SEND   = 1,    /*  new rport; start with PLOGI */
+	RPSM_EVENT_PLOGI_RCVD   = 2,    /*  Inbound PLOGI from remote port */
+	RPSM_EVENT_PLOGI_COMP   = 3,    /*  PLOGI completed to rport    */
+	RPSM_EVENT_LOGO_RCVD    = 4,    /*  LOGO from remote device     */
+	RPSM_EVENT_LOGO_IMP     = 5,    /*  implicit logo for SLER      */
+	RPSM_EVENT_FCXP_SENT    = 6,    /*  Frame has been sent         */
+	RPSM_EVENT_DELETE       = 7,    /*  RPORT delete request        */
+	RPSM_EVENT_SCN          = 8,    /*  state change notification   */
+	RPSM_EVENT_ACCEPTED     = 9,    /*  Good response from remote device */
+	RPSM_EVENT_FAILED       = 10,   /*  Request to rport failed.    */
+	RPSM_EVENT_TIMEOUT      = 11,   /*  Rport SM timeout event      */
+	RPSM_EVENT_HCB_ONLINE  = 12,    /*  BFA rport online callback   */
+	RPSM_EVENT_HCB_OFFLINE = 13,    /*  BFA rport offline callback  */
+	RPSM_EVENT_FC4_OFFLINE = 14,    /*  FC-4 offline complete       */
+	RPSM_EVENT_ADDRESS_CHANGE = 15, /*  Rport's PID has changed     */
+	RPSM_EVENT_ADDRESS_DISC = 16,   /*  Need to Discover rport's PID */
+	RPSM_EVENT_PRLO_RCVD   = 17,    /*  PRLO from remote device     */
+	RPSM_EVENT_PLOGI_RETRY = 18,    /*  Retry PLOGI continuously */
+};
+
 /*
  * bfa fcs API functions
  */
@@ -672,16 +717,12 @@ void bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa,
 void bfa_fcs_init(struct bfa_fcs_s *fcs);
 void bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs,
 			      struct bfa_fcs_driver_info_s *driver_info);
-void bfa_fcs_set_fdmi_param(struct bfa_fcs_s *fcs, bfa_boolean_t fdmi_enable);
 void bfa_fcs_exit(struct bfa_fcs_s *fcs);
-void bfa_fcs_trc_init(struct bfa_fcs_s *fcs, struct bfa_trc_mod_s *trcmod);
-void		bfa_fcs_start(struct bfa_fcs_s *fcs);
 
 /*
  * bfa fcs vf public functions
  */
 bfa_fcs_vf_t *bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id);
-u16 bfa_fcs_fabric_vport_count(struct bfa_fcs_fabric_s *fabric);
 
 /*
  * fabric protected interface functions
@@ -689,32 +730,29 @@ u16 bfa_fcs_fabric_vport_count(struct bfa_fcs_fabric_s *fabric);
 void bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs);
 void bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs);
 void bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs);
-void bfa_fcs_fabric_modsusp(struct bfa_fcs_s *fcs);
 void bfa_fcs_fabric_link_up(struct bfa_fcs_fabric_s *fabric);
 void bfa_fcs_fabric_link_down(struct bfa_fcs_fabric_s *fabric);
 void bfa_fcs_fabric_addvport(struct bfa_fcs_fabric_s *fabric,
 	struct bfa_fcs_vport_s *vport);
 void bfa_fcs_fabric_delvport(struct bfa_fcs_fabric_s *fabric,
 	struct bfa_fcs_vport_s *vport);
-int bfa_fcs_fabric_is_online(struct bfa_fcs_fabric_s *fabric);
 struct bfa_fcs_vport_s *bfa_fcs_fabric_vport_lookup(
 		struct bfa_fcs_fabric_s *fabric, wwn_t pwwn);
 void bfa_fcs_fabric_modstart(struct bfa_fcs_s *fcs);
 void bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric,
 		struct fchs_s *fchs, u16 len);
-bfa_boolean_t	bfa_fcs_fabric_is_loopback(struct bfa_fcs_fabric_s *fabric);
-bfa_boolean_t	bfa_fcs_fabric_is_auth_failed(struct bfa_fcs_fabric_s *fabric);
-enum bfa_port_type bfa_fcs_fabric_port_type(struct bfa_fcs_fabric_s *fabric);
 void	bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric);
-void	bfa_fcs_fabric_port_delete_comp(struct bfa_fcs_fabric_s *fabric);
-bfa_status_t	bfa_fcs_fabric_addvf(struct bfa_fcs_fabric_s *vf,
-			struct bfa_fcs_s *fcs, struct bfa_lport_cfg_s *port_cfg,
-			struct bfad_vf_s *vf_drv);
 void bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric,
 	       wwn_t fabric_name);
 u16 bfa_fcs_fabric_get_switch_oui(struct bfa_fcs_fabric_s *fabric);
 void bfa_fcs_uf_attach(struct bfa_fcs_s *fcs);
 void bfa_fcs_port_attach(struct bfa_fcs_s *fcs);
+void bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric,
+			enum bfa_fcs_fabric_event event);
+void bfa_fcs_fabric_sm_loopback(struct bfa_fcs_fabric_s *fabric,
+			enum bfa_fcs_fabric_event event);
+void bfa_fcs_fabric_sm_auth_failed(struct bfa_fcs_fabric_s *fabric,
+			enum bfa_fcs_fabric_event event);
 
 /*
  * BFA FCS callback interfaces

+ 15 - 15
drivers/scsi/bfa/bfa_fcs_fcpim.c

@@ -19,9 +19,9 @@
  *  fcpim.c - FCP initiator mode i-t nexus state machine
  */
 
+#include "bfad_drv.h"
 #include "bfa_fcs.h"
 #include "bfa_fcbuild.h"
-#include "bfad_drv.h"
 #include "bfad_im.h"
 
 BFA_TRC_FILE(FCS, FCPIM);
@@ -103,7 +103,7 @@ bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim,
 		break;
 
 	case BFA_FCS_ITNIM_SM_OFFLINE:
-		bfa_fcs_rport_itnim_ack(itnim->rport);
+		bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
 		break;
 
 	case BFA_FCS_ITNIM_SM_INITIATOR:
@@ -140,7 +140,7 @@ bfa_fcs_itnim_sm_prli_send(struct bfa_fcs_itnim_s *itnim,
 	case BFA_FCS_ITNIM_SM_OFFLINE:
 		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
 		bfa_fcxp_walloc_cancel(itnim->fcs->bfa, &itnim->fcxp_wqe);
-		bfa_fcs_rport_itnim_ack(itnim->rport);
+		bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
 		break;
 
 	case BFA_FCS_ITNIM_SM_DELETE:
@@ -181,7 +181,7 @@ bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim,
 	case BFA_FCS_ITNIM_SM_OFFLINE:
 		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
 		bfa_fcxp_discard(itnim->fcxp);
-		bfa_fcs_rport_itnim_ack(itnim->rport);
+		bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
 		break;
 
 	case BFA_FCS_ITNIM_SM_INITIATOR:
@@ -217,7 +217,7 @@ bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim,
 		} else {
 			/* invoke target offline */
 			bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
-			bfa_fcs_rport_logo_imp(itnim->rport);
+			bfa_sm_send_event(itnim->rport, RPSM_EVENT_LOGO_IMP);
 		}
 		break;
 
@@ -225,7 +225,7 @@ bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim,
 	case BFA_FCS_ITNIM_SM_OFFLINE:
 		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
 		bfa_timer_stop(&itnim->timer);
-		bfa_fcs_rport_itnim_ack(itnim->rport);
+		bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
 		break;
 
 	case BFA_FCS_ITNIM_SM_INITIATOR:
@@ -269,7 +269,7 @@ bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim,
 	case BFA_FCS_ITNIM_SM_OFFLINE:
 		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
 		bfa_itnim_offline(itnim->bfa_itnim);
-		bfa_fcs_rport_itnim_ack(itnim->rport);
+		bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
 		break;
 
 	case BFA_FCS_ITNIM_SM_DELETE:
@@ -330,7 +330,7 @@ bfa_fcs_itnim_sm_hcb_offline(struct bfa_fcs_itnim_s *itnim,
 	switch (event) {
 	case BFA_FCS_ITNIM_SM_HCB_OFFLINE:
 		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
-		bfa_fcs_rport_itnim_ack(itnim->rport);
+		bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
 		break;
 
 	case BFA_FCS_ITNIM_SM_DELETE:
@@ -358,7 +358,7 @@ bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim,
 	switch (event) {
 	case BFA_FCS_ITNIM_SM_OFFLINE:
 		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
-		bfa_fcs_rport_itnim_ack(itnim->rport);
+		bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
 		break;
 
 	case BFA_FCS_ITNIM_SM_RSP_ERROR:
@@ -536,7 +536,7 @@ bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport)
 	if (bfa_itnim == NULL) {
 		bfa_trc(port->fcs, rport->pwwn);
 		bfa_fcb_itnim_free(port->fcs->bfad, itnim_drv);
-		bfa_assert(0);
+		WARN_ON(1);
 		return NULL;
 	}
 
@@ -688,7 +688,7 @@ bfa_cb_itnim_sler(void *cb_arg)
 
 	itnim->stats.sler++;
 	bfa_trc(itnim->fcs, itnim->rport->pwwn);
-	bfa_fcs_rport_logo_imp(itnim->rport);
+	bfa_sm_send_event(itnim->rport, RPSM_EVENT_LOGO_IMP);
 }
 
 struct bfa_fcs_itnim_s *
@@ -700,7 +700,7 @@ bfa_fcs_itnim_lookup(struct bfa_fcs_lport_s *port, wwn_t rpwwn)
 	if (!rport)
 		return NULL;
 
-	bfa_assert(rport->itnim != NULL);
+	WARN_ON(rport->itnim == NULL);
 	return rport->itnim;
 }
 
@@ -729,7 +729,7 @@ bfa_fcs_itnim_stats_get(struct bfa_fcs_lport_s *port, wwn_t rpwwn,
 {
 	struct bfa_fcs_itnim_s *itnim = NULL;
 
-	bfa_assert(port != NULL);
+	WARN_ON(port == NULL);
 
 	itnim = bfa_fcs_itnim_lookup(port, rpwwn);
 
@@ -746,7 +746,7 @@ bfa_fcs_itnim_stats_clear(struct bfa_fcs_lport_s *port, wwn_t rpwwn)
 {
 	struct bfa_fcs_itnim_s *itnim = NULL;
 
-	bfa_assert(port != NULL);
+	WARN_ON(port == NULL);
 
 	itnim = bfa_fcs_itnim_lookup(port, rpwwn);
 
@@ -778,6 +778,6 @@ bfa_fcs_fcpim_uf_recv(struct bfa_fcs_itnim_s *itnim,
 		break;
 
 	default:
-		bfa_assert(0);
+		WARN_ON(1);
 	}
 }
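
Every bfa_assert() conversion in this file inverts the predicate, because the two macros fire on opposite conditions: bfa_assert() complained when the required condition was false, while WARN_ON() complains when the failure condition is true (bfa_assert(rport->itnim != NULL) becomes WARN_ON(rport->itnim == NULL), bfa_assert(0) becomes WARN_ON(1)). A toy illustration of the inversion with hypothetical macros, outside the kernel:

#include <stdio.h>

/* assert-style: complain when the required condition is FALSE */
#define my_assert(cond) \
	do { if (!(cond)) fprintf(stderr, "assert failed: %s\n", #cond); } while (0)

/* WARN_ON-style: complain when the failure condition is TRUE */
#define my_warn_on(cond) \
	do { if (cond) fprintf(stderr, "warning: %s\n", #cond); } while (0)

int main(void)
{
	void *p = NULL;

	my_assert(p != NULL);      /* old style: assert what must hold    */
	my_warn_on(p == NULL);     /* new style: warn on what must not    */
	return 0;
}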

+ 177 - 213
drivers/scsi/bfa/bfa_fcs_lport.c

@@ -15,10 +15,10 @@
  * General Public License for more details.
  */
 
+#include "bfad_drv.h"
 #include "bfa_fcs.h"
 #include "bfa_fcbuild.h"
 #include "bfa_fc.h"
-#include "bfad_drv.h"
 
 BFA_TRC_FILE(FCS, PORT);
 
@@ -159,7 +159,7 @@ bfa_fcs_lport_sm_online(
 			bfa_sm_set_state(port, bfa_fcs_lport_sm_deleting);
 			list_for_each_safe(qe, qen, &port->rport_q) {
 				rport = (struct bfa_fcs_rport_s *) qe;
-				bfa_fcs_rport_delete(rport);
+				bfa_sm_send_event(rport, RPSM_EVENT_DELETE);
 			}
 		}
 		break;
@@ -197,7 +197,7 @@ bfa_fcs_lport_sm_offline(
 			bfa_sm_set_state(port, bfa_fcs_lport_sm_deleting);
 			list_for_each_safe(qe, qen, &port->rport_q) {
 				rport = (struct bfa_fcs_rport_s *) qe;
-				bfa_fcs_rport_delete(rport);
+				bfa_sm_send_event(rport, RPSM_EVENT_DELETE);
 			}
 		}
 		break;
@@ -309,6 +309,7 @@ bfa_fcs_lport_plogi(struct bfa_fcs_lport_s *port,
 			return;
 		}
 		port->pid  = rx_fchs->d_id;
+		bfa_lps_set_n2n_pid(port->fabric->lps, rx_fchs->d_id);
 	}
 
 	/*
@@ -323,6 +324,7 @@ bfa_fcs_lport_plogi(struct bfa_fcs_lport_s *port,
 			(memcmp((void *)&bfa_fcs_lport_get_pwwn(port),
 			(void *)&plogi->port_name, sizeof(wwn_t)) < 0)) {
 			port->pid  = rx_fchs->d_id;
+			bfa_lps_set_n2n_pid(port->fabric->lps, rx_fchs->d_id);
 			rport->pid = rx_fchs->s_id;
 		}
 		bfa_fcs_rport_plogi(rport, rx_fchs, plogi);
@@ -349,8 +351,8 @@ bfa_fcs_lport_plogi(struct bfa_fcs_lport_s *port,
 		 * This is a different device with the same pid. Old device
 		 * disappeared. Send implicit LOGO to old device.
 		 */
-		bfa_assert(rport->pwwn != plogi->port_name);
-		bfa_fcs_rport_logo_imp(rport);
+		WARN_ON(rport->pwwn == plogi->port_name);
+		bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP);
 
 		/*
 		 * Inbound PLOGI from a new device (with old PID).
@@ -362,7 +364,7 @@ bfa_fcs_lport_plogi(struct bfa_fcs_lport_s *port,
 	/*
 	 * PLOGI crossing each other.
 	 */
-	bfa_assert(rport->pwwn == WWN_NULL);
+	WARN_ON(rport->pwwn != WWN_NULL);
 	bfa_fcs_rport_plogi(rport, rx_fchs, plogi);
 }
 
@@ -511,7 +513,8 @@ bfa_fcs_lport_offline_actions(struct bfa_fcs_lport_s *port)
 	__port_action[port->fabric->fab_type].offline(port);
 
 	wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
-	if (bfa_fcs_fabric_is_online(port->fabric) == BFA_TRUE)
+	if (bfa_sm_cmp_state(port->fabric,
+			bfa_fcs_fabric_sm_online) == BFA_TRUE)
 		BFA_LOG(KERN_ERR, bfad, bfa_log_level,
 		"Logical port lost fabric connectivity: WWN = %s Role = %s\n",
 		lpwwn_buf, "Initiator");
@@ -522,26 +525,26 @@ bfa_fcs_lport_offline_actions(struct bfa_fcs_lport_s *port)
 
 	list_for_each_safe(qe, qen, &port->rport_q) {
 		rport = (struct bfa_fcs_rport_s *) qe;
-		bfa_fcs_rport_offline(rport);
+		bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP);
 	}
 }
 
 static void
 bfa_fcs_lport_unknown_init(struct bfa_fcs_lport_s *port)
 {
-	bfa_assert(0);
+	WARN_ON(1);
 }
 
 static void
 bfa_fcs_lport_unknown_online(struct bfa_fcs_lport_s *port)
 {
-	bfa_assert(0);
+	WARN_ON(1);
 }
 
 static void
 bfa_fcs_lport_unknown_offline(struct bfa_fcs_lport_s *port)
 {
-	bfa_assert(0);
+	WARN_ON(1);
 }
 
 static void
@@ -584,33 +587,11 @@ bfa_fcs_lport_deleted(struct bfa_fcs_lport_s *port)
 				port->vport ? port->vport->vport_drv : NULL);
 		bfa_fcs_vport_delete_comp(port->vport);
 	} else {
-		 bfa_fcs_fabric_port_delete_comp(port->fabric);
+		bfa_wc_down(&port->fabric->wc);
 	}
 }
 
 
-
-/*
- *  fcs_lport_api BFA FCS port API
- */
-/*
- *   Module initialization
- */
-void
-bfa_fcs_lport_modinit(struct bfa_fcs_s *fcs)
-{
-
-}
-
-/*
- *   Module cleanup
- */
-void
-bfa_fcs_lport_modexit(struct bfa_fcs_s *fcs)
-{
-	bfa_fcs_modexit_comp(fcs);
-}
-
 /*
  * Unsolicited frame receive handling.
  */
@@ -623,6 +604,7 @@ bfa_fcs_lport_uf_recv(struct bfa_fcs_lport_s *lport,
 	struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
 
 	bfa_stats(lport, uf_recvs);
+	bfa_trc(lport->fcs, fchs->type);
 
 	if (!bfa_fcs_lport_is_online(lport)) {
 		bfa_stats(lport, uf_recv_drops);
@@ -682,8 +664,11 @@ bfa_fcs_lport_uf_recv(struct bfa_fcs_lport_s *lport,
 	 * Only handles ELS frames for now.
 	 */
 	if (fchs->type != FC_TYPE_ELS) {
-		bfa_trc(lport->fcs, fchs->type);
-		bfa_assert(0);
+		bfa_trc(lport->fcs, fchs->s_id);
+		bfa_trc(lport->fcs, fchs->d_id);
+		/* ignore type FC_TYPE_FC_FSS */
+		if (fchs->type != FC_TYPE_FC_FSS)
+			bfa_sm_fault(lport->fcs, fchs->type);
 		return;
 	}
 
@@ -792,7 +777,7 @@ bfa_fcs_lport_del_rport(
 	struct bfa_fcs_lport_s *port,
 	struct bfa_fcs_rport_s *rport)
 {
-	bfa_assert(bfa_q_is_on_q(&port->rport_q, rport));
+	WARN_ON(!bfa_q_is_on_q(&port->rport_q, rport));
 	list_del(&rport->qe);
 	port->num_rports--;
 
@@ -850,8 +835,8 @@ bfa_fcs_lport_attach(struct bfa_fcs_lport_s *lport, struct bfa_fcs_s *fcs,
 	lport->fcs = fcs;
 	lport->fabric = bfa_fcs_vf_lookup(fcs, vf_id);
 	lport->vport = vport;
-	lport->lp_tag = (vport) ? bfa_lps_get_tag(vport->lps) :
-				  bfa_lps_get_tag(lport->fabric->lps);
+	lport->lp_tag = (vport) ? vport->lps->lp_tag :
+				  lport->fabric->lps->lp_tag;
 
 	INIT_LIST_HEAD(&lport->rport_q);
 	lport->num_rports = 0;
@@ -903,10 +888,12 @@ bfa_fcs_lport_get_attr(
 	port_attr->port_cfg = port->port_cfg;
 
 	if (port->fabric) {
-		port_attr->port_type = bfa_fcs_fabric_port_type(port->fabric);
-		port_attr->loopback = bfa_fcs_fabric_is_loopback(port->fabric);
+		port_attr->port_type = port->fabric->oper_type;
+		port_attr->loopback = bfa_sm_cmp_state(port->fabric,
+				bfa_fcs_fabric_sm_loopback);
 		port_attr->authfail =
-			bfa_fcs_fabric_is_auth_failed(port->fabric);
+			bfa_sm_cmp_state(port->fabric,
+				bfa_fcs_fabric_sm_auth_failed);
 		port_attr->fabric_name  = bfa_fcs_lport_get_fabric_name(port);
 		memcpy(port_attr->fabric_ip_addr,
 			bfa_fcs_lport_get_fabric_ipaddr(port),
@@ -915,10 +902,10 @@ bfa_fcs_lport_get_attr(
 		if (port->vport != NULL) {
 			port_attr->port_type = BFA_PORT_TYPE_VPORT;
 			port_attr->fpma_mac =
-				bfa_lps_get_lp_mac(port->vport->lps);
+				port->vport->lps->lp_mac;
 		} else {
 			port_attr->fpma_mac =
-				bfa_lps_get_lp_mac(port->fabric->lps);
+				port->fabric->lps->lp_mac;
 		}
 	} else {
 		port_attr->port_type = BFA_PORT_TYPE_UNKNOWN;
@@ -998,6 +985,7 @@ bfa_fcs_lport_n2n_online(struct bfa_fcs_lport_s *port)
 	    ((void *)&pcfg->pwwn, (void *)&n2n_port->rem_port_wwn,
 	     sizeof(wwn_t)) > 0) {
 		port->pid = N2N_LOCAL_PID;
+		bfa_lps_set_n2n_pid(port->fabric->lps, N2N_LOCAL_PID);
 		/*
 		 * First, check if we know the device by pwwn.
 		 */
@@ -1007,7 +995,7 @@ bfa_fcs_lport_n2n_online(struct bfa_fcs_lport_s *port)
 			bfa_trc(port->fcs, rport->pid);
 			bfa_trc(port->fcs, rport->pwwn);
 			rport->pid = N2N_REMOTE_PID;
-			bfa_fcs_rport_online(rport);
+			bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_SEND);
 			return;
 		}
 
@@ -1017,10 +1005,10 @@ bfa_fcs_lport_n2n_online(struct bfa_fcs_lport_s *port)
 		 */
 		if (port->num_rports > 0) {
 			rport = bfa_fcs_lport_get_rport_by_pid(port, 0);
-			bfa_assert(rport != NULL);
+			WARN_ON(rport == NULL);
 			if (rport) {
 				bfa_trc(port->fcs, rport->pwwn);
-				bfa_fcs_rport_delete(rport);
+				bfa_sm_send_event(rport, RPSM_EVENT_DELETE);
 			}
 		}
 		bfa_fcs_rport_create(port, N2N_REMOTE_PID);
@@ -1569,6 +1557,7 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
 	struct fdmi_attr_s *attr;
 	u8        *curr_ptr;
 	u16        len, count;
+	u16	templen;
 
 	/*
 	 * get hba attributes
@@ -1594,69 +1583,69 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
 	 */
 	attr = (struct fdmi_attr_s *) curr_ptr;
 	attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_NODENAME);
-	attr->len = sizeof(wwn_t);
-	memcpy(attr->value, &bfa_fcs_lport_get_nwwn(port), attr->len);
-	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
-	len += attr->len;
+	templen = sizeof(wwn_t);
+	memcpy(attr->value, &bfa_fcs_lport_get_nwwn(port), templen);
+	curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+	len += templen;
 	count++;
-	attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
-			     sizeof(attr->len));
+	attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+			     sizeof(templen));
 
 	/*
 	 * Manufacturer
 	 */
 	attr = (struct fdmi_attr_s *) curr_ptr;
 	attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MANUFACTURER);
-	attr->len = (u16) strlen(fcs_hba_attr->manufacturer);
-	memcpy(attr->value, fcs_hba_attr->manufacturer, attr->len);
-	attr->len = fc_roundup(attr->len, sizeof(u32));
-	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
-	len += attr->len;
+	templen = (u16) strlen(fcs_hba_attr->manufacturer);
+	memcpy(attr->value, fcs_hba_attr->manufacturer, templen);
+	templen = fc_roundup(templen, sizeof(u32));
+	curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+	len += templen;
 	count++;
-	attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
-			     sizeof(attr->len));
+	attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+			     sizeof(templen));
 
 	/*
 	 * Serial Number
 	 */
 	attr = (struct fdmi_attr_s *) curr_ptr;
 	attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_SERIALNUM);
-	attr->len = (u16) strlen(fcs_hba_attr->serial_num);
-	memcpy(attr->value, fcs_hba_attr->serial_num, attr->len);
-	attr->len = fc_roundup(attr->len, sizeof(u32));
-	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
-	len += attr->len;
+	templen = (u16) strlen(fcs_hba_attr->serial_num);
+	memcpy(attr->value, fcs_hba_attr->serial_num, templen);
+	templen = fc_roundup(templen, sizeof(u32));
+	curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+	len += templen;
 	count++;
-	attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
-			     sizeof(attr->len));
+	attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+			     sizeof(templen));
 
 	/*
 	 * Model
 	 */
 	attr = (struct fdmi_attr_s *) curr_ptr;
 	attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MODEL);
-	attr->len = (u16) strlen(fcs_hba_attr->model);
-	memcpy(attr->value, fcs_hba_attr->model, attr->len);
-	attr->len = fc_roundup(attr->len, sizeof(u32));
-	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
-	len += attr->len;
+	templen = (u16) strlen(fcs_hba_attr->model);
+	memcpy(attr->value, fcs_hba_attr->model, templen);
+	templen = fc_roundup(templen, sizeof(u32));
+	curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+	len += templen;
 	count++;
-	attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
-			     sizeof(attr->len));
+	attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+			     sizeof(templen));
 
 	/*
 	 * Model Desc
 	 */
 	attr = (struct fdmi_attr_s *) curr_ptr;
 	attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MODEL_DESC);
-	attr->len = (u16) strlen(fcs_hba_attr->model_desc);
-	memcpy(attr->value, fcs_hba_attr->model_desc, attr->len);
-	attr->len = fc_roundup(attr->len, sizeof(u32));
-	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
-	len += attr->len;
+	templen = (u16) strlen(fcs_hba_attr->model_desc);
+	memcpy(attr->value, fcs_hba_attr->model_desc, templen);
+	templen = fc_roundup(templen, sizeof(u32));
+	curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+	len += templen;
 	count++;
-	attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
-			     sizeof(attr->len));
+	attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+			     sizeof(templen));
 
 	/*
 	 * H/W Version
@@ -1664,14 +1653,14 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
 	if (fcs_hba_attr->hw_version[0] != '\0') {
 		attr = (struct fdmi_attr_s *) curr_ptr;
 		attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_HW_VERSION);
-		attr->len = (u16) strlen(fcs_hba_attr->hw_version);
-		memcpy(attr->value, fcs_hba_attr->hw_version, attr->len);
-		attr->len = fc_roundup(attr->len, sizeof(u32));
-		curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
-		len += attr->len;
+		templen = (u16) strlen(fcs_hba_attr->hw_version);
+		memcpy(attr->value, fcs_hba_attr->hw_version, templen);
+		templen = fc_roundup(templen, sizeof(u32));
+		curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+		len += templen;
 		count++;
-		attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
-					 sizeof(attr->len));
+		attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+					 sizeof(templen));
 	}
 
 	/*
@@ -1679,14 +1668,14 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
 	 */
 	attr = (struct fdmi_attr_s *) curr_ptr;
 	attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_DRIVER_VERSION);
-	attr->len = (u16) strlen(fcs_hba_attr->driver_version);
-	memcpy(attr->value, fcs_hba_attr->driver_version, attr->len);
-	attr->len = fc_roundup(attr->len, sizeof(u32));
-	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
-	len += attr->len;;
+	templen = (u16) strlen(fcs_hba_attr->driver_version);
+	memcpy(attr->value, fcs_hba_attr->driver_version, templen);
+	templen = fc_roundup(templen, sizeof(u32));
+	curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+	len += templen;
 	count++;
-	attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
-			     sizeof(attr->len));
+	attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+			     sizeof(templen));
 
 	/*
 	 * Option Rom Version
@@ -1694,14 +1683,14 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
 	if (fcs_hba_attr->option_rom_ver[0] != '\0') {
 		attr = (struct fdmi_attr_s *) curr_ptr;
 		attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_ROM_VERSION);
-		attr->len = (u16) strlen(fcs_hba_attr->option_rom_ver);
-		memcpy(attr->value, fcs_hba_attr->option_rom_ver, attr->len);
-		attr->len = fc_roundup(attr->len, sizeof(u32));
-		curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
-		len += attr->len;
+		templen = (u16) strlen(fcs_hba_attr->option_rom_ver);
+		memcpy(attr->value, fcs_hba_attr->option_rom_ver, templen);
+		templen = fc_roundup(templen, sizeof(u32));
+		curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+		len += templen;
 		count++;
-		attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
-					 sizeof(attr->len));
+		attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+					 sizeof(templen));
 	}
 
 	/*
@@ -1709,14 +1698,14 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
 	 */
 	attr = (struct fdmi_attr_s *) curr_ptr;
 	attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_FW_VERSION);
-	attr->len = (u16) strlen(fcs_hba_attr->driver_version);
-	memcpy(attr->value, fcs_hba_attr->driver_version, attr->len);
-	attr->len = fc_roundup(attr->len, sizeof(u32));
-	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
-	len += attr->len;
+	templen = (u16) strlen(fcs_hba_attr->driver_version);
+	memcpy(attr->value, fcs_hba_attr->driver_version, templen);
+	templen = fc_roundup(templen, sizeof(u32));
+	curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+	len += templen;
 	count++;
-	attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
-			     sizeof(attr->len));
+	attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+			     sizeof(templen));
 
 	/*
 	 * OS Name
@@ -1724,14 +1713,14 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
 	if (fcs_hba_attr->os_name[0] != '\0') {
 		attr = (struct fdmi_attr_s *) curr_ptr;
 		attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_OS_NAME);
-		attr->len = (u16) strlen(fcs_hba_attr->os_name);
-		memcpy(attr->value, fcs_hba_attr->os_name, attr->len);
-		attr->len = fc_roundup(attr->len, sizeof(u32));
-		curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
-		len += attr->len;
+		templen = (u16) strlen(fcs_hba_attr->os_name);
+		memcpy(attr->value, fcs_hba_attr->os_name, templen);
+		templen = fc_roundup(templen, sizeof(u32));
+		curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+		len += templen;
 		count++;
-		attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
-					sizeof(attr->len));
+		attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+					sizeof(templen));
 	}
 
 	/*
@@ -1739,12 +1728,12 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
 	 */
 	attr = (struct fdmi_attr_s *) curr_ptr;
 	attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MAX_CT);
-	attr->len = sizeof(fcs_hba_attr->max_ct_pyld);
-	memcpy(attr->value, &fcs_hba_attr->max_ct_pyld, attr->len);
-	len += attr->len;
+	templen = sizeof(fcs_hba_attr->max_ct_pyld);
+	memcpy(attr->value, &fcs_hba_attr->max_ct_pyld, templen);
+	len += templen;
 	count++;
-	attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
-			     sizeof(attr->len));
+	attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+			     sizeof(templen));
 
 	/*
 	 * Update size of payload
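
The change repeated through these FDMI builders is the reason for the new templen local: attr->len ends up big-endian on the wire, so the rewrite writes it exactly once via cpu_to_be16() and keeps all host-order pointer and length arithmetic in templen instead of reusing the wire field as scratch space. A reduced sketch of the corrected pattern, with a hypothetical TLV struct standing in for fdmi_attr_s and htons() standing in for the kernel's cpu_to_be16():

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>   /* htons(): userspace stand-in for cpu_to_be16() */

/* Hypothetical TLV attribute; both fields are big-endian on the wire. */
struct tlv_attr {
	uint16_t type;
	uint16_t len;     /* sizeof(type) + sizeof(len) + padded payload */
	uint8_t  value[];
};

/* Append one string attribute at curr_ptr; returns bytes consumed.
 * Padding bytes are assumed already zeroed by the caller. */
static size_t tlv_add_string(uint8_t *curr_ptr, uint16_t type, const char *s)
{
	struct tlv_attr *attr = (struct tlv_attr *)curr_ptr;
	uint16_t templen;                         /* CPU-order scratch length */

	attr->type = htons(type);
	templen = (uint16_t)strlen(s);
	memcpy(attr->value, s, templen);
	templen = (uint16_t)((templen + 3) & ~3u); /* round up to a u32 multiple */

	/* All arithmetic uses templen; the wire field is written once, swapped. */
	attr->len = htons(templen + sizeof(attr->type) + sizeof(attr->len));

	return sizeof(attr->type) + sizeof(attr->len) + templen;
}

In the driver the caller also accumulates the running payload length and attribute count; that bookkeeping now reads templen as well, so no host-order intermediate ever lands in the on-wire length field.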
@@ -1845,6 +1834,7 @@ bfa_fcs_lport_fdmi_build_portattr_block(struct bfa_fcs_lport_fdmi_s *fdmi,
 	u8        *curr_ptr;
 	u16        len;
 	u8	count = 0;
+	u16	templen;
 
 	/*
 	 * get port attributes
@@ -1863,54 +1853,54 @@ bfa_fcs_lport_fdmi_build_portattr_block(struct bfa_fcs_lport_fdmi_s *fdmi,
 	 */
 	attr = (struct fdmi_attr_s *) curr_ptr;
 	attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_FC4_TYPES);
-	attr->len = sizeof(fcs_port_attr.supp_fc4_types);
-	memcpy(attr->value, fcs_port_attr.supp_fc4_types, attr->len);
-	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
-	len += attr->len;
+	templen = sizeof(fcs_port_attr.supp_fc4_types);
+	memcpy(attr->value, fcs_port_attr.supp_fc4_types, templen);
+	curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+	len += templen;
 	++count;
 	attr->len =
-		cpu_to_be16(attr->len + sizeof(attr->type) +
-			     sizeof(attr->len));
+		cpu_to_be16(templen + sizeof(attr->type) +
+			     sizeof(templen));
 
 	/*
 	 * Supported Speed
 	 */
 	attr = (struct fdmi_attr_s *) curr_ptr;
 	attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_SUPP_SPEED);
-	attr->len = sizeof(fcs_port_attr.supp_speed);
-	memcpy(attr->value, &fcs_port_attr.supp_speed, attr->len);
-	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
-	len += attr->len;
+	templen = sizeof(fcs_port_attr.supp_speed);
+	memcpy(attr->value, &fcs_port_attr.supp_speed, templen);
+	curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+	len += templen;
 	++count;
 	attr->len =
-		cpu_to_be16(attr->len + sizeof(attr->type) +
-			     sizeof(attr->len));
+		cpu_to_be16(templen + sizeof(attr->type) +
+			     sizeof(templen));
 
 	/*
 	 * current Port Speed
 	 */
 	attr = (struct fdmi_attr_s *) curr_ptr;
 	attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_PORT_SPEED);
-	attr->len = sizeof(fcs_port_attr.curr_speed);
-	memcpy(attr->value, &fcs_port_attr.curr_speed, attr->len);
-	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
-	len += attr->len;
+	templen = sizeof(fcs_port_attr.curr_speed);
+	memcpy(attr->value, &fcs_port_attr.curr_speed, templen);
+	curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+	len += templen;
 	++count;
-	attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
-			     sizeof(attr->len));
+	attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+			     sizeof(templen));
 
 	/*
 	 * max frame size
 	 */
 	attr = (struct fdmi_attr_s *) curr_ptr;
 	attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_FRAME_SIZE);
-	attr->len = sizeof(fcs_port_attr.max_frm_size);
-	memcpy(attr->value, &fcs_port_attr.max_frm_size, attr->len);
-	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
-	len += attr->len;
+	templen = sizeof(fcs_port_attr.max_frm_size);
+	memcpy(attr->value, &fcs_port_attr.max_frm_size, templen);
+	curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+	len += templen;
 	++count;
-	attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
-			     sizeof(attr->len));
+	attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+			     sizeof(templen));
 
 	/*
 	 * OS Device Name
@@ -1918,14 +1908,14 @@ bfa_fcs_lport_fdmi_build_portattr_block(struct bfa_fcs_lport_fdmi_s *fdmi,
 	if (fcs_port_attr.os_device_name[0] != '\0') {
 		attr = (struct fdmi_attr_s *) curr_ptr;
 		attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_DEV_NAME);
-		attr->len = (u16) strlen(fcs_port_attr.os_device_name);
-		memcpy(attr->value, fcs_port_attr.os_device_name, attr->len);
-		attr->len = fc_roundup(attr->len, sizeof(u32));
-		curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
-		len += attr->len;
+		templen = (u16) strlen(fcs_port_attr.os_device_name);
+		memcpy(attr->value, fcs_port_attr.os_device_name, templen);
+		templen = fc_roundup(templen, sizeof(u32));
+		curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+		len += templen;
 		++count;
-		attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
-					sizeof(attr->len));
+		attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+					sizeof(templen));
 	}
 	/*
 	 * Host Name
@@ -1933,14 +1923,14 @@ bfa_fcs_lport_fdmi_build_portattr_block(struct bfa_fcs_lport_fdmi_s *fdmi,
 	if (fcs_port_attr.host_name[0] != '\0') {
 		attr = (struct fdmi_attr_s *) curr_ptr;
 		attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_HOST_NAME);
-		attr->len = (u16) strlen(fcs_port_attr.host_name);
-		memcpy(attr->value, fcs_port_attr.host_name, attr->len);
-		attr->len = fc_roundup(attr->len, sizeof(u32));
-		curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
-		len += attr->len;
+		templen = (u16) strlen(fcs_port_attr.host_name);
+		memcpy(attr->value, fcs_port_attr.host_name, templen);
+		templen = fc_roundup(templen, sizeof(u32));
+		curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+		len += templen;
 		++count;
-		attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
-				sizeof(attr->len));
+		attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+				sizeof(templen));
 	}
 
 	/*
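The FDMI hunks above rework the attribute packing so the on-wire attr->len field is no longer used as a scratch variable: the length is computed, rounded, and accumulated in a local templen, and only the final big-endian value is stored into the header once. A minimal standalone sketch of that type/length/value packing pattern follows; the struct and the attribute type codes are simplified stand-ins for illustration, not the driver's real fdmi_attr_s definitions.

/* Minimal sketch of the FDMI attribute (type/len/value) packing pattern
 * shown above: lengths stay in a local host-order variable and only the
 * final big-endian length is written into the wire header.
 * Types and type codes here are simplified stand-ins. */
#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <arpa/inet.h>

struct fdmi_attr {			/* simplified stand-in for fdmi_attr_s */
	uint16_t type;			/* attribute type, big endian on the wire */
	uint16_t len;			/* type + len + padded value, big endian */
	uint8_t  value[];
};

static uint16_t fc_roundup(uint16_t len, uint16_t align)
{
	return (uint16_t)((len + align - 1) & ~(align - 1));
}

/* Append one string attribute and return the bytes consumed. */
static size_t fdmi_add_string_attr(uint8_t *curr_ptr, uint16_t type,
				   const char *str)
{
	struct fdmi_attr *attr = (struct fdmi_attr *)curr_ptr;
	uint16_t templen = (uint16_t)strlen(str);

	attr->type = htons(type);
	memcpy(attr->value, str, templen);
	templen = fc_roundup(templen, sizeof(uint32_t));
	attr->len = htons(templen + sizeof(attr->type) + sizeof(templen));
	return sizeof(attr->type) + sizeof(templen) + templen;
}

int main(void)
{
	uint8_t pyld[256] = { 0 };
	size_t used = 0;

	/* 0x0008 and 0x0009 are arbitrary example type codes */
	used += fdmi_add_string_attr(pyld + used, 0x0008, "example-host");
	used += fdmi_add_string_attr(pyld + used, 0x0009, "fw-1.0.0");
	printf("payload bytes used: %zu\n", used);
	return 0;
}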
@@ -2103,7 +2093,7 @@ bfa_fcs_lport_fdmi_timeout(void *arg)
 	bfa_sm_send_event(fdmi, FDMISM_EVENT_TIMEOUT);
 }
 
-void
+static void
 bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
 			 struct bfa_fcs_fdmi_hba_attr_s *hba_attr)
 {
@@ -2147,7 +2137,7 @@ bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
 	hba_attr->max_ct_pyld = cpu_to_be32(FC_MAX_PDUSZ);
 }
 
-void
+static void
 bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi,
 			  struct bfa_fcs_fdmi_port_attr_s *port_attr)
 {
@@ -2560,7 +2550,7 @@ bfa_fcs_lport_ms_send_gmal(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced)
 
 	len = fc_gmal_req_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
 			     bfa_fcs_lport_get_fcid(port),
-				 bfa_lps_get_peer_nwwn(port->fabric->lps));
+				 port->fabric->lps->pr_nwwn);
 
 	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
 			  FC_CLASS_3, len, &fchs,
@@ -2760,7 +2750,7 @@ bfa_fcs_lport_ms_send_gfn(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced)
 
 	len = fc_gfn_req_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
 			     bfa_fcs_lport_get_fcid(port),
-				 bfa_lps_get_peer_nwwn(port->fabric->lps));
+				 port->fabric->lps->pr_nwwn);
 
 	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
 			  FC_CLASS_3, len, &fchs,
@@ -2836,7 +2826,7 @@ bfa_fcs_lport_ms_send_plogi(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced)
 	ms->fcxp = fcxp;
 
 	len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
-			     bfa_os_hton3b(FC_MGMT_SERVER),
+			     bfa_hton3b(FC_MGMT_SERVER),
 			     bfa_fcs_lport_get_fcid(port), 0,
 			     port->port_cfg.pwwn, port->port_cfg.nwwn,
 				 bfa_fcport_get_maxfrsize(port->fcs->bfa));
@@ -3593,7 +3583,7 @@ fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
 	ns->fcxp = fcxp;
 
 	len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
-			     bfa_os_hton3b(FC_NAME_SERVER),
+			     bfa_hton3b(FC_NAME_SERVER),
 			     bfa_fcs_lport_get_fcid(port), 0,
 			     port->port_cfg.pwwn, port->port_cfg.nwwn,
 				 bfa_fcport_get_maxfrsize(port->fcs->bfa));
@@ -4150,7 +4140,7 @@ bfa_fcs_lport_ns_query(struct bfa_fcs_lport_s *port)
 	bfa_sm_send_event(ns, NSSM_EVENT_NS_QUERY);
 }
 
-void
+static void
 bfa_fcs_lport_ns_boot_target_disc(bfa_fcs_lport_t *port)
 {
 
@@ -4163,7 +4153,7 @@ bfa_fcs_lport_ns_boot_target_disc(bfa_fcs_lport_t *port)
 
 	for (ii = 0 ; ii < nwwns; ++ii) {
 		rport = bfa_fcs_rport_create_by_wwn(port, wwns[ii]);
-		bfa_assert(rport);
+		WARN_ON(!rport);
 	}
 }
 
@@ -4352,8 +4342,8 @@ bfa_fcs_lport_scn_send_scr(void *scn_cbarg, struct bfa_fcxp_s *fcxp_alloced)
 	/* Handle VU registrations for Base port only */
 	if ((!port->vport) && bfa_ioc_get_fcmode(&port->fcs->bfa->ioc)) {
 		len = fc_scr_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
-				bfa_lps_is_brcd_fabric(port->fabric->lps),
-							port->pid, 0);
+				port->fabric->lps->brcd_switch,
+				port->pid, 0);
 	} else {
 	    len = fc_scr_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
 				    BFA_FALSE,
@@ -4626,7 +4616,7 @@ bfa_fcs_lport_scn_process_rscn(struct bfa_fcs_lport_s *port,
 
 
 		default:
-			bfa_assert(0);
+			WARN_ON(1);
 			nsquery = BFA_TRUE;
 		}
 	}
@@ -4672,7 +4662,7 @@ bfa_fcs_lport_get_rport(struct bfa_fcs_lport_s *port, wwn_t wwn, int index,
 
 	while ((qe != qh) && (i < nrports)) {
 		rport = (struct bfa_fcs_rport_s *) qe;
-		if (bfa_os_ntoh3b(rport->pid) > 0xFFF000) {
+		if (bfa_ntoh3b(rport->pid) > 0xFFF000) {
 			qe = bfa_q_next(qe);
 			bfa_trc(fcs, (u32) rport->pwwn);
 			bfa_trc(fcs, rport->pid);
@@ -4720,7 +4710,7 @@ bfa_fcs_lport_get_rports(struct bfa_fcs_lport_s *port,
 
 	while ((qe != qh) && (i < *nrports)) {
 		rport = (struct bfa_fcs_rport_s *) qe;
-		if (bfa_os_ntoh3b(rport->pid) > 0xFFF000) {
+		if (bfa_ntoh3b(rport->pid) > 0xFFF000) {
 			qe = bfa_q_next(qe);
 			bfa_trc(fcs, (u32) rport->pwwn);
 			bfa_trc(fcs, rport->pid);
@@ -4771,7 +4761,7 @@ bfa_fcs_lport_get_rport_max_speed(bfa_fcs_lport_t *port)
 
 	while (qe != qh) {
 		rport = (struct bfa_fcs_rport_s *) qe;
-		if ((bfa_os_ntoh3b(rport->pid) > 0xFFF000) ||
+		if ((bfa_ntoh3b(rport->pid) > 0xFFF000) ||
 			(bfa_fcs_rport_get_state(rport) ==
 			  BFA_RPORT_OFFLINE)) {
 			qe = bfa_q_next(qe);
@@ -4807,7 +4797,7 @@ bfa_fcs_lookup_port(struct bfa_fcs_s *fcs, u16 vf_id, wwn_t lpwwn)
 	struct bfa_fcs_vport_s *vport;
 	bfa_fcs_vf_t   *vf;
 
-	bfa_assert(fcs != NULL);
+	WARN_ON(fcs == NULL);
 
 	vf = bfa_fcs_vf_lookup(fcs, vf_id);
 	if (vf == NULL) {
@@ -4853,7 +4843,7 @@ bfa_fcs_lport_get_info(struct bfa_fcs_lport_s *port,
 		port_info->max_vports_supp =
 			bfa_lps_get_max_vport(port->fcs->bfa);
 		port_info->num_vports_inuse =
-			bfa_fcs_fabric_vport_count(port->fabric);
+			port->fabric->num_vports;
 		port_info->max_rports_supp = BFA_FCS_MAX_RPORTS_SUPP;
 		port_info->num_rports_inuse = port->num_rports;
 	} else {
@@ -4997,7 +4987,8 @@ bfa_fcs_vport_sm_created(struct bfa_fcs_vport_s *vport,
 
 	switch (event) {
 	case BFA_FCS_VPORT_SM_START:
-		if (bfa_fcs_fabric_is_online(__vport_fabric(vport))
+		if (bfa_sm_cmp_state(__vport_fabric(vport),
+					bfa_fcs_fabric_sm_online)
 		    && bfa_fcs_fabric_npiv_capable(__vport_fabric(vport))) {
 			bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc);
 			bfa_fcs_vport_do_fdisc(vport);
@@ -5080,13 +5071,13 @@ bfa_fcs_vport_sm_fdisc(struct bfa_fcs_vport_s *vport,
 	switch (event) {
 	case BFA_FCS_VPORT_SM_DELETE:
 		bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
-		bfa_lps_discard(vport->lps);
+		bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE);
 		bfa_fcs_lport_delete(&vport->lport);
 		break;
 
 	case BFA_FCS_VPORT_SM_OFFLINE:
 		bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline);
-		bfa_lps_discard(vport->lps);
+		bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE);
 		break;
 
 	case BFA_FCS_VPORT_SM_RSP_OK:
@@ -5166,7 +5157,7 @@ bfa_fcs_vport_sm_online(struct bfa_fcs_vport_s *vport,
 
 	case BFA_FCS_VPORT_SM_OFFLINE:
 		bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline);
-		bfa_lps_discard(vport->lps);
+		bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE);
 		bfa_fcs_lport_offline(&vport->lport);
 		break;
 
@@ -5266,7 +5257,7 @@ bfa_fcs_vport_sm_logo(struct bfa_fcs_vport_s *vport,
 
 	switch (event) {
 	case BFA_FCS_VPORT_SM_OFFLINE:
-		bfa_lps_discard(vport->lps);
+		bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE);
 		/*
 		 * !!! fall through !!!
 		 */
@@ -5305,14 +5296,14 @@ bfa_fcs_vport_do_fdisc(struct bfa_fcs_vport_s *vport)
 static void
 bfa_fcs_vport_fdisc_rejected(struct bfa_fcs_vport_s *vport)
 {
-	u8		lsrjt_rsn = bfa_lps_get_lsrjt_rsn(vport->lps);
-	u8		lsrjt_expl = bfa_lps_get_lsrjt_expl(vport->lps);
+	u8		lsrjt_rsn = vport->lps->lsrjt_rsn;
+	u8		lsrjt_expl = vport->lps->lsrjt_expl;
 
 	bfa_trc(__vport_fcs(vport), lsrjt_rsn);
 	bfa_trc(__vport_fcs(vport), lsrjt_expl);
 
 	/* For certain reason codes, we don't want to retry. */
-	switch (bfa_lps_get_lsrjt_expl(vport->lps)) {
+	switch (vport->lps->lsrjt_expl) {
 	case FC_LS_RJT_EXP_INV_PORT_NAME: /* by brocade */
 	case FC_LS_RJT_EXP_INVALID_NPORT_ID: /* by Cisco */
 		if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES)
@@ -5476,7 +5467,7 @@ bfa_fcs_vport_create(struct bfa_fcs_vport_s *vport, struct bfa_fcs_s *fcs,
 	if (bfa_fcs_vport_lookup(fcs, vf_id, vport_cfg->pwwn) != NULL)
 		return BFA_STATUS_VPORT_EXISTS;
 
-	if (bfa_fcs_fabric_vport_count(&fcs->fabric) ==
+	if (fcs->fabric.num_vports ==
 			bfa_lps_get_max_vport(fcs->bfa))
 		return BFA_STATUS_VPORT_MAX;
 
@@ -5618,33 +5609,6 @@ bfa_fcs_vport_get_attr(struct bfa_fcs_vport_s *vport,
 	attr->vport_state = bfa_sm_to_state(vport_sm_table, vport->sm);
 }
 
-/*
- *	Use this function to get vport's statistics.
- *
- *	param[in]	vport	pointer to bfa_fcs_vport_t.
- *	param[out]	stats	pointer to return vport statistics in
- *
- *	return None
- */
-void
-bfa_fcs_vport_get_stats(struct bfa_fcs_vport_s *vport,
-			struct bfa_vport_stats_s *stats)
-{
-	*stats = vport->vport_stats;
-}
-
-/*
- *	Use this function to clear vport's statistics.
- *
- *	param[in]	vport	pointer to bfa_fcs_vport_t.
- *
- *	return None
- */
-void
-bfa_fcs_vport_clr_stats(struct bfa_fcs_vport_s *vport)
-{
-	memset(&vport->vport_stats, 0, sizeof(struct bfa_vport_stats_s));
-}
 
 /*
  *	Lookup a virtual port. Excludes base port from lookup.
@@ -5684,7 +5648,7 @@ bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status)
 		/*
 		 * Initialize the V-Port fields
 		 */
-		__vport_fcid(vport) = bfa_lps_get_pid(vport->lps);
+		__vport_fcid(vport) = vport->lps->lp_pid;
 		vport->vport_stats.fdisc_accepts++;
 		bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_OK);
 		break;
@@ -5697,7 +5661,7 @@ bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status)
 		break;
 
 	case BFA_STATUS_EPROTOCOL:
-		switch (bfa_lps_get_extstatus(vport->lps)) {
+		switch (vport->lps->ext_status) {
 		case BFA_EPROTO_BAD_ACCEPT:
 			vport->vport_stats.fdisc_acc_bad++;
 			break;

+ 17 - 213
drivers/scsi/bfa/bfa_fcs_rport.c

@@ -19,9 +19,9 @@
  *  rport.c Remote port implementation.
  */
 
+#include "bfad_drv.h"
 #include "bfa_fcs.h"
 #include "bfa_fcbuild.h"
-#include "bfad_drv.h"
 
 BFA_TRC_FILE(FCS, RPORT);
 
@@ -75,30 +75,6 @@ static void	bfa_fcs_rport_send_ls_rjt(struct bfa_fcs_rport_s *rport,
 static void	bfa_fcs_rport_process_adisc(struct bfa_fcs_rport_s *rport,
 				struct fchs_s *rx_fchs, u16 len);
 static void bfa_fcs_rport_send_prlo_acc(struct bfa_fcs_rport_s *rport);
-/*
- *  fcs_rport_sm FCS rport state machine events
- */
-
-enum rport_event {
-	RPSM_EVENT_PLOGI_SEND	= 1,	/*  new rport; start with PLOGI */
-	RPSM_EVENT_PLOGI_RCVD	= 2,	/*  Inbound PLOGI from remote port */
-	RPSM_EVENT_PLOGI_COMP	= 3,	/*  PLOGI completed to rport	*/
-	RPSM_EVENT_LOGO_RCVD	= 4,	/*  LOGO from remote device	*/
-	RPSM_EVENT_LOGO_IMP	= 5,	/*  implicit logo for SLER	*/
-	RPSM_EVENT_FCXP_SENT	= 6,	/*  Frame from has been sent	*/
-	RPSM_EVENT_DELETE	= 7,	/*  RPORT delete request	*/
-	RPSM_EVENT_SCN		= 8,	/*  state change notification	*/
-	RPSM_EVENT_ACCEPTED	= 9,	/*  Good response from remote device */
-	RPSM_EVENT_FAILED	= 10,	/*  Request to rport failed.	*/
-	RPSM_EVENT_TIMEOUT	= 11,	/*  Rport SM timeout event	*/
-	RPSM_EVENT_HCB_ONLINE  = 12,	/*  BFA rport online callback	*/
-	RPSM_EVENT_HCB_OFFLINE = 13,	/*  BFA rport offline callback	*/
-	RPSM_EVENT_FC4_OFFLINE = 14,	/*  FC-4 offline complete	*/
-	RPSM_EVENT_ADDRESS_CHANGE = 15,	/*  Rport's PID has changed	*/
-	RPSM_EVENT_ADDRESS_DISC = 16,	/*  Need to Discover rport's PID */
-	RPSM_EVENT_PRLO_RCVD   = 17,	/*  PRLO from remote device	*/
-	RPSM_EVENT_PLOGI_RETRY = 18,	/*  Retry PLOGI continously */
-};
 
 static void	bfa_fcs_rport_sm_uninit(struct bfa_fcs_rport_s *rport,
 					enum rport_event event);
@@ -498,24 +474,24 @@ bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport,
 
 	case RPSM_EVENT_LOGO_RCVD:
 		bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logorcv);
-		bfa_rport_offline(rport->bfa_rport);
+		bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
 		break;
 
 	case RPSM_EVENT_LOGO_IMP:
 	case RPSM_EVENT_ADDRESS_CHANGE:
 		bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_offline);
-		bfa_rport_offline(rport->bfa_rport);
+		bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
 		break;
 
 	case RPSM_EVENT_PLOGI_RCVD:
 		bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending);
-		bfa_rport_offline(rport->bfa_rport);
+		bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
 		bfa_fcs_rport_send_plogiacc(rport, NULL);
 		break;
 
 	case RPSM_EVENT_DELETE:
 		bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logosend);
-		bfa_rport_offline(rport->bfa_rport);
+		bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
 		break;
 
 	case RPSM_EVENT_SCN:
@@ -824,7 +800,7 @@ bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport,
 	switch (event) {
 	case RPSM_EVENT_FC4_OFFLINE:
 		bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logorcv);
-		bfa_rport_offline(rport->bfa_rport);
+		bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
 		break;
 
 	case RPSM_EVENT_DELETE:
@@ -856,7 +832,7 @@ bfa_fcs_rport_sm_fc4_logosend(struct bfa_fcs_rport_s *rport,
 	switch (event) {
 	case RPSM_EVENT_FC4_OFFLINE:
 		bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logosend);
-		bfa_rport_offline(rport->bfa_rport);
+		bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
 		break;
 
 	default:
@@ -878,7 +854,7 @@ bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport,
 	switch (event) {
 	case RPSM_EVENT_FC4_OFFLINE:
 		bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_offline);
-		bfa_rport_offline(rport->bfa_rport);
+		bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
 		break;
 
 	case RPSM_EVENT_SCN:
@@ -1459,7 +1435,7 @@ bfa_fcs_rport_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
 			twin->stats.plogi_rcvd	  += rport->stats.plogi_rcvd;
 			twin->stats.plogi_accs++;
 
-			bfa_fcs_rport_delete(rport);
+			bfa_sm_send_event(rport, RPSM_EVENT_DELETE);
 
 			bfa_fcs_rport_update(twin, plogi_rsp);
 			twin->pid = rsp_fchs->s_id;
@@ -1992,13 +1968,14 @@ bfa_fcs_rport_alloc(struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid)
 	/*
 	 * allocate FC-4s
 	 */
-	bfa_assert(bfa_fcs_lport_is_initiator(port));
+	WARN_ON(!bfa_fcs_lport_is_initiator(port));
 
 	if (bfa_fcs_lport_is_initiator(port)) {
 		rport->itnim = bfa_fcs_itnim_create(rport);
 		if (!rport->itnim) {
 			bfa_trc(fcs, rpid);
-			bfa_rport_delete(rport->bfa_rport);
+			bfa_sm_send_event(rport->bfa_rport,
+						BFA_RPORT_SM_DELETE);
 			kfree(rport_drv);
 			return NULL;
 		}
@@ -2032,7 +2009,7 @@ bfa_fcs_rport_free(struct bfa_fcs_rport_s *rport)
 			bfa_fcs_rpf_rport_offline(rport);
 	}
 
-	bfa_rport_delete(rport->bfa_rport);
+	bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_DELETE);
 	bfa_fcs_lport_del_rport(port, rport);
 	kfree(rport->rp_drv);
 }
@@ -2307,39 +2284,7 @@ bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs,
 	bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_RCVD);
 }
 
-/*
- * Called by bport/vport to delete a remote port instance.
- *
- * Rport delete is called under the following conditions:
- *		- vport is deleted
- *		- vf is deleted
- *		- explicit request from OS to delete rport
- */
-void
-bfa_fcs_rport_delete(struct bfa_fcs_rport_s *rport)
-{
-	bfa_sm_send_event(rport, RPSM_EVENT_DELETE);
-}
 
-/*
- * Called by bport/vport to  when a target goes offline.
- *
- */
-void
-bfa_fcs_rport_offline(struct bfa_fcs_rport_s *rport)
-{
-	bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP);
-}
-
-/*
- * Called by bport in n2n when a target (attached port) becomes online.
- *
- */
-void
-bfa_fcs_rport_online(struct bfa_fcs_rport_s *rport)
-{
-	bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_SEND);
-}
 /*
  *	Called by bport/vport to notify SCN for the remote port
  */
@@ -2350,23 +2295,6 @@ bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport)
 	bfa_sm_send_event(rport, RPSM_EVENT_SCN);
 }
 
-/*
- *	Called by	fcpim to notify that the ITN cleanup is done.
- */
-void
-bfa_fcs_rport_itnim_ack(struct bfa_fcs_rport_s *rport)
-{
-	bfa_sm_send_event(rport, RPSM_EVENT_FC4_OFFLINE);
-}
-
-/*
- *	Called by fcptm to notify that the ITN cleanup is done.
- */
-void
-bfa_fcs_rport_tin_ack(struct bfa_fcs_rport_s *rport)
-{
-	bfa_sm_send_event(rport, RPSM_EVENT_FC4_OFFLINE);
-}
 
 /*
  *	brief
@@ -2461,15 +2389,6 @@ bfa_cb_rport_qos_scn_prio(void *cbarg,
 	bfa_trc(rport->fcs, rport->pwwn);
 }
 
-/*
- *		Called to process any unsolicted frames from this remote port
- */
-void
-bfa_fcs_rport_logo_imp(struct bfa_fcs_rport_s *rport)
-{
-	bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP);
-}
-
 /*
  *		Called to process any unsolicited frames from this remote port
  */
@@ -2586,6 +2505,7 @@ bfa_fcs_rport_get_state(struct bfa_fcs_rport_s *rport)
 	return bfa_sm_to_state(rport_sm_table, rport->sm);
 }
 
+
 /*
  *	brief
  *		 Called by the Driver to set rport delete/ageout timeout
@@ -2602,7 +2522,7 @@ bfa_fcs_rport_set_del_timeout(u8 rport_tmo)
 		bfa_fcs_rport_del_timeout = rport_tmo * 1000;
 }
 void
-bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, u16 ox_id)
+bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, __be16 ox_id)
 {
 	bfa_trc(rport->fcs, rport->pid);
 
@@ -2621,106 +2541,6 @@ bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, u16 ox_id)
  *  fcs_rport_api FCS rport API.
  */
 
-/*
- *	Direct API to add a target by port wwn. This interface is used, for
- *	example, by bios when target pwwn is known from boot lun configuration.
- */
-bfa_status_t
-bfa_fcs_rport_add(struct bfa_fcs_lport_s *port, wwn_t *pwwn,
-		struct bfa_fcs_rport_s *rport, struct bfad_rport_s *rport_drv)
-{
-	bfa_trc(port->fcs, *pwwn);
-
-	return BFA_STATUS_OK;
-}
-
-/*
- *	Direct API to remove a target and its associated resources. This
- *	interface is used, for example, by driver to remove target
- *	ports from the target list for a VM.
- */
-bfa_status_t
-bfa_fcs_rport_remove(struct bfa_fcs_rport_s *rport_in)
-{
-
-	struct bfa_fcs_rport_s *rport;
-
-	bfa_trc(rport_in->fcs, rport_in->pwwn);
-
-	rport = bfa_fcs_lport_get_rport_by_pwwn(rport_in->port, rport_in->pwwn);
-	if (rport == NULL) {
-		/*
-		 * TBD Error handling
-		 */
-		bfa_trc(rport_in->fcs, rport_in->pid);
-		return BFA_STATUS_UNKNOWN_RWWN;
-	}
-
-	/*
-	 * TBD if this remote port is online, send a logo
-	 */
-	return BFA_STATUS_OK;
-
-}
-
-/*
- *	Remote device status for display/debug.
- */
-void
-bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport,
-			struct bfa_rport_attr_s *rport_attr)
-{
-	struct bfa_rport_qos_attr_s qos_attr;
-	bfa_fcs_lport_t *port = rport->port;
-	bfa_port_speed_t rport_speed = rport->rpf.rpsc_speed;
-
-	memset(rport_attr, 0, sizeof(struct bfa_rport_attr_s));
-
-	rport_attr->pid = rport->pid;
-	rport_attr->pwwn = rport->pwwn;
-	rport_attr->nwwn = rport->nwwn;
-	rport_attr->cos_supported = rport->fc_cos;
-	rport_attr->df_sz = rport->maxfrsize;
-	rport_attr->state = bfa_fcs_rport_get_state(rport);
-	rport_attr->fc_cos = rport->fc_cos;
-	rport_attr->cisc = rport->cisc;
-	rport_attr->scsi_function = rport->scsi_function;
-	rport_attr->curr_speed  = rport->rpf.rpsc_speed;
-	rport_attr->assigned_speed  = rport->rpf.assigned_speed;
-
-	bfa_rport_get_qos_attr(rport->bfa_rport, &qos_attr);
-	rport_attr->qos_attr = qos_attr;
-
-	rport_attr->trl_enforced = BFA_FALSE;
-	if (bfa_fcport_is_ratelim(port->fcs->bfa)) {
-		if (rport_speed == BFA_PORT_SPEED_UNKNOWN) {
-			/* Use default ratelim speed setting */
-			rport_speed =
-				bfa_fcport_get_ratelim_speed(rport->fcs->bfa);
-		}
-
-		if (rport_speed < bfa_fcs_lport_get_rport_max_speed(port))
-			rport_attr->trl_enforced = BFA_TRUE;
-	}
-}
-
-/*
- *	Per remote device statistics.
- */
-void
-bfa_fcs_rport_get_stats(struct bfa_fcs_rport_s *rport,
-			struct bfa_rport_stats_s *stats)
-{
-	*stats = rport->stats;
-}
-
-void
-bfa_fcs_rport_clear_stats(struct bfa_fcs_rport_s *rport)
-{
-	memset((char *)&rport->stats, 0,
-			sizeof(struct bfa_rport_stats_s));
-}
-
 struct bfa_fcs_rport_s *
 bfa_fcs_rport_lookup(struct bfa_fcs_lport_s *port, wwn_t rpwwn)
 {
@@ -2751,22 +2571,6 @@ bfa_fcs_rport_lookup_by_nwwn(struct bfa_fcs_lport_s *port, wwn_t rnwwn)
 	return rport;
 }
 
-/*
- * This API is to set the Rport's speed. Should be used when RPSC is not
- * supported by the rport.
- */
-void
-bfa_fcs_rport_set_speed(struct bfa_fcs_rport_s *rport, bfa_port_speed_t speed)
-{
-	rport->rpf.assigned_speed  = speed;
-
-	/* Set this speed in f/w only if the RPSC speed is not available */
-	if (rport->rpf.rpsc_speed == BFA_PORT_SPEED_UNKNOWN)
-		bfa_rport_speed(rport->bfa_rport, speed);
-}
-
-
-
 /*
  * Remote port features (RPF) implementation.
  */
@@ -2827,7 +2631,7 @@ bfa_fcs_rpf_sm_uninit(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
 	case RPFSM_EVENT_RPORT_ONLINE:
 		/* Send RPSC2 to a Brocade fabric only. */
 		if ((!BFA_FCS_PID_IS_WKA(rport->pid)) &&
-			((bfa_lps_is_brcd_fabric(rport->port->fabric->lps)) ||
+			((rport->port->fabric->lps->brcd_switch) ||
 			(bfa_fcs_fabric_get_switch_oui(fabric) ==
 						BFA_FCS_BRCD_SWITCH_OUI))) {
 			bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending);
@@ -3093,7 +2897,7 @@ bfa_fcs_rpf_rpsc2_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
 		num_ents = be16_to_cpu(rpsc2_acc->num_pids);
 		bfa_trc(rport->fcs, num_ents);
 		if (num_ents > 0) {
-			bfa_assert(rpsc2_acc->port_info[0].pid != rport->pid);
+			WARN_ON(rpsc2_acc->port_info[0].pid == rport->pid);
 			bfa_trc(rport->fcs,
 				be16_to_cpu(rpsc2_acc->port_info[0].pid));
 			bfa_trc(rport->fcs,

+ 2 - 1
drivers/scsi/bfa/bfa_hw_cb.c

@@ -15,6 +15,7 @@
  * General Public License for more details.
  */
 
+#include "bfad_drv.h"
 #include "bfa_modules.h"
 #include "bfi_cbreg.h"
 
@@ -110,7 +111,7 @@ bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs)
 {
 	int i;
 
-	bfa_assert((nvecs == 1) || (nvecs == __HFN_NUMINTS));
+	WARN_ON((nvecs != 1) && (nvecs != __HFN_NUMINTS));
 
 	bfa->msix.nvecs = nvecs;
 	if (nvecs == 1) {
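Throughout these files bfa_assert(cond) becomes WARN_ON with the negated condition, as in the MSI-X vector check above where the OR of valid values turns into an AND of inequalities. The standalone check below confirms the two forms are exact negations; WARN_ON and __HFN_NUMINTS here are simplified stand-ins, not the kernel definitions.

#include <stdio.h>

#define __HFN_NUMINTS	32	/* illustrative value only */

/* printf-based stand-in for the kernel's WARN_ON(), for this check */
#define WARN_ON(cond)						\
	do {							\
		if (cond)					\
			printf("warn: %s\n", #cond);		\
	} while (0)

int main(void)
{
	for (int nvecs = 0; nvecs <= 64; nvecs++) {
		int old_assert_ok  = (nvecs == 1) || (nvecs == __HFN_NUMINTS);
		int new_warn_fires = (nvecs != 1) && (nvecs != __HFN_NUMINTS);

		/* a correct conversion means these always disagree */
		if (old_assert_ok == new_warn_fires) {
			printf("mismatch at nvecs=%d\n", nvecs);
			return 1;
		}
	}
	WARN_ON(2 != 1 && 2 != __HFN_NUMINTS);	/* fires, like nvecs == 2 */
	printf("WARN_ON condition is the exact negation of the old assert\n");
	return 0;
}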

+ 3 - 2
drivers/scsi/bfa/bfa_hw_ct.c

@@ -15,6 +15,7 @@
  * General Public License for more details.
  */
 
+#include "bfad_drv.h"
 #include "bfa_modules.h"
 #include "bfi_ctreg.h"
 
@@ -116,7 +117,7 @@ bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap,
 void
 bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs)
 {
-	bfa_assert((nvecs == 1) || (nvecs == BFA_MSIX_CT_MAX));
+	WARN_ON((nvecs != 1) && (nvecs != BFA_MSIX_CT_MAX));
 	bfa_trc(bfa, nvecs);
 
 	bfa->msix.nvecs = nvecs;
@@ -143,7 +144,7 @@ bfa_hwct_msix_install(struct bfa_s *bfa)
 	for (; i <= BFA_MSIX_RME_Q3; i++)
 		bfa->msix.handler[i] = bfa_msix_rspq;
 
-	bfa_assert(i == BFA_MSIX_LPU_ERR);
+	WARN_ON(i != BFA_MSIX_LPU_ERR);
 	bfa->msix.handler[BFA_MSIX_LPU_ERR] = bfa_msix_lpu_err;
 }
 

These changes were omitted because the diff is too large
+ 269 - 232
drivers/scsi/bfa/bfa_ioc.c


+ 21 - 23
drivers/scsi/bfa/bfa_ioc.h

@@ -18,10 +18,15 @@
 #ifndef __BFA_IOC_H__
 #define __BFA_IOC_H__
 
-#include "bfa_os_inc.h"
+#include "bfad_drv.h"
 #include "bfa_cs.h"
 #include "bfi.h"
 
+#define BFA_DBG_FWTRC_ENTS	(BFI_IOC_TRC_ENTS)
+#define BFA_DBG_FWTRC_LEN					\
+	(BFA_DBG_FWTRC_ENTS * sizeof(struct bfa_trc_s) +	\
+	(sizeof(struct bfa_trc_mod_s) -				\
+	BFA_TRC_MAX * sizeof(struct bfa_trc_s)))
 /*
  * BFA timer declarations
  */
@@ -47,7 +52,6 @@ struct bfa_timer_mod_s {
 #define BFA_TIMER_FREQ 200 /* specified in millisecs */
 
 void bfa_timer_beat(struct bfa_timer_mod_s *mod);
-void bfa_timer_init(struct bfa_timer_mod_s *mod);
 void bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
 			bfa_timer_cbfn_t timercb, void *arg,
 			unsigned int timeout);
@@ -70,7 +74,7 @@ struct bfa_sge_s {
 #define bfa_swap_words(_x)  (	\
 	((_x) << 32) | ((_x) >> 32))
 
-#ifdef __BIGENDIAN
+#ifdef __BIG_ENDIAN
 #define bfa_sge_to_be(_x)
 #define bfa_sge_to_le(_x)	bfa_sge_word_swap(_x)
 #define bfa_sgaddr_le(_x)	bfa_swap_words(_x)
@@ -115,8 +119,8 @@ struct bfa_dma_s {
 static inline void
 __bfa_dma_addr_set(union bfi_addr_u *dma_addr, u64 pa)
 {
-	dma_addr->a32.addr_lo = (u32) pa;
-	dma_addr->a32.addr_hi = (u32) (bfa_os_u32(pa));
+	dma_addr->a32.addr_lo = (__be32) pa;
+	dma_addr->a32.addr_hi = (__be32) (pa >> 32);
 }
 
 
@@ -125,8 +129,8 @@ __bfa_dma_addr_set(union bfi_addr_u *dma_addr, u64 pa)
 static inline void
 __bfa_dma_be_addr_set(union bfi_addr_u *dma_addr, u64 pa)
 {
-	dma_addr->a32.addr_lo = (u32) cpu_to_be32(pa);
-	dma_addr->a32.addr_hi = (u32) cpu_to_be32(bfa_os_u32(pa));
+	dma_addr->a32.addr_lo = cpu_to_be32(pa);
+	dma_addr->a32.addr_hi = cpu_to_be32(pa >> 32);
 }
 
 struct bfa_ioc_regs_s {
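The two inline helpers above replace the old bfa_os_u32() wrapper (defined as a 32-bit right shift in the now-deleted bfa_os_inc.h) with an explicit pa >> 32, making the high/low split of the 64-bit DMA address visible at the call site. A small sketch of the big-endian variant follows; the union is a simplified stand-in for union bfi_addr_u, and htonl stands in for cpu_to_be32.

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

/* simplified stand-in for union bfi_addr_u */
union dma_addr32 {
	struct {
		uint32_t addr_lo;
		uint32_t addr_hi;
	} a32;
};

/* mirrors __bfa_dma_be_addr_set(): low word first, both big endian */
static void dma_be_addr_set(union dma_addr32 *dma_addr, uint64_t pa)
{
	dma_addr->a32.addr_lo = htonl((uint32_t)pa);
	dma_addr->a32.addr_hi = htonl((uint32_t)(pa >> 32));
}

int main(void)
{
	union dma_addr32 a;

	dma_be_addr_set(&a, 0x11223344aabbccddULL);
	printf("lo=0x%08x hi=0x%08x\n", a.a32.addr_lo, a.a32.addr_hi);
	return 0;
}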
@@ -145,8 +149,11 @@ struct bfa_ioc_regs_s {
 	void __iomem *host_page_num_fn;
 	void __iomem *heartbeat;
 	void __iomem *ioc_fwstate;
+	void __iomem *alt_ioc_fwstate;
 	void __iomem *ll_halt;
+	void __iomem *alt_ll_halt;
 	void __iomem *err_set;
+	void __iomem *ioc_fail_sync;
 	void __iomem *shirq_isr_next;
 	void __iomem *shirq_msk_next;
 	void __iomem *smem_page_start;
@@ -254,8 +261,12 @@ struct bfa_ioc_hwif_s {
 	void		(*ioc_map_port)	(struct bfa_ioc_s *ioc);
 	void		(*ioc_isr_mode_set)	(struct bfa_ioc_s *ioc,
 					bfa_boolean_t msix);
-	void		(*ioc_notify_hbfail)	(struct bfa_ioc_s *ioc);
+	void		(*ioc_notify_fail)	(struct bfa_ioc_s *ioc);
 	void		(*ioc_ownership_reset)	(struct bfa_ioc_s *ioc);
+	void		(*ioc_sync_join)	(struct bfa_ioc_s *ioc);
+	void		(*ioc_sync_leave)	(struct bfa_ioc_s *ioc);
+	void		(*ioc_sync_ack)		(struct bfa_ioc_s *ioc);
+	bfa_boolean_t	(*ioc_sync_complete)	(struct bfa_ioc_s *ioc);
 };
 
 #define bfa_ioc_pcifn(__ioc)		((__ioc)->pcidev.pci_func)
@@ -325,7 +336,6 @@ void bfa_ioc_auto_recover(bfa_boolean_t auto_recover);
 void bfa_ioc_detach(struct bfa_ioc_s *ioc);
 void bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
 		enum bfi_mclass mc);
-u32 bfa_ioc_meminfo(void);
 void bfa_ioc_mem_claim(struct bfa_ioc_s *ioc,  u8 *dm_kva, u64 dm_pa);
 void bfa_ioc_enable(struct bfa_ioc_s *ioc);
 void bfa_ioc_disable(struct bfa_ioc_s *ioc);
@@ -340,6 +350,7 @@ bfa_boolean_t bfa_ioc_is_initialized(struct bfa_ioc_s *ioc);
 bfa_boolean_t bfa_ioc_is_disabled(struct bfa_ioc_s *ioc);
 bfa_boolean_t bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc);
 bfa_boolean_t bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc);
+void bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc);
 enum bfa_ioc_type_e bfa_ioc_get_type(struct bfa_ioc_s *ioc);
 void bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num);
 void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver);
@@ -353,24 +364,16 @@ enum bfa_ioc_state bfa_ioc_get_state(struct bfa_ioc_s *ioc);
 void bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr);
 void bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
 		struct bfa_adapter_attr_s *ad_attr);
-int bfa_ioc_debug_trcsz(bfa_boolean_t auto_recover);
 void bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave);
 bfa_status_t bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata,
 		int *trclen);
-void bfa_ioc_debug_fwsave_clear(struct bfa_ioc_s *ioc);
 bfa_status_t bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata,
 				 int *trclen);
 bfa_status_t bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
 	u32 *offset, int *buflen);
-u32 bfa_ioc_smem_pgnum(struct bfa_ioc_s *ioc, u32 fmaddr);
-u32 bfa_ioc_smem_pgoff(struct bfa_ioc_s *ioc, u32 fmaddr);
 void bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc);
 bfa_boolean_t bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc);
-void bfa_ioc_hbfail_register(struct bfa_ioc_s *ioc,
-	struct bfa_ioc_hbfail_notify_s *notify);
 bfa_boolean_t bfa_ioc_sem_get(void __iomem *sem_reg);
-void bfa_ioc_sem_release(void __iomem *sem_reg);
-void bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc);
 void bfa_ioc_fwver_get(struct bfa_ioc_s *ioc,
 			struct bfi_ioc_image_hdr_s *fwhdr);
 bfa_boolean_t bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc,
@@ -381,13 +384,8 @@ bfa_status_t bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc);
 /*
  * bfa mfg wwn API functions
  */
-wwn_t bfa_ioc_get_pwwn(struct bfa_ioc_s *ioc);
-wwn_t bfa_ioc_get_nwwn(struct bfa_ioc_s *ioc);
 mac_t bfa_ioc_get_mac(struct bfa_ioc_s *ioc);
-wwn_t bfa_ioc_get_mfg_pwwn(struct bfa_ioc_s *ioc);
-wwn_t bfa_ioc_get_mfg_nwwn(struct bfa_ioc_s *ioc);
 mac_t bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc);
-u64 bfa_ioc_get_adid(struct bfa_ioc_s *ioc);
 
 /*
  * F/W Image Size & Chunk
@@ -421,7 +419,7 @@ bfa_cb_image_get_chunk(int type, u32 off)
 		return bfi_image_ct_cna_get_chunk(off);	break;
 	case BFI_IMAGE_CB_FC:
 		return bfi_image_cb_fc_get_chunk(off);	break;
-	default: return 0;
+	default: return NULL;
 	}
 }
 

+ 92 - 5
drivers/scsi/bfa/bfa_ioc_cb.c

@@ -15,6 +15,7 @@
  * General Public License for more details.
  */
 
+#include "bfad_drv.h"
 #include "bfa_ioc.h"
 #include "bfi_cbreg.h"
 #include "bfa_defs.h"
@@ -29,10 +30,14 @@ static void bfa_ioc_cb_firmware_unlock(struct bfa_ioc_s *ioc);
 static void bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc);
 static void bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc);
 static void bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix);
-static void bfa_ioc_cb_notify_hbfail(struct bfa_ioc_s *ioc);
+static void bfa_ioc_cb_notify_fail(struct bfa_ioc_s *ioc);
 static void bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc);
+static void bfa_ioc_cb_sync_join(struct bfa_ioc_s *ioc);
+static void bfa_ioc_cb_sync_leave(struct bfa_ioc_s *ioc);
+static void bfa_ioc_cb_sync_ack(struct bfa_ioc_s *ioc);
+static bfa_boolean_t bfa_ioc_cb_sync_complete(struct bfa_ioc_s *ioc);
 
-struct bfa_ioc_hwif_s hwif_cb;
+static struct bfa_ioc_hwif_s hwif_cb;
 
 /*
  * Called from bfa_ioc_attach() to map asic specific calls.
@@ -46,8 +51,12 @@ bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc)
 	hwif_cb.ioc_reg_init = bfa_ioc_cb_reg_init;
 	hwif_cb.ioc_map_port = bfa_ioc_cb_map_port;
 	hwif_cb.ioc_isr_mode_set = bfa_ioc_cb_isr_mode_set;
-	hwif_cb.ioc_notify_hbfail = bfa_ioc_cb_notify_hbfail;
+	hwif_cb.ioc_notify_fail = bfa_ioc_cb_notify_fail;
 	hwif_cb.ioc_ownership_reset = bfa_ioc_cb_ownership_reset;
+	hwif_cb.ioc_sync_join = bfa_ioc_cb_sync_join;
+	hwif_cb.ioc_sync_leave = bfa_ioc_cb_sync_leave;
+	hwif_cb.ioc_sync_ack = bfa_ioc_cb_sync_ack;
+	hwif_cb.ioc_sync_complete = bfa_ioc_cb_sync_complete;
 
 	ioc->ioc_hwif = &hwif_cb;
 }
@@ -58,6 +67,21 @@ bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc)
 static bfa_boolean_t
 bfa_ioc_cb_firmware_lock(struct bfa_ioc_s *ioc)
 {
+	struct bfi_ioc_image_hdr_s fwhdr;
+	uint32_t fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+
+	if (fwstate == BFI_IOC_UNINIT)
+		return BFA_TRUE;
+
+	bfa_ioc_fwver_get(ioc, &fwhdr);
+
+	if (swab32(fwhdr.exec) == BFI_BOOT_TYPE_NORMAL)
+		return BFA_TRUE;
+
+	bfa_trc(ioc, fwstate);
+	bfa_trc(ioc, fwhdr.exec);
+	writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
+
 	return BFA_TRUE;
 }
 
@@ -70,7 +94,7 @@ bfa_ioc_cb_firmware_unlock(struct bfa_ioc_s *ioc)
  * Notify other functions on HB failure.
  */
 static void
-bfa_ioc_cb_notify_hbfail(struct bfa_ioc_s *ioc)
+bfa_ioc_cb_notify_fail(struct bfa_ioc_s *ioc)
 {
 	writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set);
 	readl(ioc->ioc_regs.err_set);
@@ -108,9 +132,11 @@ bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc)
 	if (ioc->port_id == 0) {
 		ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
 		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
+		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
 	} else {
 		ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
 		ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
+		ioc->ioc_regs.alt_ioc_fwstate = (rb + BFA_IOC0_STATE_REG);
 	}
 
 	/*
@@ -181,10 +207,71 @@ bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc)
 	 * will lock it instead of clearing it.
 	 */
 	readl(ioc->ioc_regs.ioc_sem_reg);
-	bfa_ioc_hw_sem_release(ioc);
+	writel(1, ioc->ioc_regs.ioc_sem_reg);
 }
 
+/*
+ * Synchronized IOC failure processing routines
+ */
+static void
+bfa_ioc_cb_sync_join(struct bfa_ioc_s *ioc)
+{
+}
 
+static void
+bfa_ioc_cb_sync_leave(struct bfa_ioc_s *ioc)
+{
+}
+
+static void
+bfa_ioc_cb_sync_ack(struct bfa_ioc_s *ioc)
+{
+	writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
+}
+
+static bfa_boolean_t
+bfa_ioc_cb_sync_complete(struct bfa_ioc_s *ioc)
+{
+	uint32_t fwstate, alt_fwstate;
+	fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+
+	/*
+	 * At this point, this IOC is holding the hw sem in the
+	 * start path (fwcheck) OR in the disable/enable path
+	 * OR to check if the other IOC has acknowledged failure.
+	 *
+	 * So, this IOC can be in UNINIT, INITING, DISABLED, FAIL
+	 * or in MEMTEST states. In a normal scenario, this IOC
+	 * can not be in OP state when this function is called.
+	 *
+	 * However, this IOC could still be in OP state when
+	 * the OS driver is starting up, if the OptROM code has
+	 * left it in that state.
+	 *
+	 * If we had marked this IOC's fwstate as BFI_IOC_FAIL
+	 * in the failure case and now, if the fwstate is not
+	 * BFI_IOC_FAIL, it implies that the other PCI fn has
+	 * reinitialized the ASIC or this IOC got disabled, so
+	 * return TRUE.
+	 */
+	if (fwstate == BFI_IOC_UNINIT ||
+		fwstate == BFI_IOC_INITING ||
+		fwstate == BFI_IOC_DISABLED ||
+		fwstate == BFI_IOC_MEMTEST ||
+		fwstate == BFI_IOC_OP)
+		return BFA_TRUE;
+	else {
+		alt_fwstate = readl(ioc->ioc_regs.alt_ioc_fwstate);
+		if (alt_fwstate == BFI_IOC_FAIL ||
+			alt_fwstate == BFI_IOC_DISABLED ||
+			alt_fwstate == BFI_IOC_UNINIT ||
+			alt_fwstate == BFI_IOC_INITING ||
+			alt_fwstate == BFI_IOC_MEMTEST)
+			return BFA_TRUE;
+		else
+			return BFA_FALSE;
+	}
+}
 
 bfa_status_t
 bfa_ioc_cb_pll_init(void __iomem *rb, bfa_boolean_t fcmode)
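The long comment in bfa_ioc_cb_sync_complete() above reduces to a predicate over the two firmware-state registers. The sketch below restates that decision as standalone C so the state combinations are easy to check; the enum values are illustrative stand-ins for the BFI_IOC_* states, not the real register encodings.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the BFI_IOC_* firmware states. */
enum ioc_fwstate {
	IOC_UNINIT, IOC_INITING, IOC_MEMTEST, IOC_OP,
	IOC_DISABLED, IOC_FAIL,
};

static bool state_down_or_initing(enum ioc_fwstate s)
{
	return s == IOC_UNINIT || s == IOC_INITING ||
	       s == IOC_DISABLED || s == IOC_MEMTEST;
}

/* Mirrors the decision in bfa_ioc_cb_sync_complete() above. */
static bool cb_sync_complete(enum ioc_fwstate fwstate,
			     enum ioc_fwstate alt_fwstate)
{
	/* This IOC is not marked failed (or OptROM left it in OP):
	 * the other function reinitialized or this IOC was disabled. */
	if (state_down_or_initing(fwstate) || fwstate == IOC_OP)
		return true;

	/* This IOC is marked failed: wait until the other function has
	 * also failed, been disabled, or started reinitializing. */
	return alt_fwstate == IOC_FAIL || state_down_or_initing(alt_fwstate);
}

int main(void)
{
	printf("%d\n", cb_sync_complete(IOC_FAIL, IOC_OP));       /* 0 */
	printf("%d\n", cb_sync_complete(IOC_FAIL, IOC_DISABLED)); /* 1 */
	printf("%d\n", cb_sync_complete(IOC_UNINIT, IOC_OP));     /* 1 */
	return 0;
}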

+ 108 - 12
drivers/scsi/bfa/bfa_ioc_ct.c

@@ -15,12 +15,22 @@
  * General Public License for more details.
  */
 
+#include "bfad_drv.h"
 #include "bfa_ioc.h"
 #include "bfi_ctreg.h"
 #include "bfa_defs.h"
 
 BFA_TRC_FILE(CNA, IOC_CT);
 
+#define bfa_ioc_ct_sync_pos(__ioc)      \
+		((uint32_t) (1 << bfa_ioc_pcifn(__ioc)))
+#define BFA_IOC_SYNC_REQD_SH    16
+#define bfa_ioc_ct_get_sync_ackd(__val) (__val & 0x0000ffff)
+#define bfa_ioc_ct_clear_sync_ackd(__val)       (__val & 0xffff0000)
+#define bfa_ioc_ct_get_sync_reqd(__val) (__val >> BFA_IOC_SYNC_REQD_SH)
+#define bfa_ioc_ct_sync_reqd_pos(__ioc) \
+			(bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH)
+
 /*
  * forward declarations
  */
@@ -29,10 +39,14 @@ static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc);
 static void bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc);
 static void bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc);
 static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix);
-static void bfa_ioc_ct_notify_hbfail(struct bfa_ioc_s *ioc);
+static void bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc);
 static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc);
+static void bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc);
+static void bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc);
+static void bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc);
+static bfa_boolean_t bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc);
 
-struct bfa_ioc_hwif_s hwif_ct;
+static struct bfa_ioc_hwif_s hwif_ct;
 
 /*
  * Called from bfa_ioc_attach() to map asic specific calls.
@@ -46,8 +60,12 @@ bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc)
 	hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
 	hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
 	hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
-	hwif_ct.ioc_notify_hbfail = bfa_ioc_ct_notify_hbfail;
+	hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
 	hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
+	hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
+	hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
+	hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
+	hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete;
 
 	ioc->ioc_hwif = &hwif_ct;
 }
@@ -83,7 +101,8 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
 	 */
 	if (usecnt == 0) {
 		writel(1, ioc->ioc_regs.ioc_usage_reg);
-		bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+		writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
+		writel(0, ioc->ioc_regs.ioc_fail_sync);
 		bfa_trc(ioc, usecnt);
 		return BFA_TRUE;
 	}
@@ -94,14 +113,14 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
 	/*
 	 * Use count cannot be non-zero and chip in uninitialized state.
 	 */
-	bfa_assert(ioc_fwstate != BFI_IOC_UNINIT);
+	WARN_ON(ioc_fwstate == BFI_IOC_UNINIT);
 
 	/*
 	 * Check if another driver with a different firmware is active
 	 */
 	bfa_ioc_fwver_get(ioc, &fwhdr);
 	if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) {
-		bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+		writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
 		bfa_trc(ioc, usecnt);
 		return BFA_FALSE;
 	}
@@ -111,7 +130,7 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
 	 */
 	usecnt++;
 	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
-	bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+	writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
 	bfa_trc(ioc, usecnt);
 	return BFA_TRUE;
 }
@@ -139,25 +158,27 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc)
 	 */
 	bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
 	usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
-	bfa_assert(usecnt > 0);
+	WARN_ON(usecnt <= 0);
 
 	usecnt--;
 	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
 	bfa_trc(ioc, usecnt);
 
-	bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+	writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
 }
 
 /*
  * Notify other functions on HB failure.
  */
 static void
-bfa_ioc_ct_notify_hbfail(struct bfa_ioc_s *ioc)
+bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc)
 {
 	if (ioc->cna) {
 		writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
+		writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
 		/* Wait for halt to take effect */
 		readl(ioc->ioc_regs.ll_halt);
+		readl(ioc->ioc_regs.alt_ll_halt);
 	} else {
 		writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set);
 		readl(ioc->ioc_regs.err_set);
@@ -209,15 +230,19 @@ bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
 	if (ioc->port_id == 0) {
 		ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
 		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
+		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
 		ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].hfn;
 		ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].lpu;
 		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
+		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
 	} else {
 		ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
 		ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
+		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
 		ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].hfn;
 		ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].lpu;
 		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
+		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
 	}
 
 	/*
@@ -235,6 +260,7 @@ bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
 	ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG);
 	ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
 	ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
+	ioc->ioc_regs.ioc_fail_sync = (rb + BFA_IOC_FAIL_SYNC);
 
 	/*
 	 * sram memory access
@@ -313,7 +339,7 @@ bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
 	if (ioc->cna) {
 		bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
 		writel(0, ioc->ioc_regs.ioc_usage_reg);
-		bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+		writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
 	}
 
 	/*
@@ -322,10 +348,80 @@ bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
 	 * will lock it instead of clearing it.
 	 */
 	readl(ioc->ioc_regs.ioc_sem_reg);
-	bfa_ioc_hw_sem_release(ioc);
+	writel(1, ioc->ioc_regs.ioc_sem_reg);
+}
+
+/*
+ * Synchronized IOC failure processing routines
+ */
+static void
+bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc)
+{
+	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
+	uint32_t sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc);
+
+	writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync);
+}
+
+static void
+bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc)
+{
+	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
+	uint32_t sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) |
+					bfa_ioc_ct_sync_pos(ioc);
+
+	writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync);
+}
+
+static void
+bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc)
+{
+	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
+
+	writel((r32 | bfa_ioc_ct_sync_pos(ioc)),
+		ioc->ioc_regs.ioc_fail_sync);
 }
 
+static bfa_boolean_t
+bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc)
+{
+	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
+	uint32_t sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
+	uint32_t sync_ackd = bfa_ioc_ct_get_sync_ackd(r32);
+	uint32_t tmp_ackd;
+
+	if (sync_ackd == 0)
+		return BFA_TRUE;
+
+	/*
+	 * The check below is to see whether any other PCI fn
+	 * has reinitialized the ASIC (reset sync_ackd bits)
+	 * and failed again while this IOC was waiting for hw
+	 * semaphore (in bfa_iocpf_sm_semwait()).
+	 */
+	tmp_ackd = sync_ackd;
+	if ((sync_reqd &  bfa_ioc_ct_sync_pos(ioc)) &&
+		!(sync_ackd & bfa_ioc_ct_sync_pos(ioc)))
+		sync_ackd |= bfa_ioc_ct_sync_pos(ioc);
+
+	if (sync_reqd == sync_ackd) {
+		writel(bfa_ioc_ct_clear_sync_ackd(r32),
+			ioc->ioc_regs.ioc_fail_sync);
+		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
+		writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate);
+		return BFA_TRUE;
+	}
+
+	/*
+	 * If another PCI fn reinitialized and failed again while
+	 * this IOC was waiting for hw sem, the sync_ackd bit for
+	 * this IOC needs to be set again to allow reinitialization.
+	 */
+	if (tmp_ackd != sync_ackd)
+		writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync);
 
+	return BFA_FALSE;
+}
 
 /*
  * Check the firmware state to know if pll_init has been completed already

+ 0 - 3
drivers/scsi/bfa/bfa_modules.h

@@ -99,7 +99,6 @@ struct bfa_module_s {
 	void (*iocdisable) (struct bfa_s *bfa);
 };
 
-extern struct bfa_module_s *hal_mods[];
 
 struct bfa_s {
 	void			*bfad;		/*  BFA driver instance    */
@@ -116,8 +115,6 @@ struct bfa_s {
 	struct bfa_msix_s	msix;
 };
 
-extern bfa_isr_func_t bfa_isrs[BFI_MC_MAX];
-extern bfa_ioc_mbox_mcfunc_t  bfa_mbox_isrs[];
 extern bfa_boolean_t bfa_auto_recover;
 extern struct bfa_module_s hal_mod_sgpg;
 extern struct bfa_module_s hal_mod_fcport;

+ 0 - 143
drivers/scsi/bfa/bfa_os_inc.h

@@ -1,143 +0,0 @@
-/*
- * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- *
- * Linux driver for Brocade Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- */
-
-#ifndef __BFA_OS_INC_H__
-#define __BFA_OS_INC_H__
-
-#include <linux/types.h>
-#include <linux/version.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/idr.h>
-#include <linux/interrupt.h>
-#include <linux/cdev.h>
-#include <linux/fs.h>
-#include <linux/delay.h>
-#include <linux/vmalloc.h>
-#include <linux/workqueue.h>
-#include <linux/bitops.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_host.h>
-#include <scsi/scsi_tcq.h>
-#include <scsi/scsi_transport_fc.h>
-#include <scsi/scsi_transport.h>
-
-#ifdef __BIG_ENDIAN
-#define __BIGENDIAN
-#endif
-
-static inline u64 bfa_os_get_log_time(void)
-{
-	u64 system_time = 0;
-	struct timeval tv;
-	do_gettimeofday(&tv);
-
-	/* We are interested in seconds only. */
-	system_time = tv.tv_sec;
-	return system_time;
-}
-
-#define bfa_io_lat_clock_res_div HZ
-#define bfa_io_lat_clock_res_mul 1000
-
-#define BFA_LOG(level, bfad, mask, fmt, arg...)				\
-do {									\
-	if (((mask) == 4) || (level[1] <= '4'))				\
-		dev_printk(level, &((bfad)->pcidev)->dev, fmt, ##arg);	\
-} while (0)
-
-#define bfa_swap_3b(_x)				\
-	((((_x) & 0xff) << 16) |		\
-	((_x) & 0x00ff00) |			\
-	(((_x) & 0xff0000) >> 16))
-
-#define bfa_os_swap_sgaddr(_x)  ((u64)(                                 \
-	(((u64)(_x) & (u64)0x00000000000000ffull) << 32)        |       \
-	(((u64)(_x) & (u64)0x000000000000ff00ull) << 32)        |       \
-	(((u64)(_x) & (u64)0x0000000000ff0000ull) << 32)        |       \
-	(((u64)(_x) & (u64)0x00000000ff000000ull) << 32)        |       \
-	(((u64)(_x) & (u64)0x000000ff00000000ull) >> 32)        |       \
-	(((u64)(_x) & (u64)0x0000ff0000000000ull) >> 32)        |       \
-	(((u64)(_x) & (u64)0x00ff000000000000ull) >> 32)        |       \
-	(((u64)(_x) & (u64)0xff00000000000000ull) >> 32)))
-
-#ifndef __BIGENDIAN
-#define bfa_os_hton3b(_x)  bfa_swap_3b(_x)
-#define bfa_os_sgaddr(_x)  (_x)
-#else
-#define bfa_os_hton3b(_x)  (_x)
-#define bfa_os_sgaddr(_x)  bfa_os_swap_sgaddr(_x)
-#endif
-
-#define bfa_os_ntoh3b(_x)  bfa_os_hton3b(_x)
-#define bfa_os_u32(__pa64) ((__pa64) >> 32)
-
-#define BFA_TRC_TS(_trcm)				\
-	({						\
-		struct timeval tv;			\
-							\
-		do_gettimeofday(&tv);			\
-		(tv.tv_sec*1000000+tv.tv_usec);		\
-	 })
-
-#define boolean_t int
-
-/*
- * For current time stamp, OS API will fill-in
- */
-struct bfa_timeval_s {
-	u32	tv_sec;		/*  seconds        */
-	u32	tv_usec;	/*  microseconds   */
-};
-
-static inline void
-bfa_os_gettimeofday(struct bfa_timeval_s *tv)
-{
-	struct timeval  tmp_tv;
-
-	do_gettimeofday(&tmp_tv);
-	tv->tv_sec = (u32) tmp_tv.tv_sec;
-	tv->tv_usec = (u32) tmp_tv.tv_usec;
-}
-
-static inline void
-wwn2str(char *wwn_str, u64 wwn)
-{
-	union {
-		u64 wwn;
-		u8 byte[8];
-	} w;
-
-	w.wwn = wwn;
-	sprintf(wwn_str, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x", w.byte[0],
-		w.byte[1], w.byte[2], w.byte[3], w.byte[4], w.byte[5],
-		w.byte[6], w.byte[7]);
-}
-
-static inline void
-fcid2str(char *fcid_str, u32 fcid)
-{
-	union {
-		u32 fcid;
-		u8 byte[4];
-	} f;
-
-	f.fcid = fcid;
-	sprintf(fcid_str, "%02x:%02x:%02x", f.byte[1], f.byte[2], f.byte[3]);
-}
-
-#endif /* __BFA_OS_INC_H__ */

+ 0 - 4
drivers/scsi/bfa/bfa_plog.h

@@ -151,9 +151,5 @@ void bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
 void bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
 			enum bfa_plog_eid event, u16 misc,
 			struct fchs_s *fchdr, u32 pld_w0);
-void bfa_plog_clear(struct bfa_plog_s *plog);
-void bfa_plog_enable(struct bfa_plog_s *plog);
-void bfa_plog_disable(struct bfa_plog_s *plog);
-bfa_boolean_t	bfa_plog_get_setting(struct bfa_plog_s *plog);
 
 #endif /* __BFA_PORTLOG_H__ */

+ 12 - 25
drivers/scsi/bfa/bfa_port.c

@@ -15,6 +15,7 @@
  * General Public License for more details.
  */
 
+#include "bfad_drv.h"
 #include "bfa_defs_svc.h"
 #include "bfa_port.h"
 #include "bfi.h"
@@ -29,14 +30,14 @@ static void
 bfa_port_stats_swap(struct bfa_port_s *port, union bfa_port_stats_u *stats)
 {
 	u32    *dip = (u32 *) stats;
-	u32    t0, t1;
+	__be32    t0, t1;
 	int	    i;
 
 	for (i = 0; i < sizeof(union bfa_port_stats_u)/sizeof(u32);
 		i += 2) {
 		t0 = dip[i];
 		t1 = dip[i + 1];
-#ifdef __BIGENDIAN
+#ifdef __BIG_ENDIAN
 		dip[i] = be32_to_cpu(t0);
 		dip[i + 1] = be32_to_cpu(t1);
 #else
@@ -96,13 +97,13 @@ bfa_port_get_stats_isr(struct bfa_port_s *port, bfa_status_t status)
 	port->stats_busy = BFA_FALSE;
 
 	if (status == BFA_STATUS_OK) {
-		struct bfa_timeval_s tv;
+		struct timeval tv;
 
 		memcpy(port->stats, port->stats_dma.kva,
 		       sizeof(union bfa_port_stats_u));
 		bfa_port_stats_swap(port, port->stats);
 
-		bfa_os_gettimeofday(&tv);
+		do_gettimeofday(&tv);
 		port->stats->fc.secs_reset = tv.tv_sec - port->stats_reset_time;
 	}
 
@@ -124,7 +125,7 @@ bfa_port_get_stats_isr(struct bfa_port_s *port, bfa_status_t status)
 static void
 bfa_port_clear_stats_isr(struct bfa_port_s *port, bfa_status_t status)
 {
-	struct bfa_timeval_s tv;
+	struct timeval tv;
 
 	port->stats_status = status;
 	port->stats_busy   = BFA_FALSE;
@@ -132,7 +133,7 @@ bfa_port_clear_stats_isr(struct bfa_port_s *port, bfa_status_t status)
 	/*
 	* re-initialize time stamp for stats reset
 	*/
-	bfa_os_gettimeofday(&tv);
+	do_gettimeofday(&tv);
 	port->stats_reset_time = tv.tv_sec;
 
 	if (port->stats_cbfn) {
@@ -185,7 +186,7 @@ bfa_port_isr(void *cbarg, struct bfi_mbmsg_s *m)
 		break;
 
 	default:
-		bfa_assert(0);
+		WARN_ON(1);
 	}
 }
 
@@ -432,9 +433,9 @@ void
 bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
 		 void *dev, struct bfa_trc_mod_s *trcmod)
 {
-	struct bfa_timeval_s tv;
+	struct timeval tv;
 
-	bfa_assert(port);
+	WARN_ON(!port);
 
 	port->dev    = dev;
 	port->ioc    = ioc;
@@ -447,27 +448,13 @@ bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
 
 	bfa_ioc_mbox_regisr(port->ioc, BFI_MC_PORT, bfa_port_isr, port);
 	bfa_ioc_hbfail_init(&port->hbfail, bfa_port_hbfail, port);
-	bfa_ioc_hbfail_register(port->ioc, &port->hbfail);
+	list_add_tail(&port->hbfail.qe, &port->ioc->hb_notify_q);
 
 	/*
 	 * initialize time stamp for stats reset
 	 */
-	bfa_os_gettimeofday(&tv);
+	do_gettimeofday(&tv);
 	port->stats_reset_time = tv.tv_sec;
 
 	bfa_trc(port, 0);
 }
-
-/*
- * bfa_port_detach()
- *
- *
- * @param[in] port - Pointer to the Port module data structure
- *
- * @return void
- */
-void
-bfa_port_detach(struct bfa_port_s *port)
-{
-	bfa_trc(port, 0);
-}

+ 0 - 1
drivers/scsi/bfa/bfa_port.h

@@ -48,7 +48,6 @@ struct bfa_port_s {
 
 void	     bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
 				void *dev, struct bfa_trc_mod_s *trcmod);
-void	     bfa_port_detach(struct bfa_port_s *port);
 void	     bfa_port_hbfail(void *arg);
 
 bfa_status_t bfa_port_get_stats(struct bfa_port_s *port,

These changes were omitted because the diff is too large
+ 180 - 339
drivers/scsi/bfa/bfa_svc.c


+ 14 - 49
drivers/scsi/bfa/bfa_svc.h

@@ -220,6 +220,18 @@ void	bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
 /*
  * RPORT related defines
  */
+enum bfa_rport_event {
+	BFA_RPORT_SM_CREATE	= 1,	/*  rport create event          */
+	BFA_RPORT_SM_DELETE	= 2,	/*  deleting an existing rport  */
+	BFA_RPORT_SM_ONLINE	= 3,	/*  rport is online             */
+	BFA_RPORT_SM_OFFLINE	= 4,	/*  rport is offline            */
+	BFA_RPORT_SM_FWRSP	= 5,	/*  firmware response           */
+	BFA_RPORT_SM_HWFAIL	= 6,	/*  IOC h/w failure             */
+	BFA_RPORT_SM_QOS_SCN	= 7,	/*  QoS SCN from firmware       */
+	BFA_RPORT_SM_SET_SPEED	= 8,	/*  Set Rport Speed             */
+	BFA_RPORT_SM_QRESUME	= 9,	/*  space in requeue queue      */
+};
+
 #define BFA_RPORT_MIN	4
 
 struct bfa_rport_mod_s {
@@ -432,6 +444,7 @@ struct bfa_fcport_s {
 	u8			myalpa;	/*  my ALPA in LOOP topology */
 	u8			rsvd[3];
 	struct bfa_port_cfg_s	cfg;	/*  current port configuration */
+	bfa_boolean_t		use_flash_cfg; /* get port cfg from flash */
 	struct bfa_qos_attr_s  qos_attr;   /* QoS Attributes */
 	struct bfa_qos_vc_attr_s qos_vc_attr;  /*  VC info from ELP */
 	struct bfa_reqq_wait_s	reqq_wait;
@@ -500,30 +513,9 @@ void bfa_fcport_event_register(struct bfa_s *bfa,
 			void (*event_cbfn) (void *cbarg,
 			enum bfa_port_linkstate event), void *event_cbarg);
 bfa_boolean_t bfa_fcport_is_disabled(struct bfa_s *bfa);
-void bfa_fcport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off);
-void bfa_fcport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off);
-bfa_status_t bfa_fcport_cfg_ratelim_speed(struct bfa_s *bfa,
-					  enum bfa_port_speed speed);
 enum bfa_port_speed bfa_fcport_get_ratelim_speed(struct bfa_s *bfa);
 
 void bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit);
-void bfa_fcport_busy(struct bfa_s *bfa, bfa_boolean_t status);
-void bfa_fcport_beacon(void *dev, bfa_boolean_t beacon,
-		       bfa_boolean_t link_e2e_beacon);
-void bfa_fcport_qos_get_attr(struct bfa_s *bfa,
-			     struct bfa_qos_attr_s *qos_attr);
-void bfa_fcport_qos_get_vc_attr(struct bfa_s *bfa,
-				struct bfa_qos_vc_attr_s *qos_vc_attr);
-bfa_status_t bfa_fcport_get_qos_stats(struct bfa_s *bfa,
-				      union bfa_fcport_stats_u *stats,
-				      bfa_cb_port_t cbfn, void *cbarg);
-bfa_status_t bfa_fcport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn,
-					void *cbarg);
-bfa_status_t bfa_fcport_get_fcoe_stats(struct bfa_s *bfa,
-				       union bfa_fcport_stats_u *stats,
-				       bfa_cb_port_t cbfn, void *cbarg);
-bfa_status_t bfa_fcport_clear_fcoe_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn,
-					 void *cbarg);
 bfa_boolean_t     bfa_fcport_is_ratelim(struct bfa_s *bfa);
 bfa_boolean_t	bfa_fcport_is_linkup(struct bfa_s *bfa);
 bfa_status_t bfa_fcport_get_stats(struct bfa_s *bfa,
@@ -537,14 +529,9 @@ bfa_boolean_t bfa_fcport_is_qos_enabled(struct bfa_s *bfa);
  * bfa rport API functions
  */
 struct bfa_rport_s *bfa_rport_create(struct bfa_s *bfa, void *rport_drv);
-void bfa_rport_delete(struct bfa_rport_s *rport);
 void bfa_rport_online(struct bfa_rport_s *rport,
 		      struct bfa_rport_info_s *rport_info);
-void bfa_rport_offline(struct bfa_rport_s *rport);
 void bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed);
-void bfa_rport_get_stats(struct bfa_rport_s *rport,
-			 struct bfa_rport_hal_stats_s *stats);
-void bfa_rport_clear_stats(struct bfa_rport_s *rport);
 void bfa_cb_rport_online(void *rport);
 void bfa_cb_rport_offline(void *rport);
 void bfa_cb_rport_qos_scn_flowid(void *rport,
@@ -553,8 +540,6 @@ void bfa_cb_rport_qos_scn_flowid(void *rport,
 void bfa_cb_rport_qos_scn_prio(void *rport,
 			       struct bfa_rport_qos_attr_s old_qos_attr,
 			       struct bfa_rport_qos_attr_s new_qos_attr);
-void bfa_rport_get_qos_attr(struct bfa_rport_s *rport,
-			    struct bfa_rport_qos_attr_s *qos_attr);
 
 /*
  * bfa fcxp API functions
@@ -619,38 +604,18 @@ void bfa_uf_free(struct bfa_uf_s *uf);
 u32 bfa_lps_get_max_vport(struct bfa_s *bfa);
 struct bfa_lps_s *bfa_lps_alloc(struct bfa_s *bfa);
 void bfa_lps_delete(struct bfa_lps_s *lps);
-void bfa_lps_discard(struct bfa_lps_s *lps);
 void bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa,
 		   u16 pdusz, wwn_t pwwn, wwn_t nwwn,
 		   bfa_boolean_t auth_en);
 void bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz,
 		   wwn_t pwwn, wwn_t nwwn);
-void bfa_lps_flogo(struct bfa_lps_s *lps);
 void bfa_lps_fdisclogo(struct bfa_lps_s *lps);
-u8 bfa_lps_get_tag(struct bfa_lps_s *lps);
-bfa_boolean_t bfa_lps_is_npiv_en(struct bfa_lps_s *lps);
-bfa_boolean_t bfa_lps_is_fport(struct bfa_lps_s *lps);
-bfa_boolean_t bfa_lps_is_brcd_fabric(struct bfa_lps_s *lps);
-bfa_boolean_t bfa_lps_is_authreq(struct bfa_lps_s *lps);
-bfa_eproto_status_t bfa_lps_get_extstatus(struct bfa_lps_s *lps);
-u32 bfa_lps_get_pid(struct bfa_lps_s *lps);
+void bfa_lps_set_n2n_pid(struct bfa_lps_s *lps, u32 n2n_pid);
 u32 bfa_lps_get_base_pid(struct bfa_s *bfa);
 u8 bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid);
-u16 bfa_lps_get_peer_bbcredit(struct bfa_lps_s *lps);
-wwn_t bfa_lps_get_peer_pwwn(struct bfa_lps_s *lps);
-wwn_t bfa_lps_get_peer_nwwn(struct bfa_lps_s *lps);
-u8 bfa_lps_get_lsrjt_rsn(struct bfa_lps_s *lps);
-u8 bfa_lps_get_lsrjt_expl(struct bfa_lps_s *lps);
-mac_t bfa_lps_get_lp_mac(struct bfa_lps_s *lps);
 void bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status);
 void bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status);
 void bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg);
 void bfa_cb_lps_cvl_event(void *bfad, void *uarg);
 
-void bfa_trunk_enable_cfg(struct bfa_s *bfa);
-bfa_status_t bfa_trunk_enable(struct bfa_s *bfa);
-bfa_status_t bfa_trunk_disable(struct bfa_s *bfa);
-bfa_status_t bfa_trunk_get_attr(struct bfa_s *bfa,
-		struct bfa_trunk_attr_s *attr);
-
 #endif /* __BFA_SVC_H__ */

+ 18 - 20
drivers/scsi/bfa/bfad.c

@@ -32,7 +32,6 @@
 #include "bfad_drv.h"
 #include "bfad_im.h"
 #include "bfa_fcs.h"
-#include "bfa_os_inc.h"
 #include "bfa_defs.h"
 #include "bfa.h"
 
@@ -61,12 +60,12 @@ int		msix_disable_cb = 0, msix_disable_ct = 0;
 u32	bfi_image_ct_fc_size, bfi_image_ct_cna_size, bfi_image_cb_fc_size;
 u32     *bfi_image_ct_fc, *bfi_image_ct_cna, *bfi_image_cb_fc;
 
-const char *msix_name_ct[] = {
+static const char *msix_name_ct[] = {
 	"cpe0", "cpe1", "cpe2", "cpe3",
 	"rme0", "rme1", "rme2", "rme3",
 	"ctrl" };
 
-const char *msix_name_cb[] = {
+static const char *msix_name_cb[] = {
 	"cpe0", "cpe1", "cpe2", "cpe3",
 	"rme0", "rme1", "rme2", "rme3",
 	"eemc", "elpu0", "elpu1", "epss", "mlpu" };
@@ -206,7 +205,7 @@ bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event)
 		}
 
 		spin_lock_irqsave(&bfad->bfad_lock, flags);
-		bfa_init(&bfad->bfa);
+		bfa_iocfc_init(&bfad->bfa);
 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 
 		/* Set up interrupt handler for each vectors */
@@ -533,7 +532,7 @@ bfad_hal_mem_release(struct bfad_s *bfad)
 					(dma_addr_t) meminfo_elem->dma);
 				break;
 			default:
-				bfa_assert(0);
+				WARN_ON(1);
 				break;
 			}
 		}
@@ -725,7 +724,7 @@ bfad_bfa_tmo(unsigned long data)
 
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
 
-	bfa_timer_tick(&bfad->bfa);
+	bfa_timer_beat(&bfad->bfa.timer_mod);
 
 	bfa_comp_deq(&bfad->bfa, &doneq);
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
@@ -882,8 +881,8 @@ bfad_drv_init(struct bfad_s *bfad)
 		goto out_hal_mem_alloc_failure;
 	}
 
-	bfa_init_trc(&bfad->bfa, bfad->trcmod);
-	bfa_init_plog(&bfad->bfa, &bfad->plog_buf);
+	bfad->bfa.trcmod = bfad->trcmod;
+	bfad->bfa.plog = &bfad->plog_buf;
 	bfa_plog_init(&bfad->plog_buf);
 	bfa_plog_str(&bfad->plog_buf, BFA_PL_MID_DRVR, BFA_PL_EID_DRIVER_START,
 		     0, "Driver Attach");
@@ -893,9 +892,9 @@ bfad_drv_init(struct bfad_s *bfad)
 
 	/* FCS INIT */
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
-	bfa_fcs_trc_init(&bfad->bfa_fcs, bfad->trcmod);
+	bfad->bfa_fcs.trcmod = bfad->trcmod;
 	bfa_fcs_attach(&bfad->bfa_fcs, &bfad->bfa, bfad, BFA_FALSE);
-	bfa_fcs_set_fdmi_param(&bfad->bfa_fcs, fdmi_enable);
+	bfad->bfa_fcs.fdmi_enabled = fdmi_enable;
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 
 	bfad->bfad_flags |= BFAD_DRV_INIT_DONE;
@@ -913,7 +912,7 @@ bfad_drv_uninit(struct bfad_s *bfad)
 
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
 	init_completion(&bfad->comp);
-	bfa_stop(&bfad->bfa);
+	bfa_iocfc_stop(&bfad->bfa);
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 	wait_for_completion(&bfad->comp);
 
@@ -932,8 +931,8 @@ bfad_drv_start(struct bfad_s *bfad)
 	unsigned long	flags;
 
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
-	bfa_start(&bfad->bfa);
-	bfa_fcs_start(&bfad->bfa_fcs);
+	bfa_iocfc_start(&bfad->bfa);
+	bfa_fcs_fabric_modstart(&bfad->bfa_fcs);
 	bfad->bfad_flags |= BFAD_HAL_START_DONE;
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 
@@ -963,7 +962,7 @@ bfad_stop(struct bfad_s *bfad)
 
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
 	init_completion(&bfad->comp);
-	bfa_stop(&bfad->bfa);
+	bfa_iocfc_stop(&bfad->bfa);
 	bfad->bfad_flags &= ~BFAD_HAL_START_DONE;
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 	wait_for_completion(&bfad->comp);
@@ -1102,15 +1101,15 @@ bfad_start_ops(struct bfad_s *bfad) {
 
 	/*
 	 * If bfa_linkup_delay is set to -1 (the default), try to retrieve the
-	 * value using the bfad_os_get_linkup_delay(); else use the
+	 * value using the bfad_get_linkup_delay(); else use the
 	 * passed in module param value as the bfa_linkup_delay.
 	 */
 	if (bfa_linkup_delay < 0) {
-		bfa_linkup_delay = bfad_os_get_linkup_delay(bfad);
-		bfad_os_rport_online_wait(bfad);
+		bfa_linkup_delay = bfad_get_linkup_delay(bfad);
+		bfad_rport_online_wait(bfad);
 		bfa_linkup_delay = -1;
 	} else
-		bfad_os_rport_online_wait(bfad);
+		bfad_rport_online_wait(bfad);
 
 	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "bfa device claimed\n");
 
@@ -1167,7 +1166,6 @@ bfad_intx(int irq, void *dev_id)
 		spin_lock_irqsave(&bfad->bfad_lock, flags);
 		bfa_comp_free(&bfad->bfa, &doneq);
 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
-		bfa_trc_fp(bfad, irq);
 	}
 
 	return IRQ_HANDLED;
@@ -1524,7 +1522,7 @@ bfad_init(void)
 	if (strcmp(FCPI_NAME, " fcpim") == 0)
 		supported_fc4s |= BFA_LPORT_ROLE_FCP_IM;
 
-	bfa_ioc_auto_recover(ioc_auto_recover);
+	bfa_auto_recover = ioc_auto_recover;
 	bfa_fcs_rport_set_del_timeout(rport_del_timeout);
 
 	error = pci_register_driver(&bfad_pci_driver);

+ 9 - 9
drivers/scsi/bfa/bfad_attr.c

@@ -25,7 +25,7 @@
 /*
  * FC transport template entry, get SCSI target port ID.
  */
-void
+static void
 bfad_im_get_starget_port_id(struct scsi_target *starget)
 {
 	struct Scsi_Host *shost;
@@ -40,7 +40,7 @@ bfad_im_get_starget_port_id(struct scsi_target *starget)
 	bfad = im_port->bfad;
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
 
-	itnim = bfad_os_get_itnim(im_port, starget->id);
+	itnim = bfad_get_itnim(im_port, starget->id);
 	if (itnim)
 		fc_id = bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim);
 
@@ -51,7 +51,7 @@ bfad_im_get_starget_port_id(struct scsi_target *starget)
 /*
  * FC transport template entry, get SCSI target nwwn.
  */
-void
+static void
 bfad_im_get_starget_node_name(struct scsi_target *starget)
 {
 	struct Scsi_Host *shost;
@@ -66,7 +66,7 @@ bfad_im_get_starget_node_name(struct scsi_target *starget)
 	bfad = im_port->bfad;
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
 
-	itnim = bfad_os_get_itnim(im_port, starget->id);
+	itnim = bfad_get_itnim(im_port, starget->id);
 	if (itnim)
 		node_name = bfa_fcs_itnim_get_nwwn(&itnim->fcs_itnim);
 
@@ -77,7 +77,7 @@ bfad_im_get_starget_node_name(struct scsi_target *starget)
 /*
  * FC transport template entry, get SCSI target pwwn.
  */
-void
+static void
 bfad_im_get_starget_port_name(struct scsi_target *starget)
 {
 	struct Scsi_Host *shost;
@@ -92,7 +92,7 @@ bfad_im_get_starget_port_name(struct scsi_target *starget)
 	bfad = im_port->bfad;
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
 
-	itnim = bfad_os_get_itnim(im_port, starget->id);
+	itnim = bfad_get_itnim(im_port, starget->id);
 	if (itnim)
 		port_name = bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim);
 
@@ -103,7 +103,7 @@ bfad_im_get_starget_port_name(struct scsi_target *starget)
 /*
  * FC transport template entry, get SCSI host port ID.
  */
-void
+static void
 bfad_im_get_host_port_id(struct Scsi_Host *shost)
 {
 	struct bfad_im_port_s *im_port =
@@ -111,7 +111,7 @@ bfad_im_get_host_port_id(struct Scsi_Host *shost)
 	struct bfad_port_s    *port = im_port->port;
 
 	fc_host_port_id(shost) =
-			bfa_os_hton3b(bfa_fcs_lport_get_fcid(port->fcs_port));
+			bfa_hton3b(bfa_fcs_lport_get_fcid(port->fcs_port));
 }
 
 /*
@@ -487,7 +487,7 @@ bfad_im_vport_delete(struct fc_vport *fc_vport)
 	wait_for_completion(vport->comp_del);
 
 free_scsi_host:
-	bfad_os_scsi_host_free(bfad, im_port);
+	bfad_scsi_host_free(bfad, im_port);
 
 	kfree(vport);
 

+ 39 - 7
drivers/scsi/bfa/bfad_debugfs.c

@@ -90,7 +90,7 @@ bfad_debugfs_open_fwtrc(struct inode *inode, struct file *file)
 	memset(fw_debug->debug_buffer, 0, fw_debug->buffer_len);
 
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
-	rc = bfa_debug_fwtrc(&bfad->bfa,
+	rc = bfa_ioc_debug_fwtrc(&bfad->bfa.ioc,
 			fw_debug->debug_buffer,
 			&fw_debug->buffer_len);
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
@@ -134,7 +134,7 @@ bfad_debugfs_open_fwsave(struct inode *inode, struct file *file)
 	memset(fw_debug->debug_buffer, 0, fw_debug->buffer_len);
 
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
-	rc = bfa_debug_fwsave(&bfad->bfa,
+	rc = bfa_ioc_debug_fwsave(&bfad->bfa.ioc,
 			fw_debug->debug_buffer,
 			&fw_debug->buffer_len);
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
@@ -208,7 +208,7 @@ bfad_debugfs_read(struct file *file, char __user *buf,
 	if (!debug || !debug->debug_buffer)
 		return 0;
 
-	return memory_read_from_buffer(buf, nbytes, pos,
+	return simple_read_from_buffer(buf, nbytes, pos,
 				debug->debug_buffer, debug->buffer_len);
 }
 
@@ -254,7 +254,7 @@ bfad_debugfs_read_regrd(struct file *file, char __user *buf,
 	if (!bfad->regdata)
 		return 0;
 
-	rc = memory_read_from_buffer(buf, nbytes, pos,
+	rc = simple_read_from_buffer(buf, nbytes, pos,
 			bfad->regdata, bfad->reglen);
 
 	if ((*pos + nbytes) >= bfad->reglen) {
@@ -279,15 +279,31 @@ bfad_debugfs_write_regrd(struct file *file, const char __user *buf,
 	u32 *regbuf;
 	void __iomem *rb, *reg_addr;
 	unsigned long flags;
+	void *kern_buf;
 
-	rc = sscanf(buf, "%x:%x", &addr, &len);
+	kern_buf = kzalloc(nbytes, GFP_KERNEL);
+
+	if (!kern_buf) {
+		printk(KERN_INFO "bfad[%d]: Failed to allocate buffer\n",
+				bfad->inst_no);
+		return -ENOMEM;
+	}
+
+	if (copy_from_user(kern_buf, (void  __user *)buf, nbytes)) {
+		kfree(kern_buf);
+		return -ENOMEM;
+	}
+
+	rc = sscanf(kern_buf, "%x:%x", &addr, &len);
 	if (rc < 2) {
 		printk(KERN_INFO
 			"bfad[%d]: %s failed to read user buf\n",
 			bfad->inst_no, __func__);
+		kfree(kern_buf);
 		return -EINVAL;
 	}
 
+	kfree(kern_buf);
 	kfree(bfad->regdata);
 	bfad->regdata = NULL;
 	bfad->reglen = 0;
@@ -339,14 +355,30 @@ bfad_debugfs_write_regwr(struct file *file, const char __user *buf,
 	int addr, val, rc;
 	void __iomem *reg_addr;
 	unsigned long flags;
+	void *kern_buf;
+
+	kern_buf = kzalloc(nbytes, GFP_KERNEL);
+
+	if (!kern_buf) {
+		printk(KERN_INFO "bfad[%d]: Failed to allocate buffer\n",
+				bfad->inst_no);
+		return -ENOMEM;
+	}
+
+	if (copy_from_user(kern_buf, (void  __user *)buf, nbytes)) {
+		kfree(kern_buf);
+		return -ENOMEM;
+	}
 
-	rc = sscanf(buf, "%x:%x", &addr, &val);
+	rc = sscanf(kern_buf, "%x:%x", &addr, &val);
 	if (rc < 2) {
 		printk(KERN_INFO
 			"bfad[%d]: %s failed to read user buf\n",
 			bfad->inst_no, __func__);
+		kfree(kern_buf);
 		return -EINVAL;
 	}
+	kfree(kern_buf);
 
 	addr &= BFA_REG_ADDRMSK(bfa); /* offset only 17 bit and word align */
 
@@ -359,7 +391,7 @@ bfad_debugfs_write_regwr(struct file *file, const char __user *buf,
 		return -EINVAL;
 	}
 
-	reg_addr = (u32 *) ((u8 *) bfa_ioc_bar0(ioc) + addr);
+	reg_addr = (bfa_ioc_bar0(ioc)) + addr;
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
 	writel(val, reg_addr);
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
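
The regrd/regwr write handlers above stop calling sscanf() directly on the user pointer and instead bounce the input through a kernel buffer filled with copy_from_user(). A minimal sketch of that pattern is shown below; the handler name and parsed fields are hypothetical, the buffer is allocated one byte larger so it is always NUL-terminated, and a faulted copy returns -EFAULT (the conventional errno; the hunks above return -ENOMEM in that branch).

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

/* Hypothetical debugfs write handler illustrating the bounce-buffer pattern:
 * never parse a __user pointer directly; copy it into kernel memory first.
 */
static ssize_t my_debugfs_write(struct file *file, const char __user *buf,
				size_t nbytes, loff_t *pos)
{
	u32 addr, len;
	char *kern_buf;

	/* +1 so the buffer is always NUL-terminated for sscanf() */
	kern_buf = kzalloc(nbytes + 1, GFP_KERNEL);
	if (!kern_buf)
		return -ENOMEM;

	if (copy_from_user(kern_buf, buf, nbytes)) {
		kfree(kern_buf);
		return -EFAULT;	/* conventional errno for a faulted copy */
	}

	if (sscanf(kern_buf, "%x:%x", &addr, &len) != 2) {
		kfree(kern_buf);
		return -EINVAL;
	}
	kfree(kern_buf);

	/* ... act on addr/len under the driver lock ... */
	return nbytes;
}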

+ 27 - 18
drivers/scsi/bfa/bfad_drv.h

@@ -26,7 +26,23 @@
 #ifndef __BFAD_DRV_H__
 #define __BFAD_DRV_H__
 
-#include "bfa_os_inc.h"
+#include <linux/types.h>
+#include <linux/version.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/idr.h>
+#include <linux/interrupt.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/delay.h>
+#include <linux/vmalloc.h>
+#include <linux/workqueue.h>
+#include <linux/bitops.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_transport_fc.h>
+#include <scsi/scsi_transport.h>
 
 #include "bfa_modules.h"
 #include "bfa_fcs.h"
@@ -39,7 +55,7 @@
 #ifdef BFA_DRIVER_VERSION
 #define BFAD_DRIVER_VERSION    BFA_DRIVER_VERSION
 #else
-#define BFAD_DRIVER_VERSION    "2.3.2.0"
+#define BFAD_DRIVER_VERSION    "2.3.2.3"
 #endif
 
 #define BFAD_PROTO_NAME FCPI_NAME
@@ -263,28 +279,21 @@ struct bfad_hal_comp {
  */
 #define nextLowerInt(x)                         \
 do {                                            \
-	int i;                                  \
+	int __i;                                  \
 	(*x)--;					\
-	for (i = 1; i < (sizeof(int)*8); i <<= 1) \
-		(*x) = (*x) | (*x) >> i;	\
+	for (__i = 1; __i < (sizeof(int)*8); __i <<= 1) \
+		(*x) = (*x) | (*x) >> __i;	\
 	(*x)++;					\
 	(*x) = (*x) >> 1;			\
 } while (0)
 
 
-#define list_remove_head(list, entry, type, member)		\
-do {								\
-	entry = NULL;                                           \
-	if (!list_empty(list)) {                                \
-		entry = list_entry((list)->next, type, member);	\
-		list_del_init(&entry->member);			\
-	}							\
+#define BFA_LOG(level, bfad, mask, fmt, arg...)				\
+do {									\
+	if (((mask) == 4) || (level[1] <= '4'))				\
+		dev_printk(level, &((bfad)->pcidev)->dev, fmt, ##arg);	\
 } while (0)
 
-#define list_get_first(list, type, member)				\
-((list_empty(list)) ? NULL :						\
-	list_entry((list)->next, type, member))
-
 bfa_status_t	bfad_vport_create(struct bfad_s *bfad, u16 vf_id,
 				  struct bfa_lport_cfg_s *port_cfg,
 				  struct device *dev);
@@ -316,8 +325,8 @@ void		bfad_debugfs_exit(struct bfad_port_s *port);
 
 void bfad_pci_remove(struct pci_dev *pdev);
 int bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid);
-void bfad_os_rport_online_wait(struct bfad_s *bfad);
-int bfad_os_get_linkup_delay(struct bfad_s *bfad);
+void bfad_rport_online_wait(struct bfad_s *bfad);
+int bfad_get_linkup_delay(struct bfad_s *bfad);
 int bfad_install_msix_handler(struct bfad_s *bfad);
 
 extern struct idr bfad_im_port_index;

+ 30 - 29
drivers/scsi/bfa/bfad_im.c

@@ -21,7 +21,6 @@
 
 #include "bfad_drv.h"
 #include "bfad_im.h"
-#include "bfa_cb_ioim.h"
 #include "bfa_fcs.h"
 
 BFA_TRC_FILE(LDRV, IM);
@@ -93,10 +92,10 @@ bfa_cb_ioim_done(void *drv, struct bfad_ioim_s *dio,
 		if (!cmnd->result && itnim &&
 			 (bfa_lun_queue_depth > cmnd->device->queue_depth)) {
 			/* Queue depth adjustment for good status completion */
-			bfad_os_ramp_up_qdepth(itnim, cmnd->device);
+			bfad_ramp_up_qdepth(itnim, cmnd->device);
 		} else if (cmnd->result == SAM_STAT_TASK_SET_FULL && itnim) {
 			/* qfull handling */
-			bfad_os_handle_qfull(itnim, cmnd->device);
+			bfad_handle_qfull(itnim, cmnd->device);
 		}
 	}
 
@@ -124,7 +123,7 @@ bfa_cb_ioim_good_comp(void *drv, struct bfad_ioim_s *dio)
 		if (itnim_data) {
 			itnim = itnim_data->itnim;
 			if (itnim)
-				bfad_os_ramp_up_qdepth(itnim, cmnd->device);
+				bfad_ramp_up_qdepth(itnim, cmnd->device);
 		}
 	}
 
@@ -183,7 +182,7 @@ bfad_im_info(struct Scsi_Host *shost)
 	bfa_get_adapter_model(bfa, model);
 
 	memset(bfa_buf, 0, sizeof(bfa_buf));
-	if (ioc->ctdev)
+	if (ioc->ctdev && !ioc->fcmode)
 		snprintf(bfa_buf, sizeof(bfa_buf),
 		"Brocade FCOE Adapter, " "model: %s hwpath: %s driver: %s",
 		 model, bfad->pci_name, BFAD_DRIVER_VERSION);
@@ -258,6 +257,7 @@ bfad_im_target_reset_send(struct bfad_s *bfad, struct scsi_cmnd *cmnd,
 	struct bfa_tskim_s *tskim;
 	struct bfa_itnim_s *bfa_itnim;
 	bfa_status_t    rc = BFA_STATUS_OK;
+	struct scsi_lun scsilun;
 
 	tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd);
 	if (!tskim) {
@@ -274,7 +274,8 @@ bfad_im_target_reset_send(struct bfad_s *bfad, struct scsi_cmnd *cmnd,
 	cmnd->host_scribble = NULL;
 	cmnd->SCp.Status = 0;
 	bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim->fcs_itnim);
-	bfa_tskim_start(tskim, bfa_itnim, (lun_t)0,
+	memset(&scsilun, 0, sizeof(scsilun));
+	bfa_tskim_start(tskim, bfa_itnim, scsilun,
 			    FCP_TM_TARGET_RESET, BFAD_TARGET_RESET_TMO);
 out:
 	return rc;
@@ -301,6 +302,7 @@ bfad_im_reset_lun_handler(struct scsi_cmnd *cmnd)
 	int             rc = SUCCESS;
 	unsigned long   flags;
 	enum bfi_tskim_status task_status;
+	struct scsi_lun scsilun;
 
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
 	itnim = itnim_data->itnim;
@@ -327,8 +329,8 @@ bfad_im_reset_lun_handler(struct scsi_cmnd *cmnd)
 	cmnd->SCp.ptr = (char *)&wq;
 	cmnd->SCp.Status = 0;
 	bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim->fcs_itnim);
-	bfa_tskim_start(tskim, bfa_itnim,
-			    bfad_int_to_lun(cmnd->device->lun),
+	int_to_scsilun(cmnd->device->lun, &scsilun);
+	bfa_tskim_start(tskim, bfa_itnim, scsilun,
 			    FCP_TM_LUN_RESET, BFAD_LUN_RESET_TMO);
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 
@@ -364,7 +366,7 @@ bfad_im_reset_bus_handler(struct scsi_cmnd *cmnd)
 
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
 	for (i = 0; i < MAX_FCP_TARGET; i++) {
-		itnim = bfad_os_get_itnim(im_port, i);
+		itnim = bfad_get_itnim(im_port, i);
 		if (itnim) {
 			cmnd->SCp.ptr = (char *)&wq;
 			rc = bfad_im_target_reset_send(bfad, cmnd, itnim);
@@ -447,7 +449,7 @@ bfa_fcb_itnim_free(struct bfad_s *bfad, struct bfad_itnim_s *itnim_drv)
 	struct bfad_im_s	*im = itnim_drv->im;
 
 	/* online to free state transition should not happen */
-	bfa_assert(itnim_drv->state != ITNIM_STATE_ONLINE);
+	WARN_ON(itnim_drv->state == ITNIM_STATE_ONLINE);
 
 	itnim_drv->queue_work = 1;
 	/* offline request is not yet done, use the same request to free */
@@ -545,7 +547,7 @@ bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port,
 
 	mutex_unlock(&bfad_mutex);
 
-	im_port->shost = bfad_os_scsi_host_alloc(im_port, bfad);
+	im_port->shost = bfad_scsi_host_alloc(im_port, bfad);
 	if (!im_port->shost) {
 		error = 1;
 		goto out_free_idr;
@@ -571,7 +573,7 @@ bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port,
 	}
 
 	/* setup host fixed attribute if the lk supports */
-	bfad_os_fc_host_init(im_port);
+	bfad_fc_host_init(im_port);
 
 	return 0;
 
@@ -662,7 +664,7 @@ bfad_im_port_clean(struct bfad_im_port_s *im_port)
 	}
 
 	/* the itnim_mapped_list must be empty at this time */
-	bfa_assert(list_empty(&im_port->itnim_mapped_list));
+	WARN_ON(!list_empty(&im_port->itnim_mapped_list));
 
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 }
@@ -682,7 +684,7 @@ bfad_im_probe(struct bfad_s *bfad)
 	bfad->im = im;
 	im->bfad = bfad;
 
-	if (bfad_os_thread_workq(bfad) != BFA_STATUS_OK) {
+	if (bfad_thread_workq(bfad) != BFA_STATUS_OK) {
 		kfree(im);
 		rc = BFA_STATUS_FAILED;
 	}
@@ -695,14 +697,14 @@ void
 bfad_im_probe_undo(struct bfad_s *bfad)
 {
 	if (bfad->im) {
-		bfad_os_destroy_workq(bfad->im);
+		bfad_destroy_workq(bfad->im);
 		kfree(bfad->im);
 		bfad->im = NULL;
 	}
 }
 
 struct Scsi_Host *
-bfad_os_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad)
+bfad_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad)
 {
 	struct scsi_host_template *sht;
 
@@ -717,7 +719,7 @@ bfad_os_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad)
 }
 
 void
-bfad_os_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
+bfad_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
 {
 	if (!(im_port->flags & BFAD_PORT_DELETE))
 		flush_workqueue(bfad->im->drv_workq);
@@ -727,7 +729,7 @@ bfad_os_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
 }
 
 void
-bfad_os_destroy_workq(struct bfad_im_s *im)
+bfad_destroy_workq(struct bfad_im_s *im)
 {
 	if (im && im->drv_workq) {
 		flush_workqueue(im->drv_workq);
@@ -737,7 +739,7 @@ bfad_os_destroy_workq(struct bfad_im_s *im)
 }
 
 bfa_status_t
-bfad_os_thread_workq(struct bfad_s *bfad)
+bfad_thread_workq(struct bfad_s *bfad)
 {
 	struct bfad_im_s      *im = bfad->im;
 
@@ -841,7 +843,7 @@ bfad_im_module_exit(void)
 }
 
 void
-bfad_os_ramp_up_qdepth(struct bfad_itnim_s *itnim, struct scsi_device *sdev)
+bfad_ramp_up_qdepth(struct bfad_itnim_s *itnim, struct scsi_device *sdev)
 {
 	struct scsi_device *tmp_sdev;
 
@@ -869,7 +871,7 @@ bfad_os_ramp_up_qdepth(struct bfad_itnim_s *itnim, struct scsi_device *sdev)
 }
 
 void
-bfad_os_handle_qfull(struct bfad_itnim_s *itnim, struct scsi_device *sdev)
+bfad_handle_qfull(struct bfad_itnim_s *itnim, struct scsi_device *sdev)
 {
 	struct scsi_device *tmp_sdev;
 
@@ -883,7 +885,7 @@ bfad_os_handle_qfull(struct bfad_itnim_s *itnim, struct scsi_device *sdev)
 }
 
 struct bfad_itnim_s *
-bfad_os_get_itnim(struct bfad_im_port_s *im_port, int id)
+bfad_get_itnim(struct bfad_im_port_s *im_port, int id)
 {
 	struct bfad_itnim_s   *itnim = NULL;
 
@@ -922,7 +924,7 @@ bfad_im_supported_speeds(struct bfa_s *bfa)
 	if (!ioc_attr)
 		return 0;
 
-	bfa_get_attr(bfa, ioc_attr);
+	bfa_ioc_get_attr(&bfa->ioc, ioc_attr);
 	if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_8GBPS) {
 		if (ioc_attr->adapter_attr.is_mezz) {
 			supported_speed |= FC_PORTSPEED_8GBIT |
@@ -944,7 +946,7 @@ bfad_im_supported_speeds(struct bfa_s *bfa)
 }
 
 void
-bfad_os_fc_host_init(struct bfad_im_port_s *im_port)
+bfad_fc_host_init(struct bfad_im_port_s *im_port)
 {
 	struct Scsi_Host *host = im_port->shost;
 	struct bfad_s         *bfad = im_port->bfad;
@@ -988,7 +990,7 @@ bfad_im_fc_rport_add(struct bfad_im_port_s *im_port, struct bfad_itnim_s *itnim)
 	rport_ids.port_name =
 		cpu_to_be64(bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim));
 	rport_ids.port_id =
-		bfa_os_hton3b(bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim));
+		bfa_hton3b(bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim));
 	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
 
 	itnim->fc_rport = fc_rport =
@@ -1109,7 +1111,7 @@ bfad_im_itnim_work_handler(struct work_struct *work)
 		kfree(itnim);
 		break;
 	default:
-		bfa_assert(0);
+		WARN_ON(1);
 		break;
 	}
 
@@ -1172,7 +1174,6 @@ bfad_im_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd
 	}
 
 	cmnd->host_scribble = (char *)hal_io;
-	bfa_trc_fp(bfad, hal_io->iotag);
 	bfa_ioim_start(hal_io);
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 
@@ -1190,7 +1191,7 @@ out_fail_cmd:
 static DEF_SCSI_QCMD(bfad_im_queuecommand)
 
 void
-bfad_os_rport_online_wait(struct bfad_s *bfad)
+bfad_rport_online_wait(struct bfad_s *bfad)
 {
 	int i;
 	int rport_delay = 10;
@@ -1218,7 +1219,7 @@ bfad_os_rport_online_wait(struct bfad_s *bfad)
 }
 
 int
-bfad_os_get_linkup_delay(struct bfad_s *bfad)
+bfad_get_linkup_delay(struct bfad_s *bfad)
 {
 	u8		nwwns = 0;
 	wwn_t		wwns[BFA_PREBOOT_BOOTLUN_MAX];
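
The task-management paths in bfad_im.c above drop the driver-private lun_t and instead build a struct scsi_lun, zeroed for a target reset and filled with the SCSI midlayer helper int_to_scsilun() for a LUN reset. Below is a minimal sketch of that conversion, assuming a hypothetical send_lun_reset() wrapper rather than the driver's actual call chain.

#include <linux/string.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>

/* Hypothetical wrapper: convert the midlayer's integer LUN into the
 * 8-byte wire-format struct scsi_lun expected by the firmware interface.
 */
static void send_lun_reset(struct scsi_cmnd *cmnd)
{
	struct scsi_lun scsilun;

	memset(&scsilun, 0, sizeof(scsilun));		/* all-zero LUN, as in the target-reset path */
	int_to_scsilun(cmnd->device->lun, &scsilun);	/* encode the real LUN for a LUN reset */

	/* scsilun can now be embedded directly in a bfi_tskim_req_s,
	 * whose lun field is a struct scsi_lun after this patch.
	 */
}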

+ 8 - 8
drivers/scsi/bfa/bfad_im.h

@@ -117,17 +117,17 @@ struct bfad_im_s {
 	char            drv_workq_name[KOBJ_NAME_LEN];
 };
 
-struct Scsi_Host *bfad_os_scsi_host_alloc(struct bfad_im_port_s *im_port,
+struct Scsi_Host *bfad_scsi_host_alloc(struct bfad_im_port_s *im_port,
 				struct bfad_s *);
-bfa_status_t bfad_os_thread_workq(struct bfad_s *bfad);
-void bfad_os_destroy_workq(struct bfad_im_s *im);
-void bfad_os_fc_host_init(struct bfad_im_port_s *im_port);
-void bfad_os_scsi_host_free(struct bfad_s *bfad,
+bfa_status_t bfad_thread_workq(struct bfad_s *bfad);
+void bfad_destroy_workq(struct bfad_im_s *im);
+void bfad_fc_host_init(struct bfad_im_port_s *im_port);
+void bfad_scsi_host_free(struct bfad_s *bfad,
 				 struct bfad_im_port_s *im_port);
-void bfad_os_ramp_up_qdepth(struct bfad_itnim_s *itnim,
+void bfad_ramp_up_qdepth(struct bfad_itnim_s *itnim,
 				 struct scsi_device *sdev);
-void bfad_os_handle_qfull(struct bfad_itnim_s *itnim, struct scsi_device *sdev);
-struct bfad_itnim_s *bfad_os_get_itnim(struct bfad_im_port_s *im_port, int id);
+void bfad_handle_qfull(struct bfad_itnim_s *itnim, struct scsi_device *sdev);
+struct bfad_itnim_s *bfad_get_itnim(struct bfad_im_port_s *im_port, int id);
 
 extern struct scsi_host_template bfad_im_scsi_host_template;
 extern struct scsi_host_template bfad_im_vport_template;

+ 4 - 4
drivers/scsi/bfa/bfi.h

@@ -95,8 +95,8 @@ enum {
  */
 union bfi_addr_u {
 	struct {
-		u32	addr_lo;
-		u32	addr_hi;
+		__be32	addr_lo;
+		__be32	addr_hi;
 	} a32;
 };
 
@@ -104,7 +104,7 @@ union bfi_addr_u {
  * Scatter Gather Element
  */
 struct bfi_sge_s {
-#ifdef __BIGENDIAN
+#ifdef __BIG_ENDIAN
 	u32	flags:2,
 			rsvd:2,
 			sg_len:28;
@@ -399,7 +399,7 @@ union bfi_ioc_i2h_msg_u {
  */
 struct bfi_pbc_blun_s {
 	wwn_t		tgt_pwwn;
-	lun_t		tgt_lun;
+	struct scsi_lun	tgt_lun;
 };
 
 /*

+ 1 - 0
drivers/scsi/bfa/bfi_cbreg.h

@@ -208,6 +208,7 @@
 #define BFA_IOC1_HBEAT_REG               HOST_SEM2_INFO_REG
 #define BFA_IOC1_STATE_REG               HOST_SEM3_INFO_REG
 #define BFA_FW_USE_COUNT                 HOST_SEM4_INFO_REG
+#define BFA_IOC_FAIL_SYNC		 HOST_SEM5_INFO_REG
 
 #define CPE_Q_DEPTH(__n) \
 	(CPE_Q0_DEPTH + (__n) * (CPE_Q1_DEPTH - CPE_Q0_DEPTH))

+ 25 - 16
drivers/scsi/bfa/bfi_ctreg.h

@@ -522,6 +522,7 @@ enum {
 #define BFA_IOC1_HBEAT_REG		HOST_SEM2_INFO_REG
 #define BFA_IOC1_STATE_REG		HOST_SEM3_INFO_REG
 #define BFA_FW_USE_COUNT		 HOST_SEM4_INFO_REG
+#define BFA_IOC_FAIL_SYNC		HOST_SEM5_INFO_REG
 
 #define CPE_DEPTH_Q(__n) \
 	(CPE_DEPTH_Q0 + (__n) * (CPE_DEPTH_Q1 - CPE_DEPTH_Q0))
@@ -539,22 +540,30 @@ enum {
 	(RME_PI_PTR_Q0 + (__n) * (RME_PI_PTR_Q1 - RME_PI_PTR_Q0))
 #define RME_CI_PTR_Q(__n) \
 	(RME_CI_PTR_Q0 + (__n) * (RME_CI_PTR_Q1 - RME_CI_PTR_Q0))
-#define HQM_QSET_RXQ_DRBL_P0(__n) (HQM_QSET0_RXQ_DRBL_P0 + (__n) \
-	* (HQM_QSET1_RXQ_DRBL_P0 - HQM_QSET0_RXQ_DRBL_P0))
-#define HQM_QSET_TXQ_DRBL_P0(__n) (HQM_QSET0_TXQ_DRBL_P0 + (__n) \
-	* (HQM_QSET1_TXQ_DRBL_P0 - HQM_QSET0_TXQ_DRBL_P0))
-#define HQM_QSET_IB_DRBL_1_P0(__n) (HQM_QSET0_IB_DRBL_1_P0 + (__n) \
-	* (HQM_QSET1_IB_DRBL_1_P0 - HQM_QSET0_IB_DRBL_1_P0))
-#define HQM_QSET_IB_DRBL_2_P0(__n) (HQM_QSET0_IB_DRBL_2_P0 + (__n) \
-	* (HQM_QSET1_IB_DRBL_2_P0 - HQM_QSET0_IB_DRBL_2_P0))
-#define HQM_QSET_RXQ_DRBL_P1(__n) (HQM_QSET0_RXQ_DRBL_P1 + (__n) \
-	* (HQM_QSET1_RXQ_DRBL_P1 - HQM_QSET0_RXQ_DRBL_P1))
-#define HQM_QSET_TXQ_DRBL_P1(__n) (HQM_QSET0_TXQ_DRBL_P1 + (__n) \
-	* (HQM_QSET1_TXQ_DRBL_P1 - HQM_QSET0_TXQ_DRBL_P1))
-#define HQM_QSET_IB_DRBL_1_P1(__n) (HQM_QSET0_IB_DRBL_1_P1 + (__n) \
-	* (HQM_QSET1_IB_DRBL_1_P1 - HQM_QSET0_IB_DRBL_1_P1))
-#define HQM_QSET_IB_DRBL_2_P1(__n) (HQM_QSET0_IB_DRBL_2_P1 + (__n) \
-	* (HQM_QSET1_IB_DRBL_2_P1 - HQM_QSET0_IB_DRBL_2_P1))
+#define HQM_QSET_RXQ_DRBL_P0(__n) \
+	(HQM_QSET0_RXQ_DRBL_P0 + (__n) *	\
+	(HQM_QSET1_RXQ_DRBL_P0 - HQM_QSET0_RXQ_DRBL_P0))
+#define HQM_QSET_TXQ_DRBL_P0(__n) \
+	(HQM_QSET0_TXQ_DRBL_P0 + (__n) *	\
+	(HQM_QSET1_TXQ_DRBL_P0 - HQM_QSET0_TXQ_DRBL_P0))
+#define HQM_QSET_IB_DRBL_1_P0(__n) \
+	(HQM_QSET0_IB_DRBL_1_P0 + (__n) *	\
+	(HQM_QSET1_IB_DRBL_1_P0 - HQM_QSET0_IB_DRBL_1_P0))
+#define HQM_QSET_IB_DRBL_2_P0(__n) \
+	(HQM_QSET0_IB_DRBL_2_P0 + (__n) *	\
+	(HQM_QSET1_IB_DRBL_2_P0 - HQM_QSET0_IB_DRBL_2_P0))
+#define HQM_QSET_RXQ_DRBL_P1(__n) \
+	(HQM_QSET0_RXQ_DRBL_P1 + (__n) *	\
+	(HQM_QSET1_RXQ_DRBL_P1 - HQM_QSET0_RXQ_DRBL_P1))
+#define HQM_QSET_TXQ_DRBL_P1(__n) \
+	(HQM_QSET0_TXQ_DRBL_P1 + (__n) *	\
+	(HQM_QSET1_TXQ_DRBL_P1 - HQM_QSET0_TXQ_DRBL_P1))
+#define HQM_QSET_IB_DRBL_1_P1(__n) \
+	(HQM_QSET0_IB_DRBL_1_P1 + (__n) *	\
+	(HQM_QSET1_IB_DRBL_1_P1 - HQM_QSET0_IB_DRBL_1_P1))
+#define HQM_QSET_IB_DRBL_2_P1(__n) \
+	(HQM_QSET0_IB_DRBL_2_P1 + (__n) *	\
+	(HQM_QSET1_IB_DRBL_2_P1 - HQM_QSET0_IB_DRBL_2_P1))
 
 #define CPE_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
 #define RME_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))

+ 38 - 28
drivers/scsi/bfa/bfi_ms.h

@@ -47,10 +47,10 @@ struct bfi_iocfc_cfg_s {
 	 */
 	union bfi_addr_u  req_cq_ba[BFI_IOC_MAX_CQS];
 	union bfi_addr_u  req_shadow_ci[BFI_IOC_MAX_CQS];
-	u16    req_cq_elems[BFI_IOC_MAX_CQS];
+	__be16    req_cq_elems[BFI_IOC_MAX_CQS];
 	union bfi_addr_u  rsp_cq_ba[BFI_IOC_MAX_CQS];
 	union bfi_addr_u  rsp_shadow_pi[BFI_IOC_MAX_CQS];
-	u16    rsp_cq_elems[BFI_IOC_MAX_CQS];
+	__be16    rsp_cq_elems[BFI_IOC_MAX_CQS];
 
 	union bfi_addr_u  stats_addr;	/*  DMA-able address for stats	  */
 	union bfi_addr_u  cfgrsp_addr;	/*  config response dma address  */
@@ -102,8 +102,8 @@ struct bfi_iocfc_set_intr_req_s {
 	struct bfi_mhdr_s mh;		/*  common msg header		*/
 	u8		coalesce;	/*  enable intr coalescing	*/
 	u8		rsvd[3];
-	u16	delay;		/*  delay timer 0..1125us	*/
-	u16	latency;	/*  latency timer 0..225us	*/
+	__be16	delay;		/*  delay timer 0..1125us	*/
+	__be16	latency;	/*  latency timer 0..225us	*/
 };
 
 
@@ -188,7 +188,8 @@ struct bfi_fcport_rsp_s {
 	struct bfi_mhdr_s  mh;		/*  common msg header		    */
 	u8		   status;	/*  port enable status		    */
 	u8		   rsvd[3];
-	u32	   msgtag;	/*  msgtag for reply		    */
+	struct	bfa_port_cfg_s port_cfg;/* port configuration	*/
+	u32	msgtag;			/* msgtag for reply	*/
 };
 
 /*
@@ -202,7 +203,8 @@ struct bfi_fcport_enable_req_s {
 	struct bfa_port_cfg_s port_cfg; /*  port configuration	    */
 	union bfi_addr_u   stats_dma_addr; /*  DMA address for stats	    */
 	u32	   msgtag;	/*  msgtag for reply		    */
-	u32	   rsvd2;
+	u8	use_flash_cfg;	/* get port cfg from flash */
+	u8	rsvd2[3];
 };
 
 /*
@@ -210,7 +212,7 @@ struct bfi_fcport_enable_req_s {
  */
 struct bfi_fcport_set_svc_params_req_s {
 	struct bfi_mhdr_s  mh;		/*  msg header */
-	u16	   tx_bbcredit;	/*  Tx credits */
+	__be16	   tx_bbcredit;	/*  Tx credits */
 	u16	   rsvd;
 };
 
@@ -231,7 +233,7 @@ struct bfi_fcport_trunk_link_s {
 	u8			state;		/* bfa_trunk_link_state_t */
 	u8			speed;		/* bfa_port_speed_t */
 	u8			rsvd;
-	u32		deskew;
+	__be32		deskew;
 };
 
 #define BFI_FCPORT_MAX_LINKS	2
@@ -284,17 +286,17 @@ enum bfi_fcxp_i2h {
  */
 struct bfi_fcxp_send_req_s {
 	struct bfi_mhdr_s  mh;		/*  Common msg header		    */
-	u16	fcxp_tag;	/*  driver request tag		    */
-	u16	max_frmsz;	/*  max send frame size	    */
-	u16	vf_id;		/*  vsan tag if applicable	    */
+	__be16	fcxp_tag;	/*  driver request tag		    */
+	__be16	max_frmsz;	/*  max send frame size	    */
+	__be16	vf_id;		/*  vsan tag if applicable	    */
 	u16	rport_fw_hndl;	/*  FW Handle for the remote port  */
 	u8	 class;		/*  FC class used for req/rsp	    */
 	u8	 rsp_timeout;	/*  timeout in secs, 0-no response */
 	u8	 cts;		/*  continue sequence		    */
 	u8	 lp_tag;	/*  lport tag			    */
 	struct fchs_s	fchs;	/*  request FC header structure    */
-	u32	req_len;	/*  request payload length	    */
-	u32	rsp_maxlen;	/*  max response length expected   */
+	__be32	req_len;	/*  request payload length	    */
+	__be32	rsp_maxlen;	/*  max response length expected   */
 	struct bfi_sge_s   req_sge[BFA_FCXP_MAX_SGES];	/*  request buf    */
 	struct bfi_sge_s   rsp_sge[BFA_FCXP_MAX_SGES];	/*  response buf   */
 };
@@ -304,11 +306,11 @@ struct bfi_fcxp_send_req_s {
  */
 struct bfi_fcxp_send_rsp_s {
 	struct bfi_mhdr_s  mh;		/*  Common msg header		    */
-	u16	fcxp_tag;	/*  send request tag		    */
+	__be16	fcxp_tag;	/*  send request tag		    */
 	u8	 req_status;	/*  request status		    */
 	u8	 rsvd;
-	u32	rsp_len;	/*  actual response length	    */
-	u32	residue_len;	/*  residual response length	    */
+	__be32	rsp_len;	/*  actual response length	    */
+	__be32	residue_len;	/*  residual response length	    */
 	struct fchs_s	fchs;	/*  response FC header structure   */
 };
 
@@ -325,7 +327,7 @@ enum bfi_uf_i2h {
 struct bfi_uf_buf_post_s {
 	struct bfi_mhdr_s  mh;		/*  Common msg header		*/
 	u16	buf_tag;	/*  buffer tag			*/
-	u16	buf_len;	/*  total buffer length	*/
+	__be16	buf_len;	/*  total buffer length	*/
 	struct bfi_sge_s   sge[BFA_UF_MAX_SGES]; /*  buffer DMA SGEs	*/
 };
 
@@ -340,6 +342,7 @@ struct bfi_uf_frm_rcvd_s {
 enum bfi_lps_h2i_msgs {
 	BFI_LPS_H2I_LOGIN_REQ	= 1,
 	BFI_LPS_H2I_LOGOUT_REQ	= 2,
+	BFI_LPS_H2I_N2N_PID_REQ = 3,
 };
 
 enum bfi_lps_i2h_msgs {
@@ -352,7 +355,7 @@ struct bfi_lps_login_req_s {
 	struct bfi_mhdr_s  mh;		/*  common msg header		*/
 	u8		lp_tag;
 	u8		alpa;
-	u16	pdu_size;
+	__be16		pdu_size;
 	wwn_t		pwwn;
 	wwn_t		nwwn;
 	u8		fdisc;
@@ -368,7 +371,7 @@ struct bfi_lps_login_rsp_s {
 	u8		lsrjt_expl;
 	wwn_t		port_name;
 	wwn_t		node_name;
-	u16	bb_credit;
+	__be16		bb_credit;
 	u8		f_port;
 	u8		npiv_en;
 	u32	lp_pid:24;
@@ -399,10 +402,17 @@ struct bfi_lps_cvl_event_s {
 	u8		rsvd[3];
 };
 
+struct bfi_lps_n2n_pid_req_s {
+	struct bfi_mhdr_s	mh;	/*  common msg header		*/
+	u8	lp_tag;
+	u32	lp_pid:24;
+};
+
 union bfi_lps_h2i_msg_u {
 	struct bfi_mhdr_s		*msg;
 	struct bfi_lps_login_req_s	*login_req;
 	struct bfi_lps_logout_req_s	*logout_req;
+	struct bfi_lps_n2n_pid_req_s	*n2n_pid_req;
 };
 
 union bfi_lps_i2h_msg_u {
@@ -427,7 +437,7 @@ enum bfi_rport_i2h_msgs {
 struct bfi_rport_create_req_s {
 	struct bfi_mhdr_s  mh;		/*  common msg header		*/
 	u16	bfa_handle;	/*  host rport handle		*/
-	u16	max_frmsz;	/*  max rcv pdu size		*/
+	__be16	max_frmsz;	/*  max rcv pdu size		*/
 	u32	pid:24,	/*  remote port ID		*/
 		lp_tag:8;	/*  local port tag		*/
 	u32	local_pid:24,	/*  local port ID		*/
@@ -583,7 +593,7 @@ struct bfi_ioim_dif_s {
  */
 struct bfi_ioim_req_s {
 	struct bfi_mhdr_s  mh;		/*  Common msg header		 */
-	u16	io_tag;		/*  I/O tag			 */
+	__be16	io_tag;		/*  I/O tag			 */
 	u16	rport_hdl;	/*  itnim/rport firmware handle */
 	struct fcp_cmnd_s	cmnd;	/*  IO request info	*/
 
@@ -689,7 +699,7 @@ enum bfi_ioim_status {
  */
 struct bfi_ioim_rsp_s {
 	struct bfi_mhdr_s	mh;	/*  common msg header		*/
-	u16	io_tag;		/*  completed IO tag		 */
+	__be16	io_tag;		/*  completed IO tag		 */
 	u16	bfa_rport_hndl;	/*  related rport handle	 */
 	u8	io_status;	/*  IO completion status	 */
 	u8	reuse_io_tag;	/*  IO tag can be reused	*/
@@ -698,13 +708,13 @@ struct bfi_ioim_rsp_s {
 	u8		sns_len;	/*  scsi sense length		 */
 	u8		resid_flags;	/*  IO residue flags		 */
 	u8		rsvd_a;
-	u32	residue;	/*  IO residual length in bytes */
+	__be32	residue;	/*  IO residual length in bytes */
 	u32	rsvd_b[3];
 };
 
 struct bfi_ioim_abort_req_s {
 	struct bfi_mhdr_s  mh;	/*  Common msg header  */
-	u16	io_tag;	/*  I/O tag	*/
+	__be16	io_tag;	/*  I/O tag	*/
 	u16	abort_tag;	/*  unique request tag */
 };
 
@@ -723,9 +733,9 @@ enum bfi_tskim_i2h {
 
 struct bfi_tskim_req_s {
 	struct bfi_mhdr_s  mh;	/*  Common msg header	*/
-	u16	tsk_tag;	/*  task management tag	*/
+	__be16	tsk_tag;	/*  task management tag	*/
 	u16	itn_fhdl;	/*  itn firmware handle	*/
-	lun_t	lun;	/*  LU number	*/
+	struct scsi_lun	lun;	/*  LU number	*/
 	u8	tm_flags;	/*  see enum fcp_tm_cmnd	*/
 	u8	t_secs;	/*  Timeout value in seconds	*/
 	u8	rsvd[2];
@@ -733,7 +743,7 @@ struct bfi_tskim_req_s {
 
 struct bfi_tskim_abortreq_s {
 	struct bfi_mhdr_s  mh;	/*  Common msg header	*/
-	u16	tsk_tag;	/*  task management tag	*/
+	__be16	tsk_tag;	/*  task management tag	*/
 	u16	rsvd;
 };
 
@@ -755,7 +765,7 @@ enum bfi_tskim_status {
 
 struct bfi_tskim_rsp_s {
 	struct bfi_mhdr_s  mh;		/*  Common msg header		 */
-	u16	tsk_tag;	/*  task mgmt cmnd tag		 */
+	__be16	tsk_tag;	/*  task mgmt cmnd tag		 */
 	u8	tsk_status;	/*  @ref bfi_tskim_status */
 	u8	rsvd;
 };
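
The bfi.h and bfi_ms.h hunks above annotate on-wire message fields as __be16/__be32, so sparse (make C=1) flags any access that bypasses the byte-order helpers. A minimal sketch of how such fields are expected to be filled and read back, using a hypothetical message layout rather than the real firmware structures:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical wire message: big-endian fields are annotated so sparse
 * flags any assignment that skips the cpu_to_beNN / beNN_to_cpu helpers.
 */
struct demo_wire_msg {
	__be16	io_tag;		/* I/O tag, big-endian on the wire */
	__be32	residue;	/* residual length, big-endian on the wire */
};

static void demo_fill(struct demo_wire_msg *msg, u16 tag, u32 residue)
{
	msg->io_tag  = cpu_to_be16(tag);	/* host -> wire */
	msg->residue = cpu_to_be32(residue);
}

static u32 demo_read_residue(const struct demo_wire_msg *msg)
{
	return be32_to_cpu(msg->residue);	/* wire -> host */
}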

+ 2 - 1
drivers/scsi/bnx2i/57xx_iscsi_constants.h

@@ -1,12 +1,13 @@
 /* 57xx_iscsi_constants.h: Broadcom NetXtreme II iSCSI HSI
  *
- * Copyright (c) 2006 - 2009 Broadcom Corporation
+ * Copyright (c) 2006 - 2010 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation.
  *
  * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ * Maintained by: Eddie Wai (eddie.wai@broadcom.com)
  */
 #ifndef __57XX_ISCSI_CONSTANTS_H_
 #define __57XX_ISCSI_CONSTANTS_H_

Some files were not shown because too many files changed in this diff