libahci.c

/*
 * libahci.c - Common AHCI SATA low-level routines
 *
 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
 * Please ALWAYS copy linux-ide@vger.kernel.org
 * on emails.
 *
 * Copyright 2004-2005 Red Hat, Inc.
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 * libata documentation is available via 'make {ps|pdf}docs',
 * as Documentation/DocBook/libata.*
 *
 * AHCI hardware documentation:
 * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
 * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
 *
 */

#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>
#include "ahci.h"

static int ahci_skip_host_reset;
int ahci_ignore_sss;
EXPORT_SYMBOL_GPL(ahci_ignore_sss);

module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444);
MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)");

module_param_named(ignore_sss, ahci_ignore_sss, int, 0444);
MODULE_PARM_DESC(ignore_sss, "Ignore staggered spinup flag (0=don't ignore, 1=ignore)");
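/*
 * Example usage (hypothetical, for illustration only): both parameters can be
 * given at module load time, e.g. "modprobe ahci skip_host_reset=1 ignore_sss=1".
 * The exact parameter prefix depends on which module this file is built into
 * (ahci or libahci) on a given kernel configuration.
 */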
static int ahci_enable_alpm(struct ata_port *ap,
		enum link_pm policy);
static void ahci_disable_alpm(struct ata_port *ap);
static ssize_t ahci_led_show(struct ata_port *ap, char *buf);
static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
		size_t size);
static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
		ssize_t size);

static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
static int ahci_port_start(struct ata_port *ap);
static void ahci_port_stop(struct ata_port *ap);
static void ahci_qc_prep(struct ata_queued_cmd *qc);
static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc);
static void ahci_freeze(struct ata_port *ap);
static void ahci_thaw(struct ata_port *ap);
static void ahci_enable_fbs(struct ata_port *ap);
static void ahci_disable_fbs(struct ata_port *ap);
static void ahci_pmp_attach(struct ata_port *ap);
static void ahci_pmp_detach(struct ata_port *ap);
static int ahci_softreset(struct ata_link *link, unsigned int *class,
		unsigned long deadline);
static int ahci_hardreset(struct ata_link *link, unsigned int *class,
		unsigned long deadline);
static void ahci_postreset(struct ata_link *link, unsigned int *class);
static void ahci_error_handler(struct ata_port *ap);
static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
static int ahci_port_resume(struct ata_port *ap);
static void ahci_dev_config(struct ata_device *dev);
static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
		u32 opts);
#ifdef CONFIG_PM
static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
#endif
static ssize_t ahci_activity_show(struct ata_device *dev, char *buf);
static ssize_t ahci_activity_store(struct ata_device *dev,
		enum sw_activity val);
static void ahci_init_sw_activity(struct ata_link *link);

static ssize_t ahci_show_host_caps(struct device *dev,
		struct device_attribute *attr, char *buf);
static ssize_t ahci_show_host_cap2(struct device *dev,
		struct device_attribute *attr, char *buf);
static ssize_t ahci_show_host_version(struct device *dev,
		struct device_attribute *attr, char *buf);
static ssize_t ahci_show_port_cmd(struct device *dev,
		struct device_attribute *attr, char *buf);
static ssize_t ahci_read_em_buffer(struct device *dev,
		struct device_attribute *attr, char *buf);
static ssize_t ahci_store_em_buffer(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t size);

static DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL);
static DEVICE_ATTR(ahci_host_cap2, S_IRUGO, ahci_show_host_cap2, NULL);
static DEVICE_ATTR(ahci_host_version, S_IRUGO, ahci_show_host_version, NULL);
static DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL);
static DEVICE_ATTR(em_buffer, S_IWUSR | S_IRUGO,
		   ahci_read_em_buffer, ahci_store_em_buffer);

static struct device_attribute *ahci_shost_attrs[] = {
	&dev_attr_link_power_management_policy,
	&dev_attr_em_message_type,
	&dev_attr_em_message,
	&dev_attr_ahci_host_caps,
	&dev_attr_ahci_host_cap2,
	&dev_attr_ahci_host_version,
	&dev_attr_ahci_port_cmd,
	&dev_attr_em_buffer,
	NULL
};

static struct device_attribute *ahci_sdev_attrs[] = {
	&dev_attr_sw_activity,
	&dev_attr_unload_heads,
	NULL
};

struct scsi_host_template ahci_sht = {
	ATA_NCQ_SHT("ahci"),
	.can_queue = AHCI_MAX_CMDS - 1,
	.sg_tablesize = AHCI_MAX_SG,
	.dma_boundary = AHCI_DMA_BOUNDARY,
	.shost_attrs = ahci_shost_attrs,
	.sdev_attrs = ahci_sdev_attrs,
};
EXPORT_SYMBOL_GPL(ahci_sht);

struct ata_port_operations ahci_ops = {
	.inherits = &sata_pmp_port_ops,

	.qc_defer = ahci_pmp_qc_defer,
	.qc_prep = ahci_qc_prep,
	.qc_issue = ahci_qc_issue,
	.qc_fill_rtf = ahci_qc_fill_rtf,

	.freeze = ahci_freeze,
	.thaw = ahci_thaw,
	.softreset = ahci_softreset,
	.hardreset = ahci_hardreset,
	.postreset = ahci_postreset,
	.pmp_softreset = ahci_softreset,
	.error_handler = ahci_error_handler,
	.post_internal_cmd = ahci_post_internal_cmd,
	.dev_config = ahci_dev_config,

	.scr_read = ahci_scr_read,
	.scr_write = ahci_scr_write,
	.pmp_attach = ahci_pmp_attach,
	.pmp_detach = ahci_pmp_detach,

	.enable_pm = ahci_enable_alpm,
	.disable_pm = ahci_disable_alpm,
	.em_show = ahci_led_show,
	.em_store = ahci_led_store,
	.sw_activity_show = ahci_activity_show,
	.sw_activity_store = ahci_activity_store,
#ifdef CONFIG_PM
	.port_suspend = ahci_port_suspend,
	.port_resume = ahci_port_resume,
#endif
	.port_start = ahci_port_start,
	.port_stop = ahci_port_stop,
};
EXPORT_SYMBOL_GPL(ahci_ops);

int ahci_em_messages = 1;
EXPORT_SYMBOL_GPL(ahci_em_messages);
module_param(ahci_em_messages, int, 0444);
/* add other LED protocol types when they become supported */
MODULE_PARM_DESC(ahci_em_messages,
	"AHCI Enclosure Management Message control (0 = off, 1 = on)");
static void ahci_enable_ahci(void __iomem *mmio)
{
	int i;
	u32 tmp;

	/* turn on AHCI_EN */
	tmp = readl(mmio + HOST_CTL);
	if (tmp & HOST_AHCI_EN)
		return;

	/* Some controllers need AHCI_EN to be written multiple times.
	 * Try a few times before giving up.
	 */
	for (i = 0; i < 5; i++) {
		tmp |= HOST_AHCI_EN;
		writel(tmp, mmio + HOST_CTL);
		tmp = readl(mmio + HOST_CTL);	/* flush && sanity check */
		if (tmp & HOST_AHCI_EN)
			return;
		msleep(10);
	}

	WARN_ON(1);
}

static ssize_t ahci_show_host_caps(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ata_port *ap = ata_shost_to_port(shost);
	struct ahci_host_priv *hpriv = ap->host->private_data;

	return sprintf(buf, "%x\n", hpriv->cap);
}

static ssize_t ahci_show_host_cap2(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ata_port *ap = ata_shost_to_port(shost);
	struct ahci_host_priv *hpriv = ap->host->private_data;

	return sprintf(buf, "%x\n", hpriv->cap2);
}

static ssize_t ahci_show_host_version(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ata_port *ap = ata_shost_to_port(shost);
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->mmio;

	return sprintf(buf, "%x\n", readl(mmio + HOST_VERSION));
}

static ssize_t ahci_show_port_cmd(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ata_port *ap = ata_shost_to_port(shost);
	void __iomem *port_mmio = ahci_port_base(ap);

	return sprintf(buf, "%x\n", readl(port_mmio + PORT_CMD));
}

static ssize_t ahci_read_em_buffer(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ata_port *ap = ata_shost_to_port(shost);
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->mmio;
	void __iomem *em_mmio = mmio + hpriv->em_loc;
	u32 em_ctl, msg;
	unsigned long flags;
	size_t count;
	int i;

	spin_lock_irqsave(ap->lock, flags);

	em_ctl = readl(mmio + HOST_EM_CTL);
	if (!(ap->flags & ATA_FLAG_EM) || em_ctl & EM_CTL_XMT ||
	    !(hpriv->em_msg_type & EM_MSG_TYPE_SGPIO)) {
		spin_unlock_irqrestore(ap->lock, flags);
		return -EINVAL;
	}

	if (!(em_ctl & EM_CTL_MR)) {
		spin_unlock_irqrestore(ap->lock, flags);
		return -EAGAIN;
	}

	if (!(em_ctl & EM_CTL_SMB))
		em_mmio += hpriv->em_buf_sz;

	count = hpriv->em_buf_sz;

	/* the count should not be larger than PAGE_SIZE */
	if (count > PAGE_SIZE) {
		if (printk_ratelimit())
			ata_port_printk(ap, KERN_WARNING,
					"EM read buffer size too large: "
					"buffer size %u, page size %lu\n",
					hpriv->em_buf_sz, PAGE_SIZE);
		count = PAGE_SIZE;
	}

	for (i = 0; i < count; i += 4) {
		msg = readl(em_mmio + i);
		buf[i] = msg & 0xff;
		buf[i + 1] = (msg >> 8) & 0xff;
		buf[i + 2] = (msg >> 16) & 0xff;
		buf[i + 3] = (msg >> 24) & 0xff;
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return i;
}

static ssize_t ahci_store_em_buffer(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t size)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ata_port *ap = ata_shost_to_port(shost);
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->mmio;
	void __iomem *em_mmio = mmio + hpriv->em_loc;
	u32 em_ctl, msg;
	unsigned long flags;
	int i;

	/* check size validity */
	if (!(ap->flags & ATA_FLAG_EM) ||
	    !(hpriv->em_msg_type & EM_MSG_TYPE_SGPIO) ||
	    size % 4 || size > hpriv->em_buf_sz)
		return -EINVAL;

	spin_lock_irqsave(ap->lock, flags);

	em_ctl = readl(mmio + HOST_EM_CTL);
	if (em_ctl & EM_CTL_TM) {
		spin_unlock_irqrestore(ap->lock, flags);
		return -EBUSY;
	}

	for (i = 0; i < size; i += 4) {
		msg = buf[i] | buf[i + 1] << 8 |
		      buf[i + 2] << 16 | buf[i + 3] << 24;
		writel(msg, em_mmio + i);
	}

	writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);

	spin_unlock_irqrestore(ap->lock, flags);

	return size;
}

/**
 * ahci_save_initial_config - Save and fixup initial config values
 * @dev: target AHCI device
 * @hpriv: host private area to store config values
 * @force_port_map: force port map to a specified value
 * @mask_port_map: mask out particular bits from port map
 *
 * Some registers containing configuration info might be setup by
 * BIOS and might be cleared on reset. This function saves the
 * initial values of those registers into @hpriv such that they
 * can be restored after controller reset.
 *
 * If inconsistent, config values are fixed up by this function.
 *
 * LOCKING:
 * None.
 */
void ahci_save_initial_config(struct device *dev,
			      struct ahci_host_priv *hpriv,
			      unsigned int force_port_map,
			      unsigned int mask_port_map)
{
	void __iomem *mmio = hpriv->mmio;
	u32 cap, cap2, vers, port_map;
	int i;

	/* make sure AHCI mode is enabled before accessing CAP */
	ahci_enable_ahci(mmio);

	/* Values prefixed with saved_ are written back to host after
	 * reset. Values without are used for driver operation.
	 */
	hpriv->saved_cap = cap = readl(mmio + HOST_CAP);
	hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL);

	/* CAP2 register is only defined for AHCI 1.2 and later */
	vers = readl(mmio + HOST_VERSION);
	if ((vers >> 16) > 1 ||
	   ((vers >> 16) == 1 && (vers & 0xFFFF) >= 0x200))
		hpriv->saved_cap2 = cap2 = readl(mmio + HOST_CAP2);
	else
		hpriv->saved_cap2 = cap2 = 0;

	/* some chips have errata preventing 64bit use */
	if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) {
		dev_printk(KERN_INFO, dev,
			   "controller can't do 64bit DMA, forcing 32bit\n");
		cap &= ~HOST_CAP_64;
	}

	if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) {
		dev_printk(KERN_INFO, dev,
			   "controller can't do NCQ, turning off CAP_NCQ\n");
		cap &= ~HOST_CAP_NCQ;
	}

	if (!(cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_YES_NCQ)) {
		dev_printk(KERN_INFO, dev,
			   "controller can do NCQ, turning on CAP_NCQ\n");
		cap |= HOST_CAP_NCQ;
	}

	if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) {
		dev_printk(KERN_INFO, dev,
			   "controller can't do PMP, turning off CAP_PMP\n");
		cap &= ~HOST_CAP_PMP;
	}

	if ((cap & HOST_CAP_SNTF) && (hpriv->flags & AHCI_HFLAG_NO_SNTF)) {
		dev_printk(KERN_INFO, dev,
			   "controller can't do SNTF, turning off CAP_SNTF\n");
		cap &= ~HOST_CAP_SNTF;
	}

	if (force_port_map && port_map != force_port_map) {
		dev_printk(KERN_INFO, dev, "forcing port_map 0x%x -> 0x%x\n",
			   port_map, force_port_map);
		port_map = force_port_map;
	}

	if (mask_port_map) {
		dev_printk(KERN_ERR, dev, "masking port_map 0x%x -> 0x%x\n",
			   port_map,
			   port_map & mask_port_map);
		port_map &= mask_port_map;
	}

	/* cross check port_map and cap.n_ports */
	if (port_map) {
		int map_ports = 0;

		for (i = 0; i < AHCI_MAX_PORTS; i++)
			if (port_map & (1 << i))
				map_ports++;

		/* If PI has more ports than n_ports, whine, clear
		 * port_map and let it be generated from n_ports.
		 */
		if (map_ports > ahci_nr_ports(cap)) {
			dev_printk(KERN_WARNING, dev,
				   "implemented port map (0x%x) contains more "
				   "ports than nr_ports (%u), using nr_ports\n",
				   port_map, ahci_nr_ports(cap));
			port_map = 0;
		}
	}

	/* fabricate port_map from cap.nr_ports */
	if (!port_map) {
		port_map = (1 << ahci_nr_ports(cap)) - 1;
		dev_printk(KERN_WARNING, dev,
			   "forcing PORTS_IMPL to 0x%x\n", port_map);

		/* write the fixed up value to the PI register */
		hpriv->saved_port_map = port_map;
	}

	/* record values to use during operation */
	hpriv->cap = cap;
	hpriv->cap2 = cap2;
	hpriv->port_map = port_map;
}
EXPORT_SYMBOL_GPL(ahci_save_initial_config);

/**
 * ahci_restore_initial_config - Restore initial config
 * @host: target ATA host
 *
 * Restore initial config stored by ahci_save_initial_config().
 *
 * LOCKING:
 * None.
 */
static void ahci_restore_initial_config(struct ata_host *host)
{
	struct ahci_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->mmio;

	writel(hpriv->saved_cap, mmio + HOST_CAP);
	if (hpriv->saved_cap2)
		writel(hpriv->saved_cap2, mmio + HOST_CAP2);
	writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL);
	(void) readl(mmio + HOST_PORTS_IMPL);	/* flush */
}
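/*
 * SCR access helpers: map the generic SCR_* register numbers onto the
 * per-port PxSSTS/PxSCTL/PxSERR/PxSACT/PxSNTF offsets.  SCR_NOTIFICATION is
 * only available when the HBA advertises HOST_CAP_SNTF; unknown registers
 * yield offset 0 and the read/write wrappers turn that into -EINVAL.
 */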
static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg)
{
	static const int offset[] = {
		[SCR_STATUS] = PORT_SCR_STAT,
		[SCR_CONTROL] = PORT_SCR_CTL,
		[SCR_ERROR] = PORT_SCR_ERR,
		[SCR_ACTIVE] = PORT_SCR_ACT,
		[SCR_NOTIFICATION] = PORT_SCR_NTF,
	};
	struct ahci_host_priv *hpriv = ap->host->private_data;

	if (sc_reg < ARRAY_SIZE(offset) &&
	    (sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF)))
		return offset[sc_reg];
	return 0;
}

static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
{
	void __iomem *port_mmio = ahci_port_base(link->ap);
	int offset = ahci_scr_offset(link->ap, sc_reg);

	if (offset) {
		*val = readl(port_mmio + offset);
		return 0;
	}
	return -EINVAL;
}

static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
{
	void __iomem *port_mmio = ahci_port_base(link->ap);
	int offset = ahci_scr_offset(link->ap, sc_reg);

	if (offset) {
		writel(val, port_mmio + offset);
		return 0;
	}
	return -EINVAL;
}

void ahci_start_engine(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 tmp;

	/* start DMA */
	tmp = readl(port_mmio + PORT_CMD);
	tmp |= PORT_CMD_START;
	writel(tmp, port_mmio + PORT_CMD);
	readl(port_mmio + PORT_CMD); /* flush */
}
EXPORT_SYMBOL_GPL(ahci_start_engine);
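/*
 * ahci_stop_engine - clear PxCMD.ST and wait for the command list engine
 * (PxCMD.CR, PORT_CMD_LIST_ON) to stop.  The engine may take up to 500 ms;
 * returns -EIO if it is still running after that, 0 otherwise.
 */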
int ahci_stop_engine(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 tmp;

	tmp = readl(port_mmio + PORT_CMD);

	/* check if the HBA is idle */
	if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
		return 0;

	/* setting HBA to idle */
	tmp &= ~PORT_CMD_START;
	writel(tmp, port_mmio + PORT_CMD);

	/* wait for engine to stop. This could be as long as 500 msec */
	tmp = ata_wait_register(port_mmio + PORT_CMD,
				PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
	if (tmp & PORT_CMD_LIST_ON)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(ahci_stop_engine);
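/*
 * ahci_start_fis_rx - program the command list and received-FIS DMA base
 * addresses for the port (including the high dwords on 64-bit capable HBAs)
 * and set PxCMD.FRE so the HBA starts posting received FISes into rx_fis.
 */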
static void ahci_start_fis_rx(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct ahci_port_priv *pp = ap->private_data;
	u32 tmp;

	/* set FIS registers */
	if (hpriv->cap & HOST_CAP_64)
		writel((pp->cmd_slot_dma >> 16) >> 16,
		       port_mmio + PORT_LST_ADDR_HI);
	writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);

	if (hpriv->cap & HOST_CAP_64)
		writel((pp->rx_fis_dma >> 16) >> 16,
		       port_mmio + PORT_FIS_ADDR_HI);
	writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);

	/* enable FIS reception */
	tmp = readl(port_mmio + PORT_CMD);
	tmp |= PORT_CMD_FIS_RX;
	writel(tmp, port_mmio + PORT_CMD);

	/* flush */
	readl(port_mmio + PORT_CMD);
}

static int ahci_stop_fis_rx(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 tmp;

	/* disable FIS reception */
	tmp = readl(port_mmio + PORT_CMD);
	tmp &= ~PORT_CMD_FIS_RX;
	writel(tmp, port_mmio + PORT_CMD);

	/* wait for completion, spec says 500ms, give it 1000 */
	tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
				PORT_CMD_FIS_ON, 10, 1000);
	if (tmp & PORT_CMD_FIS_ON)
		return -EBUSY;

	return 0;
}
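/*
 * ahci_power_up - spin up the device (PxCMD.SUD, only when the HBA supports
 * staggered spin-up) and transition the interface to the active ICC state.
 */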
static void ahci_power_up(struct ata_port *ap)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 cmd;

	cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;

	/* spin up device */
	if (hpriv->cap & HOST_CAP_SSS) {
		cmd |= PORT_CMD_SPIN_UP;
		writel(cmd, port_mmio + PORT_CMD);
	}

	/* wake up link */
	writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
}

static void ahci_disable_alpm(struct ata_port *ap)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 cmd;
	struct ahci_port_priv *pp = ap->private_data;

	/* IPM bits should be disabled by libata-core */
	/* get the existing command bits */
	cmd = readl(port_mmio + PORT_CMD);

	/* disable ALPM and ASP */
	cmd &= ~PORT_CMD_ASP;
	cmd &= ~PORT_CMD_ALPE;

	/* force the interface back to active */
	cmd |= PORT_CMD_ICC_ACTIVE;

	/* write out new cmd value */
	writel(cmd, port_mmio + PORT_CMD);
	cmd = readl(port_mmio + PORT_CMD);

	/* wait 10ms to be sure we've come out of any low power state */
	msleep(10);

	/* clear out any PhyRdy stuff from interrupt status */
	writel(PORT_IRQ_PHYRDY, port_mmio + PORT_IRQ_STAT);

	/* go ahead and clean out PhyRdy Change from Serror too */
	ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));

	/*
	 * Clear flag to indicate that we should ignore all PhyRdy
	 * state changes
	 */
	hpriv->flags &= ~AHCI_HFLAG_NO_HOTPLUG;

	/*
	 * Enable interrupts on Phy Ready.
	 */
	pp->intr_mask |= PORT_IRQ_PHYRDY;
	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);

	/*
	 * don't change the link pm policy - we can be called
	 * just to turn off link pm temporarily
	 */
}

static int ahci_enable_alpm(struct ata_port *ap,
	enum link_pm policy)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 cmd;
	struct ahci_port_priv *pp = ap->private_data;
	u32 asp;

	/* Make sure the host is capable of link power management */
	if (!(hpriv->cap & HOST_CAP_ALPM))
		return -EINVAL;

	switch (policy) {
	case MAX_PERFORMANCE:
	case NOT_AVAILABLE:
		/*
		 * if we came here with NOT_AVAILABLE,
		 * it just means this is the first time we
		 * have tried to enable - default to max performance,
		 * and let the user go to lower power modes on request.
		 */
		ahci_disable_alpm(ap);
		return 0;
	case MIN_POWER:
		/* configure HBA to enter SLUMBER */
		asp = PORT_CMD_ASP;
		break;
	case MEDIUM_POWER:
		/* configure HBA to enter PARTIAL */
		asp = 0;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * Disable interrupts on Phy Ready. This keeps us from
	 * getting woken up due to spurious phy ready interrupts
	 * TBD - Hot plug should be done via polling now, is
	 * that even supported?
	 */
	pp->intr_mask &= ~PORT_IRQ_PHYRDY;
	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);

	/*
	 * Set a flag to indicate that we should ignore all PhyRdy
	 * state changes since these can happen now whenever we
	 * change link state
	 */
	hpriv->flags |= AHCI_HFLAG_NO_HOTPLUG;

	/* get the existing command bits */
	cmd = readl(port_mmio + PORT_CMD);

	/*
	 * Set ASP based on Policy
	 */
	cmd |= asp;

	/*
	 * Setting this bit will instruct the HBA to aggressively
	 * enter a lower power link state when it's appropriate and
	 * based on the value set above for ASP
	 */
	cmd |= PORT_CMD_ALPE;

	/* write out new cmd value */
	writel(cmd, port_mmio + PORT_CMD);
	cmd = readl(port_mmio + PORT_CMD);

	/* IPM bits should be set by libata-core */
	return 0;
}

#ifdef CONFIG_PM
static void ahci_power_down(struct ata_port *ap)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 cmd, scontrol;

	if (!(hpriv->cap & HOST_CAP_SSS))
		return;

	/* put device into listen mode, first set PxSCTL.DET to 0 */
	scontrol = readl(port_mmio + PORT_SCR_CTL);
	scontrol &= ~0xf;
	writel(scontrol, port_mmio + PORT_SCR_CTL);

	/* then set PxCMD.SUD to 0 */
	cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
	cmd &= ~PORT_CMD_SPIN_UP;
	writel(cmd, port_mmio + PORT_CMD);
}
#endif
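/*
 * ahci_start_port - bring a port to operational state: enable FIS reception,
 * start the DMA engine, restore any enclosure-management LED state (retrying
 * while the EM transmit bit reports busy) and arm software-activity blinking
 * on links that have it enabled.
 */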
static void ahci_start_port(struct ata_port *ap)
{
	struct ahci_port_priv *pp = ap->private_data;
	struct ata_link *link;
	struct ahci_em_priv *emp;
	ssize_t rc;
	int i;

	/* enable FIS reception */
	ahci_start_fis_rx(ap);

	/* enable DMA */
	ahci_start_engine(ap);

	/* turn on LEDs */
	if (ap->flags & ATA_FLAG_EM) {
		ata_for_each_link(link, ap, EDGE) {
			emp = &pp->em_priv[link->pmp];

			/* EM Transmit bit may be busy during init */
			for (i = 0; i < EM_MAX_RETRY; i++) {
				rc = ahci_transmit_led_message(ap,
							       emp->led_state,
							       4);
				if (rc == -EBUSY)
					msleep(1);
				else
					break;
			}
		}
	}

	if (ap->flags & ATA_FLAG_SW_ACTIVITY)
		ata_for_each_link(link, ap, EDGE)
			ahci_init_sw_activity(link);
}

static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
{
	int rc;

	/* disable DMA */
	rc = ahci_stop_engine(ap);
	if (rc) {
		*emsg = "failed to stop engine";
		return rc;
	}

	/* disable FIS reception */
	rc = ahci_stop_fis_rx(ap);
	if (rc) {
		*emsg = "failed stop FIS RX";
		return rc;
	}

	return 0;
}
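/*
 * ahci_reset_controller - perform a global HBA reset by setting HOST_RESET
 * and polling for the bit to clear (the hardware is allowed up to 1 second),
 * then re-enable AHCI mode and restore the saved CAP/PORTS_IMPL values.
 * Skipped entirely when the skip_host_reset module parameter is set.
 */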
int ahci_reset_controller(struct ata_host *host)
{
	struct ahci_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->mmio;
	u32 tmp;

	/* we must be in AHCI mode, before using anything
	 * AHCI-specific, such as HOST_RESET.
	 */
	ahci_enable_ahci(mmio);

	/* global controller reset */
	if (!ahci_skip_host_reset) {
		tmp = readl(mmio + HOST_CTL);
		if ((tmp & HOST_RESET) == 0) {
			writel(tmp | HOST_RESET, mmio + HOST_CTL);
			readl(mmio + HOST_CTL); /* flush */
		}

		/*
		 * to perform host reset, OS should set HOST_RESET
		 * and poll until this bit is read to be "0".
		 * reset must complete within 1 second, or
		 * the hardware should be considered fried.
		 */
		tmp = ata_wait_register(mmio + HOST_CTL, HOST_RESET,
					HOST_RESET, 10, 1000);

		if (tmp & HOST_RESET) {
			dev_printk(KERN_ERR, host->dev,
				   "controller reset failed (0x%x)\n", tmp);
			return -EIO;
		}

		/* turn on AHCI mode */
		ahci_enable_ahci(mmio);

		/* Some registers might be cleared on reset. Restore
		 * initial values.
		 */
		ahci_restore_initial_config(host);
	} else
		dev_printk(KERN_INFO, host->dev,
			   "skipping global host reset\n");

	return 0;
}
EXPORT_SYMBOL_GPL(ahci_reset_controller);

static void ahci_sw_activity(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ahci_port_priv *pp = ap->private_data;
	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];

	if (!(link->flags & ATA_LFLAG_SW_ACTIVITY))
		return;

	emp->activity++;
	if (!timer_pending(&emp->timer))
		mod_timer(&emp->timer, jiffies + msecs_to_jiffies(10));
}

static void ahci_sw_activity_blink(unsigned long arg)
{
	struct ata_link *link = (struct ata_link *)arg;
	struct ata_port *ap = link->ap;
	struct ahci_port_priv *pp = ap->private_data;
	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
	unsigned long led_message = emp->led_state;
	u32 activity_led_state;
	unsigned long flags;

	led_message &= EM_MSG_LED_VALUE;
	led_message |= ap->port_no | (link->pmp << 8);

	/* check to see if we've had activity. If so,
	 * toggle state of LED and reset timer. If not,
	 * turn LED to desired idle state.
	 */
	spin_lock_irqsave(ap->lock, flags);
	if (emp->saved_activity != emp->activity) {
		emp->saved_activity = emp->activity;
		/* get the current LED state */
		activity_led_state = led_message & EM_MSG_LED_VALUE_ON;

		if (activity_led_state)
			activity_led_state = 0;
		else
			activity_led_state = 1;

		/* clear old state */
		led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;

		/* toggle state */
		led_message |= (activity_led_state << 16);
		mod_timer(&emp->timer, jiffies + msecs_to_jiffies(100));
	} else {
		/* switch to idle */
		led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
		if (emp->blink_policy == BLINK_OFF)
			led_message |= (1 << 16);
	}
	spin_unlock_irqrestore(ap->lock, flags);
	ahci_transmit_led_message(ap, led_message, 4);
}

static void ahci_init_sw_activity(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ahci_port_priv *pp = ap->private_data;
	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];

	/* init activity stats, setup timer */
	emp->saved_activity = emp->activity = 0;
	setup_timer(&emp->timer, ahci_sw_activity_blink, (unsigned long)link);

	/* check our blink policy and set flag for link if it's enabled */
	if (emp->blink_policy)
		link->flags |= ATA_LFLAG_SW_ACTIVITY;
}

int ahci_reset_em(struct ata_host *host)
{
	struct ahci_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->mmio;
	u32 em_ctl;

	em_ctl = readl(mmio + HOST_EM_CTL);
	if ((em_ctl & EM_CTL_TM) || (em_ctl & EM_CTL_RST))
		return -EINVAL;

	writel(em_ctl | EM_CTL_RST, mmio + HOST_EM_CTL);
	return 0;
}
EXPORT_SYMBOL_GPL(ahci_reset_em);
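/*
 * ahci_transmit_led_message - send an enclosure-management LED message.
 * The PMP/slot field of @state (bits 8-15) selects which em_priv slot the
 * new LED state is saved in; returns -EBUSY if a previous message is still
 * being transmitted, otherwise, for LED-type messages, writes the two-dword
 * message at em_loc and sets EM_CTL_TM to start transmission.
 */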
static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
					 ssize_t size)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct ahci_port_priv *pp = ap->private_data;
	void __iomem *mmio = hpriv->mmio;
	u32 em_ctl;
	u32 message[] = {0, 0};
	unsigned long flags;
	int pmp;
	struct ahci_em_priv *emp;

	/* get the slot number from the message */
	pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
	if (pmp < EM_MAX_SLOTS)
		emp = &pp->em_priv[pmp];
	else
		return -EINVAL;

	spin_lock_irqsave(ap->lock, flags);

	/*
	 * if we are still busy transmitting a previous message,
	 * do not allow
	 */
	em_ctl = readl(mmio + HOST_EM_CTL);
	if (em_ctl & EM_CTL_TM) {
		spin_unlock_irqrestore(ap->lock, flags);
		return -EBUSY;
	}

	if (hpriv->em_msg_type & EM_MSG_TYPE_LED) {
		/*
		 * create message header - this is all zero except for
		 * the message size, which is 4 bytes.
		 */
		message[0] |= (4 << 8);

		/* ignore 0:4 of byte zero, fill in port info yourself */
		message[1] = ((state & ~EM_MSG_LED_HBA_PORT) | ap->port_no);

		/* write message to EM_LOC */
		writel(message[0], mmio + hpriv->em_loc);
		writel(message[1], mmio + hpriv->em_loc+4);

		/*
		 * tell hardware to transmit the message
		 */
		writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);
	}

	/* save off new led state for port/slot */
	emp->led_state = state;

	spin_unlock_irqrestore(ap->lock, flags);
	return size;
}

static ssize_t ahci_led_show(struct ata_port *ap, char *buf)
{
	struct ahci_port_priv *pp = ap->private_data;
	struct ata_link *link;
	struct ahci_em_priv *emp;
	int rc = 0;

	ata_for_each_link(link, ap, EDGE) {
		emp = &pp->em_priv[link->pmp];
		rc += sprintf(buf, "%lx\n", emp->led_state);
	}
	return rc;
}

static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
			      size_t size)
{
	int state;
	int pmp;
	struct ahci_port_priv *pp = ap->private_data;
	struct ahci_em_priv *emp;

	state = simple_strtoul(buf, NULL, 0);

	/* get the slot number from the message */
	pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
	if (pmp < EM_MAX_SLOTS)
		emp = &pp->em_priv[pmp];
	else
		return -EINVAL;

	/* mask off the activity bits if we are in sw_activity
	 * mode, user should turn off sw_activity before setting
	 * activity led through em_message
	 */
	if (emp->blink_policy)
		state &= ~EM_MSG_LED_VALUE_ACTIVITY;

	return ahci_transmit_led_message(ap, state, size);
}

static ssize_t ahci_activity_store(struct ata_device *dev, enum sw_activity val)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ahci_port_priv *pp = ap->private_data;
	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
	u32 port_led_state = emp->led_state;

	/* save the desired Activity LED behavior */
	if (val == OFF) {
		/* clear LFLAG */
		link->flags &= ~(ATA_LFLAG_SW_ACTIVITY);

		/* set the LED to OFF */
		port_led_state &= EM_MSG_LED_VALUE_OFF;
		port_led_state |= (ap->port_no | (link->pmp << 8));
		ahci_transmit_led_message(ap, port_led_state, 4);
	} else {
		link->flags |= ATA_LFLAG_SW_ACTIVITY;
		if (val == BLINK_OFF) {
			/* set LED to ON for idle */
			port_led_state &= EM_MSG_LED_VALUE_OFF;
			port_led_state |= (ap->port_no | (link->pmp << 8));
			port_led_state |= EM_MSG_LED_VALUE_ON; /* check this */
			ahci_transmit_led_message(ap, port_led_state, 4);
		}
	}
	emp->blink_policy = val;
	return 0;
}

static ssize_t ahci_activity_show(struct ata_device *dev, char *buf)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ahci_port_priv *pp = ap->private_data;
	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];

	/* display the saved value of activity behavior for this
	 * disk.
	 */
	return sprintf(buf, "%d\n", emp->blink_policy);
}

static void ahci_port_init(struct device *dev, struct ata_port *ap,
			   int port_no, void __iomem *mmio,
			   void __iomem *port_mmio)
{
	const char *emsg = NULL;
	int rc;
	u32 tmp;

	/* make sure port is not active */
	rc = ahci_deinit_port(ap, &emsg);
	if (rc)
		dev_warn(dev, "%s (%d)\n", emsg, rc);

	/* clear SError */
	tmp = readl(port_mmio + PORT_SCR_ERR);
	VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
	writel(tmp, port_mmio + PORT_SCR_ERR);

	/* clear port IRQ */
	tmp = readl(port_mmio + PORT_IRQ_STAT);
	VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
	if (tmp)
		writel(tmp, port_mmio + PORT_IRQ_STAT);

	writel(1 << port_no, mmio + HOST_IRQ_STAT);
}

void ahci_init_controller(struct ata_host *host)
{
	struct ahci_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->mmio;
	int i;
	void __iomem *port_mmio;
	u32 tmp;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		port_mmio = ahci_port_base(ap);
		if (ata_port_is_dummy(ap))
			continue;

		ahci_port_init(host->dev, ap, i, mmio, port_mmio);
	}

	tmp = readl(mmio + HOST_CTL);
	VPRINTK("HOST_CTL 0x%x\n", tmp);
	writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
	tmp = readl(mmio + HOST_CTL);
	VPRINTK("HOST_CTL 0x%x\n", tmp);
}
EXPORT_SYMBOL_GPL(ahci_init_controller);

static void ahci_dev_config(struct ata_device *dev)
{
	struct ahci_host_priv *hpriv = dev->link->ap->host->private_data;

	if (hpriv->flags & AHCI_HFLAG_SECT255) {
		dev->max_sectors = 255;
		ata_dev_printk(dev, KERN_INFO,
			       "SB600 AHCI: limiting to 255 sectors per cmd\n");
	}
}

static unsigned int ahci_dev_classify(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ata_taskfile tf;
	u32 tmp;

	tmp = readl(port_mmio + PORT_SIG);
	tf.lbah = (tmp >> 24) & 0xff;
	tf.lbam = (tmp >> 16) & 0xff;
	tf.lbal = (tmp >> 8) & 0xff;
	tf.nsect = (tmp) & 0xff;

	return ata_dev_classify(&tf);
}

static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
			       u32 opts)
{
	dma_addr_t cmd_tbl_dma;

	cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;

	pp->cmd_slot[tag].opts = cpu_to_le32(opts);
	pp->cmd_slot[tag].status = 0;
	pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
	pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
}
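/*
 * ahci_kick_engine - recover the command engine after an error: stop the
 * engine, and if the device still reports BSY/DRQ or a port multiplier is
 * attached, issue a Command List Override (PxCMD.CLO) when the HBA supports
 * it, then restart the engine.
 */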
int ahci_kick_engine(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ahci_host_priv *hpriv = ap->host->private_data;
	u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
	u32 tmp;
	int busy, rc;

	/* stop engine */
	rc = ahci_stop_engine(ap);
	if (rc)
		goto out_restart;

	/* need to do CLO?
	 * always do CLO if PMP is attached (AHCI-1.3 9.2)
	 */
	busy = status & (ATA_BUSY | ATA_DRQ);
	if (!busy && !sata_pmp_attached(ap)) {
		rc = 0;
		goto out_restart;
	}

	if (!(hpriv->cap & HOST_CAP_CLO)) {
		rc = -EOPNOTSUPP;
		goto out_restart;
	}

	/* perform CLO */
	tmp = readl(port_mmio + PORT_CMD);
	tmp |= PORT_CMD_CLO;
	writel(tmp, port_mmio + PORT_CMD);

	rc = 0;
	tmp = ata_wait_register(port_mmio + PORT_CMD,
				PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
	if (tmp & PORT_CMD_CLO)
		rc = -EIO;

	/* restart engine */
 out_restart:
	ahci_start_engine(ap);
	return rc;
}
EXPORT_SYMBOL_GPL(ahci_kick_engine);
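/*
 * ahci_exec_polled_cmd - build a command FIS in command slot 0 and issue it
 * by hand, optionally polling PxCI for completion for up to @timeout_msec;
 * on timeout the engine is kicked and -EBUSY is returned.  Used by the
 * softreset path below.
 */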
static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
				struct ata_taskfile *tf, int is_cmd, u16 flags,
				unsigned long timeout_msec)
{
	const u32 cmd_fis_len = 5; /* five dwords */
	struct ahci_port_priv *pp = ap->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u8 *fis = pp->cmd_tbl;
	u32 tmp;

	/* prep the command */
	ata_tf_to_fis(tf, pmp, is_cmd, fis);
	ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));

	/* issue & wait */
	writel(1, port_mmio + PORT_CMD_ISSUE);

	if (timeout_msec) {
		tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1,
					1, timeout_msec);
		if (tmp & 0x1) {
			ahci_kick_engine(ap);
			return -EBUSY;
		}
	} else
		readl(port_mmio + PORT_CMD_ISSUE);	/* flush */

	return 0;
}
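/*
 * ahci_do_softreset - ATA software reset over AHCI (AHCI-1.1 10.4.1): kick
 * the engine, issue a Register FIS with SRST set, wait briefly, issue a
 * second FIS with SRST cleared, then wait for the link to become ready and
 * classify the attached device from its signature.
 */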
int ahci_do_softreset(struct ata_link *link, unsigned int *class,
		      int pmp, unsigned long deadline,
		      int (*check_ready)(struct ata_link *link))
{
	struct ata_port *ap = link->ap;
	struct ahci_host_priv *hpriv = ap->host->private_data;
	const char *reason = NULL;
	unsigned long now, msecs;
	struct ata_taskfile tf;
	int rc;

	DPRINTK("ENTER\n");

	/* prepare for SRST (AHCI-1.1 10.4.1) */
	rc = ahci_kick_engine(ap);
	if (rc && rc != -EOPNOTSUPP)
		ata_link_printk(link, KERN_WARNING,
				"failed to reset engine (errno=%d)\n", rc);

	ata_tf_init(link->device, &tf);

	/* issue the first D2H Register FIS */
	msecs = 0;
	now = jiffies;
	if (time_after(deadline, now))
		msecs = jiffies_to_msecs(deadline - now);

	tf.ctl |= ATA_SRST;
	if (ahci_exec_polled_cmd(ap, pmp, &tf, 0,
				 AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY, msecs)) {
		rc = -EIO;
		reason = "1st FIS failed";
		goto fail;
	}

	/* spec says at least 5us, but be generous and sleep for 1ms */
	msleep(1);

	/* issue the second D2H Register FIS */
	tf.ctl &= ~ATA_SRST;
	ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);

	/* wait for link to become ready */
	rc = ata_wait_after_reset(link, deadline, check_ready);
	if (rc == -EBUSY && hpriv->flags & AHCI_HFLAG_SRST_TOUT_IS_OFFLINE) {
		/*
		 * Workaround for cases where link online status can't
		 * be trusted. Treat device readiness timeout as link
		 * offline.
		 */
		ata_link_printk(link, KERN_INFO,
				"device not ready, treating as offline\n");
		*class = ATA_DEV_NONE;
	} else if (rc) {
		/* link occupied, -ENODEV too is an error */
		reason = "device not ready";
		goto fail;
	} else
		*class = ahci_dev_classify(ap);

	DPRINTK("EXIT, class=%u\n", *class);
	return 0;

 fail:
	ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason);
	return rc;
}

int ahci_check_ready(struct ata_link *link)
{
	void __iomem *port_mmio = ahci_port_base(link->ap);
	u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;

	return ata_check_ready(status);
}
EXPORT_SYMBOL_GPL(ahci_check_ready);

static int ahci_softreset(struct ata_link *link, unsigned int *class,
			  unsigned long deadline)
{
	int pmp = sata_srst_pmp(link);

	DPRINTK("ENTER\n");

	return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
}
EXPORT_SYMBOL_GPL(ahci_do_softreset);

static int ahci_hardreset(struct ata_link *link, unsigned int *class,
			  unsigned long deadline)
{
	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
	struct ata_port *ap = link->ap;
	struct ahci_port_priv *pp = ap->private_data;
	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
	struct ata_taskfile tf;
	bool online;
	int rc;

	DPRINTK("ENTER\n");

	ahci_stop_engine(ap);

	/* clear D2H reception area to properly wait for D2H FIS */
	ata_tf_init(link->device, &tf);
	tf.command = 0x80;
	ata_tf_to_fis(&tf, 0, 0, d2h_fis);

	rc = sata_link_hardreset(link, timing, deadline, &online,
				 ahci_check_ready);

	ahci_start_engine(ap);

	if (online)
		*class = ahci_dev_classify(ap);

	DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
	return rc;
}

static void ahci_postreset(struct ata_link *link, unsigned int *class)
{
	struct ata_port *ap = link->ap;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 new_tmp, tmp;

	ata_std_postreset(link, class);

	/* Make sure port's ATAPI bit is set appropriately */
	new_tmp = tmp = readl(port_mmio + PORT_CMD);
	if (*class == ATA_DEV_ATAPI)
		new_tmp |= PORT_CMD_ATAPI;
	else
		new_tmp &= ~PORT_CMD_ATAPI;
	if (new_tmp != tmp) {
		writel(new_tmp, port_mmio + PORT_CMD);
		readl(port_mmio + PORT_CMD); /* flush */
	}
}

static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
{
	struct scatterlist *sg;
	struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
	unsigned int si;

	VPRINTK("ENTER\n");

	/*
	 * Next, the S/G list.
	 */
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		ahci_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
		ahci_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
		ahci_sg[si].flags_size = cpu_to_le32(sg_len - 1);
	}

	return si;
}

static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ahci_port_priv *pp = ap->private_data;

	if (!sata_pmp_attached(ap) || pp->fbs_enabled)
		return ata_std_qc_defer(qc);
	else
		return sata_pmp_qc_defer_cmd_switch(qc);
}
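/*
 * ahci_qc_prep - build the per-tag command table for a queued command: the
 * Host to Device Register FIS, the ATAPI CDB when applicable, and the
 * scatter/gather PRD entries, then fill the matching command-list slot with
 * the option flags (FIS length, PMP, write/ATAPI/prefetch bits).
 */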
static void ahci_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ahci_port_priv *pp = ap->private_data;
	int is_atapi = ata_is_atapi(qc->tf.protocol);
	void *cmd_tbl;
	u32 opts;
	const u32 cmd_fis_len = 5; /* five dwords */
	unsigned int n_elem;

	/*
	 * Fill in command table information. First, the header,
	 * a SATA Register - Host to Device command FIS.
	 */
	cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;

	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
	if (is_atapi) {
		memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
		memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
	}

	n_elem = 0;
	if (qc->flags & ATA_QCFLAG_DMAMAP)
		n_elem = ahci_fill_sg(qc, cmd_tbl);

	/*
	 * Fill in command slot information.
	 */
	opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12);
	if (qc->tf.flags & ATA_TFLAG_WRITE)
		opts |= AHCI_CMD_WRITE;
	if (is_atapi)
		opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;

	ahci_fill_cmd_slot(pp, qc->tag, opts);
}

static void ahci_fbs_dec_intr(struct ata_port *ap)
{
	struct ahci_port_priv *pp = ap->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 fbs = readl(port_mmio + PORT_FBS);
	int retries = 3;

	DPRINTK("ENTER\n");
	BUG_ON(!pp->fbs_enabled);

	/* time to wait for DEC is not specified by AHCI spec,
	 * add a retry loop for safety.
	 */
	writel(fbs | PORT_FBS_DEC, port_mmio + PORT_FBS);
	fbs = readl(port_mmio + PORT_FBS);
	while ((fbs & PORT_FBS_DEC) && retries--) {
		udelay(1);
		fbs = readl(port_mmio + PORT_FBS);
	}

	if (fbs & PORT_FBS_DEC)
		dev_printk(KERN_ERR, ap->host->dev,
			   "failed to clear device error\n");
}
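/*
 * ahci_error_intr - decode an error interrupt: pick the faulting link (from
 * PxFBS when FIS-based switching is enabled, otherwise the active link),
 * record and clear SError, translate the individual PORT_IRQ_* error bits
 * into libata error masks and EH actions, and finally freeze or abort so
 * error handling takes over.
 */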

static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct ahci_port_priv *pp = ap->private_data;
	struct ata_eh_info *host_ehi = &ap->link.eh_info;
	struct ata_link *link = NULL;
	struct ata_queued_cmd *active_qc;
	struct ata_eh_info *active_ehi;
	bool fbs_need_dec = false;
	u32 serror;

	/* determine active link with error */
	if (pp->fbs_enabled) {
		void __iomem *port_mmio = ahci_port_base(ap);
		u32 fbs = readl(port_mmio + PORT_FBS);
		int pmp = fbs >> PORT_FBS_DWE_OFFSET;

		if ((fbs & PORT_FBS_SDE) && (pmp < ap->nr_pmp_links) &&
		    ata_link_online(&ap->pmp_link[pmp])) {
			link = &ap->pmp_link[pmp];
			fbs_need_dec = true;
		}
	} else
		ata_for_each_link(link, ap, EDGE)
			if (ata_link_active(link))
				break;

	if (!link)
		link = &ap->link;

	active_qc = ata_qc_from_tag(ap, link->active_tag);
	active_ehi = &link->eh_info;

	/* record irq stat */
	ata_ehi_clear_desc(host_ehi);
	ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat);

	/* AHCI needs SError cleared; otherwise, it might lock up */
	ahci_scr_read(&ap->link, SCR_ERROR, &serror);
	ahci_scr_write(&ap->link, SCR_ERROR, serror);
	host_ehi->serror |= serror;

	/* some controllers set IRQ_IF_ERR on device errors, ignore it */
	if (hpriv->flags & AHCI_HFLAG_IGN_IRQ_IF_ERR)
		irq_stat &= ~PORT_IRQ_IF_ERR;

	if (irq_stat & PORT_IRQ_TF_ERR) {
		/* If qc is active, charge it; otherwise, the active
		 * link.  There's no active qc on NCQ errors.  It will
		 * be determined by EH by reading log page 10h.
		 */
		if (active_qc)
			active_qc->err_mask |= AC_ERR_DEV;
		else
			active_ehi->err_mask |= AC_ERR_DEV;

		if (hpriv->flags & AHCI_HFLAG_IGN_SERR_INTERNAL)
			host_ehi->serror &= ~SERR_INTERNAL;
	}

	if (irq_stat & PORT_IRQ_UNK_FIS) {
		u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);

		active_ehi->err_mask |= AC_ERR_HSM;
		active_ehi->action |= ATA_EH_RESET;
		ata_ehi_push_desc(active_ehi,
				  "unknown FIS %08x %08x %08x %08x",
				  unk[0], unk[1], unk[2], unk[3]);
	}

	if (sata_pmp_attached(ap) && (irq_stat & PORT_IRQ_BAD_PMP)) {
		active_ehi->err_mask |= AC_ERR_HSM;
		active_ehi->action |= ATA_EH_RESET;
		ata_ehi_push_desc(active_ehi, "incorrect PMP");
	}

	if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
		host_ehi->err_mask |= AC_ERR_HOST_BUS;
		host_ehi->action |= ATA_EH_RESET;
		ata_ehi_push_desc(host_ehi, "host bus error");
	}

	if (irq_stat & PORT_IRQ_IF_ERR) {
		if (fbs_need_dec)
			active_ehi->err_mask |= AC_ERR_DEV;
		else {
			host_ehi->err_mask |= AC_ERR_ATA_BUS;
			host_ehi->action |= ATA_EH_RESET;
		}

		ata_ehi_push_desc(host_ehi, "interface fatal error");
	}

	if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
		ata_ehi_hotplugged(host_ehi);
		ata_ehi_push_desc(host_ehi, "%s",
				  irq_stat & PORT_IRQ_CONNECT ?
				  "connection status changed" : "PHY RDY changed");
	}

	/* okay, let's hand over to EH */
	if (irq_stat & PORT_IRQ_FREEZE)
		ata_port_freeze(ap);
	else if (fbs_need_dec) {
		ata_link_abort(link);
		ahci_fbs_dec_intr(ap);
	} else
		ata_port_abort(ap);
}

static void ahci_port_intr(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ata_eh_info *ehi = &ap->link.eh_info;
	struct ahci_port_priv *pp = ap->private_data;
	struct ahci_host_priv *hpriv = ap->host->private_data;
	int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING);
	u32 status, qc_active = 0;
	int rc;

	status = readl(port_mmio + PORT_IRQ_STAT);
	writel(status, port_mmio + PORT_IRQ_STAT);

	/* ignore BAD_PMP while resetting */
	if (unlikely(resetting))
		status &= ~PORT_IRQ_BAD_PMP;

	/* With hotplug disabled, a PhyRdy change is just a power state
	 * change; clear the interrupt bit along with the PhyRdy/Comm
	 * Wake bits in SError.
	 */
	if ((hpriv->flags & AHCI_HFLAG_NO_HOTPLUG) &&
	    (status & PORT_IRQ_PHYRDY)) {
		status &= ~PORT_IRQ_PHYRDY;
		ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
	}

	if (unlikely(status & PORT_IRQ_ERROR)) {
		ahci_error_intr(ap, status);
		return;
	}

	if (status & PORT_IRQ_SDB_FIS) {
		/* If SNotification is available, leave notification
		 * handling to sata_async_notification().  If not,
		 * emulate it by snooping SDB FIS RX area.
		 *
		 * Snooping FIS RX area is probably cheaper than
		 * poking SNotification, but some controllers which
		 * implement SNotification, ICH9 for example, don't
		 * store AN SDB FIS into receive area.
		 */
		if (hpriv->cap & HOST_CAP_SNTF)
			sata_async_notification(ap);
		else {
			/* If the 'N' bit in word 0 of the FIS is set,
			 * we just received asynchronous notification.
			 * Tell libata about it.
			 *
			 * Lack of SNotification should not appear in
			 * ahci 1.2, so the workaround is unnecessary
			 * when FBS is enabled.
			 */
			if (pp->fbs_enabled)
				WARN_ON_ONCE(1);
			else {
				const __le32 *f = pp->rx_fis + RX_FIS_SDB;
				u32 f0 = le32_to_cpu(f[0]);

				if (f0 & (1 << 15))
					sata_async_notification(ap);
			}
		}
	}

	/* pp->active_link is not reliable once FBS is enabled; both
	 * PORT_SCR_ACT and PORT_CMD_ISSUE should be checked because
	 * NCQ and non-NCQ commands may be in flight at the same time.
	 */
	if (pp->fbs_enabled) {
		if (ap->qc_active) {
			qc_active = readl(port_mmio + PORT_SCR_ACT);
			qc_active |= readl(port_mmio + PORT_CMD_ISSUE);
		}
	} else {
		/* pp->active_link is valid iff any command is in flight */
		if (ap->qc_active && pp->active_link->sactive)
			qc_active = readl(port_mmio + PORT_SCR_ACT);
		else
			qc_active = readl(port_mmio + PORT_CMD_ISSUE);
	}

	rc = ata_qc_complete_multiple(ap, qc_active);

	/* while resetting, invalid completions are expected */
	if (unlikely(rc < 0 && !resetting)) {
		ehi->err_mask |= AC_ERR_HSM;
		ehi->action |= ATA_EH_RESET;
		ata_port_freeze(ap);
	}
}

irqreturn_t ahci_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct ahci_host_priv *hpriv;
	unsigned int i, handled = 0;
	void __iomem *mmio;
	u32 irq_stat, irq_masked;

	VPRINTK("ENTER\n");

	hpriv = host->private_data;
	mmio = hpriv->mmio;

	/* sigh.  0xffffffff is a valid return from h/w */
	irq_stat = readl(mmio + HOST_IRQ_STAT);
	if (!irq_stat)
		return IRQ_NONE;

	irq_masked = irq_stat & hpriv->port_map;

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		if (!(irq_masked & (1 << i)))
			continue;

		ap = host->ports[i];
		if (ap) {
			ahci_port_intr(ap);
			VPRINTK("port %u\n", i);
		} else {
			VPRINTK("port %u (no irq)\n", i);
			if (ata_ratelimit())
				dev_printk(KERN_WARNING, host->dev,
					   "interrupt on disabled port %u\n", i);
		}

		handled = 1;
	}

	/* HOST_IRQ_STAT behaves as a level-triggered latch, meaning it
	 * must be cleared only after all the port events have been
	 * cleared; otherwise, it will raise a spurious interrupt after
	 * each valid one.  Please read section 10.6.2 of ahci 1.1 for
	 * more information.
	 *
	 * Also, use the unmasked value to clear the interrupt, as a
	 * spurious pending event on a dummy port might otherwise cause
	 * a screaming IRQ.
	 */
	writel(irq_stat, mmio + HOST_IRQ_STAT);

	spin_unlock(&host->lock);

	VPRINTK("EXIT\n");

	return IRQ_RETVAL(handled);
}
EXPORT_SYMBOL_GPL(ahci_interrupt);
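
/*
 * Illustrative sketch, not used anywhere in the driver (the *_sketch
 * helper name is ours): HOST_IRQ_STAT and hpriv->port_map are plain
 * per-port bitmasks, so the per-port dispatch decision made inside the
 * loop of ahci_interrupt() above reduces to a single bit test.
 */
static inline bool ahci_port_irq_pending_sketch(u32 irq_stat, u32 port_map,
						unsigned int port_no)
{
	return (irq_stat & port_map & (1U << port_no)) != 0;
}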

static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ahci_port_priv *pp = ap->private_data;

	/* Keep track of the currently active link.  It will be used
	 * in the completion path to determine whether an NCQ phase is
	 * in progress.
	 */
	pp->active_link = qc->dev->link;

	if (qc->tf.protocol == ATA_PROT_NCQ)
		writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);

	if (pp->fbs_enabled && pp->fbs_last_dev != qc->dev->link->pmp) {
		u32 fbs = readl(port_mmio + PORT_FBS);

		fbs &= ~(PORT_FBS_DEV_MASK | PORT_FBS_DEC);
		fbs |= qc->dev->link->pmp << PORT_FBS_DEV_OFFSET;
		writel(fbs, port_mmio + PORT_FBS);
		pp->fbs_last_dev = qc->dev->link->pmp;
	}

	writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);

	ahci_sw_activity(qc->dev->link);

	return 0;
}

static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
{
	struct ahci_port_priv *pp = qc->ap->private_data;
	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;

	if (pp->fbs_enabled)
		d2h_fis += qc->dev->link->pmp * AHCI_RX_FIS_SZ;

	ata_tf_from_fis(d2h_fis, &qc->result_tf);
	return true;
}

static void ahci_freeze(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);

	/* turn IRQ off */
	writel(0, port_mmio + PORT_IRQ_MASK);
}

static void ahci_thaw(struct ata_port *ap)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->mmio;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 tmp;
	struct ahci_port_priv *pp = ap->private_data;

	/* clear IRQ */
	tmp = readl(port_mmio + PORT_IRQ_STAT);
	writel(tmp, port_mmio + PORT_IRQ_STAT);
	writel(1 << ap->port_no, mmio + HOST_IRQ_STAT);

	/* turn IRQ back on */
	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
}

static void ahci_error_handler(struct ata_port *ap)
{
	if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
		/* restart engine */
		ahci_stop_engine(ap);
		ahci_start_engine(ap);
	}

	sata_pmp_error_handler(ap);

	if (!ata_dev_enabled(ap->link.device))
		ahci_stop_engine(ap);
}

static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* make DMA engine forget about the failed command */
	if (qc->flags & ATA_QCFLAG_FAILED)
		ahci_kick_engine(ap);
}

static void ahci_enable_fbs(struct ata_port *ap)
{
	struct ahci_port_priv *pp = ap->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 fbs;
	int rc;

	if (!pp->fbs_supported)
		return;

	fbs = readl(port_mmio + PORT_FBS);
	if (fbs & PORT_FBS_EN) {
		pp->fbs_enabled = true;
		pp->fbs_last_dev = -1; /* initialization */
		return;
	}

	rc = ahci_stop_engine(ap);
	if (rc)
		return;

	writel(fbs | PORT_FBS_EN, port_mmio + PORT_FBS);
	fbs = readl(port_mmio + PORT_FBS);
	if (fbs & PORT_FBS_EN) {
		dev_printk(KERN_INFO, ap->host->dev, "FBS is enabled.\n");
		pp->fbs_enabled = true;
		pp->fbs_last_dev = -1; /* initialization */
	} else
		dev_printk(KERN_ERR, ap->host->dev, "Failed to enable FBS\n");

	ahci_start_engine(ap);
}

static void ahci_disable_fbs(struct ata_port *ap)
{
	struct ahci_port_priv *pp = ap->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 fbs;
	int rc;

	if (!pp->fbs_supported)
		return;

	fbs = readl(port_mmio + PORT_FBS);
	if ((fbs & PORT_FBS_EN) == 0) {
		pp->fbs_enabled = false;
		return;
	}

	rc = ahci_stop_engine(ap);
	if (rc)
		return;

	writel(fbs & ~PORT_FBS_EN, port_mmio + PORT_FBS);
	fbs = readl(port_mmio + PORT_FBS);
	if (fbs & PORT_FBS_EN)
		dev_printk(KERN_ERR, ap->host->dev, "Failed to disable FBS\n");
	else {
		dev_printk(KERN_INFO, ap->host->dev, "FBS is disabled.\n");
		pp->fbs_enabled = false;
	}

	ahci_start_engine(ap);
}

static void ahci_pmp_attach(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ahci_port_priv *pp = ap->private_data;
	u32 cmd;

	cmd = readl(port_mmio + PORT_CMD);
	cmd |= PORT_CMD_PMP;
	writel(cmd, port_mmio + PORT_CMD);

	ahci_enable_fbs(ap);

	pp->intr_mask |= PORT_IRQ_BAD_PMP;
	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
}

static void ahci_pmp_detach(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ahci_port_priv *pp = ap->private_data;
	u32 cmd;

	ahci_disable_fbs(ap);

	cmd = readl(port_mmio + PORT_CMD);
	cmd &= ~PORT_CMD_PMP;
	writel(cmd, port_mmio + PORT_CMD);

	pp->intr_mask &= ~PORT_IRQ_BAD_PMP;
	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
}

static int ahci_port_resume(struct ata_port *ap)
{
	ahci_power_up(ap);
	ahci_start_port(ap);

	if (sata_pmp_attached(ap))
		ahci_pmp_attach(ap);
	else
		ahci_pmp_detach(ap);

	return 0;
}

#ifdef CONFIG_PM
static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	const char *emsg = NULL;
	int rc;

	rc = ahci_deinit_port(ap, &emsg);
	if (rc == 0)
		ahci_power_down(ap);
	else {
		ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
		ahci_start_port(ap);
	}

	return rc;
}
#endif

static int ahci_port_start(struct ata_port *ap)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct device *dev = ap->host->dev;
	struct ahci_port_priv *pp;
	void *mem;
	dma_addr_t mem_dma;
	size_t dma_sz, rx_fis_sz;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	/* check FBS capability */
	if ((hpriv->cap & HOST_CAP_FBS) && sata_pmp_supported(ap)) {
		void __iomem *port_mmio = ahci_port_base(ap);
		u32 cmd = readl(port_mmio + PORT_CMD);

		if (cmd & PORT_CMD_FBSCP)
			pp->fbs_supported = true;
		else
			dev_printk(KERN_WARNING, dev,
				   "The port is not capable of FBS\n");
	}

	if (pp->fbs_supported) {
		dma_sz = AHCI_PORT_PRIV_FBS_DMA_SZ;
		rx_fis_sz = AHCI_RX_FIS_SZ * 16;
	} else {
		dma_sz = AHCI_PORT_PRIV_DMA_SZ;
		rx_fis_sz = AHCI_RX_FIS_SZ;
	}

	mem = dmam_alloc_coherent(dev, dma_sz, &mem_dma, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, dma_sz);

	/*
	 * First item in chunk of DMA memory: 32-slot command table,
	 * 32 bytes each in size
	 */
	pp->cmd_slot = mem;
	pp->cmd_slot_dma = mem_dma;

	mem += AHCI_CMD_SLOT_SZ;
	mem_dma += AHCI_CMD_SLOT_SZ;

	/*
	 * Second item: Received-FIS area
	 */
	pp->rx_fis = mem;
	pp->rx_fis_dma = mem_dma;

	mem += rx_fis_sz;
	mem_dma += rx_fis_sz;

	/*
	 * Third item: data area for storing a single command
	 * and its scatter-gather table
	 */
	pp->cmd_tbl = mem;
	pp->cmd_tbl_dma = mem_dma;

	/*
	 * Save off initial list of interrupts to be enabled.
	 * This could be changed later
	 */
	pp->intr_mask = DEF_PORT_IRQ;

	ap->private_data = pp;

	/* engage engines, captain */
	return ahci_port_resume(ap);
}
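
/*
 * Illustrative sketch, not used anywhere in the driver (the *_sketch
 * helper name is ours): it spells out the layout of the single coherent
 * DMA allocation carved up in ahci_port_start() above.  The command
 * slot array comes first, the received-FIS area follows (sixteen times
 * larger when FBS is in use), and the command table takes the
 * remainder of the allocation.
 */
static inline void ahci_port_mem_layout_sketch(bool fbs_supported,
					       size_t *cmd_slot_off,
					       size_t *rx_fis_off,
					       size_t *cmd_tbl_off)
{
	size_t rx_fis_sz = fbs_supported ? AHCI_RX_FIS_SZ * 16 : AHCI_RX_FIS_SZ;

	*cmd_slot_off = 0;
	*rx_fis_off = AHCI_CMD_SLOT_SZ;
	*cmd_tbl_off = AHCI_CMD_SLOT_SZ + rx_fis_sz;
}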

static void ahci_port_stop(struct ata_port *ap)
{
	const char *emsg = NULL;
	int rc;

	/* de-initialize port */
	rc = ahci_deinit_port(ap, &emsg);
	if (rc)
		ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
}

void ahci_print_info(struct ata_host *host, const char *scc_s)
{
	struct ahci_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->mmio;
	u32 vers, cap, cap2, impl, speed;
	const char *speed_s;

	vers = readl(mmio + HOST_VERSION);
	cap = hpriv->cap;
	cap2 = hpriv->cap2;
	impl = hpriv->port_map;

	speed = (cap >> 20) & 0xf;
	if (speed == 1)
		speed_s = "1.5";
	else if (speed == 2)
		speed_s = "3";
	else if (speed == 3)
		speed_s = "6";
	else
		speed_s = "?";

	dev_info(host->dev,
		 "AHCI %02x%02x.%02x%02x "
		 "%u slots %u ports %s Gbps 0x%x impl %s mode\n",
		 (vers >> 24) & 0xff,
		 (vers >> 16) & 0xff,
		 (vers >> 8) & 0xff,
		 vers & 0xff,
		 ((cap >> 8) & 0x1f) + 1,
		 (cap & 0x1f) + 1,
		 speed_s,
		 impl,
		 scc_s);

	dev_info(host->dev,
		 "flags: "
		 "%s%s%s%s%s%s%s"
		 "%s%s%s%s%s%s%s"
		 "%s%s%s%s%s%s\n",
		 cap & HOST_CAP_64 ? "64bit " : "",
		 cap & HOST_CAP_NCQ ? "ncq " : "",
		 cap & HOST_CAP_SNTF ? "sntf " : "",
		 cap & HOST_CAP_MPS ? "ilck " : "",
		 cap & HOST_CAP_SSS ? "stag " : "",
		 cap & HOST_CAP_ALPM ? "pm " : "",
		 cap & HOST_CAP_LED ? "led " : "",
		 cap & HOST_CAP_CLO ? "clo " : "",
		 cap & HOST_CAP_ONLY ? "only " : "",
		 cap & HOST_CAP_PMP ? "pmp " : "",
		 cap & HOST_CAP_FBS ? "fbs " : "",
		 cap & HOST_CAP_PIO_MULTI ? "pio " : "",
		 cap & HOST_CAP_SSC ? "slum " : "",
		 cap & HOST_CAP_PART ? "part " : "",
		 cap & HOST_CAP_CCC ? "ccc " : "",
		 cap & HOST_CAP_EMS ? "ems " : "",
		 cap & HOST_CAP_SXS ? "sxs " : "",
		 cap2 & HOST_CAP2_APST ? "apst " : "",
		 cap2 & HOST_CAP2_NVMHCI ? "nvmp " : "",
		 cap2 & HOST_CAP2_BOH ? "boh " : "");
}
EXPORT_SYMBOL_GPL(ahci_print_info);
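
/*
 * Illustrative sketch, not used anywhere in the driver (the *_sketch
 * helper name is ours): it isolates the CAP register fields decoded by
 * ahci_print_info() above.  The command-slot count and port count are
 * stored 0-based, and the interface speed support value maps to
 * 1.5/3/6 Gbps for 1/2/3 respectively, exactly as printed above.
 */
static inline void ahci_cap_decode_sketch(u32 cap, unsigned int *n_slots,
					  unsigned int *n_ports,
					  unsigned int *speed)
{
	*n_slots = ((cap >> 8) & 0x1f) + 1;	/* number of command slots */
	*n_ports = (cap & 0x1f) + 1;		/* number of ports */
	*speed = (cap >> 20) & 0xf;		/* interface speed support */
}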

void ahci_set_em_messages(struct ahci_host_priv *hpriv,
			  struct ata_port_info *pi)
{
	u8 messages;
	void __iomem *mmio = hpriv->mmio;
	u32 em_loc = readl(mmio + HOST_EM_LOC);
	u32 em_ctl = readl(mmio + HOST_EM_CTL);

	if (!ahci_em_messages || !(hpriv->cap & HOST_CAP_EMS))
		return;

	messages = (em_ctl & EM_CTRL_MSG_TYPE) >> 16;

	if (messages) {
		/* store em_loc */
		hpriv->em_loc = ((em_loc >> 16) * 4);
		hpriv->em_buf_sz = ((em_loc & 0xff) * 4);
		hpriv->em_msg_type = messages;
		pi->flags |= ATA_FLAG_EM;
		if (!(em_ctl & EM_CTL_ALHD))
			pi->flags |= ATA_FLAG_SW_ACTIVITY;
	}
}
EXPORT_SYMBOL_GPL(ahci_set_em_messages);
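
/*
 * Illustrative sketch, not used anywhere in the driver (the *_sketch
 * helper name is ours): it mirrors how ahci_set_em_messages() above
 * turns the enclosure management location register into a byte offset
 * and byte size; both fields are dword counts in the register, hence
 * the multiplication by 4.
 */
static inline void ahci_em_loc_decode_sketch(u32 em_loc, u32 *buf_off,
					     u32 *buf_sz)
{
	*buf_off = (em_loc >> 16) * 4;	/* EM buffer offset, in bytes */
	*buf_sz = (em_loc & 0xff) * 4;	/* EM buffer size, in bytes */
}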

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Common AHCI SATA low-level routines");
MODULE_LICENSE("GPL");