pci-calgary.c 31 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217121812191220122112221223122412251226122712281229123012311232123312341235123612371238123912401241124212431244124512461247124812491250125112521253125412551256125712581259126012611262126312641265126612671268126912701271
  1. /*
  2. * Derived from arch/powerpc/kernel/iommu.c
  3. *
  4. * Copyright IBM Corporation, 2006-2007
  5. * Copyright (C) 2006 Jon Mason <jdmason@kudzu.us>
  6. *
  7. * Author: Jon Mason <jdmason@kudzu.us>
  8. * Author: Muli Ben-Yehuda <muli@il.ibm.com>
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License as published by
  11. * the Free Software Foundation; either version 2 of the License, or
  12. * (at your option) any later version.
  13. *
  14. * This program is distributed in the hope that it will be useful,
  15. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  16. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  17. * GNU General Public License for more details.
  18. *
  19. * You should have received a copy of the GNU General Public License
  20. * along with this program; if not, write to the Free Software
  21. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  22. */
  23. #include <linux/kernel.h>
  24. #include <linux/init.h>
  25. #include <linux/types.h>
  26. #include <linux/slab.h>
  27. #include <linux/mm.h>
  28. #include <linux/spinlock.h>
  29. #include <linux/string.h>
  30. #include <linux/dma-mapping.h>
  31. #include <linux/init.h>
  32. #include <linux/bitops.h>
  33. #include <linux/pci_ids.h>
  34. #include <linux/pci.h>
  35. #include <linux/delay.h>
  36. #include <asm/proto.h>
  37. #include <asm/calgary.h>
  38. #include <asm/tce.h>
  39. #include <asm/pci-direct.h>
  40. #include <asm/system.h>
  41. #include <asm/dma.h>
  42. #include <asm/rio.h>
/*
 * Default state of Calgary translation; non-static, so presumably
 * toggled by boot-option parsing elsewhere — confirm against the
 * calgary= handler.
 */
#ifdef CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT
int use_calgary __read_mostly = 1;
#else
int use_calgary __read_mostly = 0;
#endif /* CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT */

#define PCI_DEVICE_ID_IBM_CALGARY 0x02a1
#define PCI_VENDOR_DEVICE_ID_CALGARY \
	(PCI_VENDOR_ID_IBM | PCI_DEVICE_ID_IBM_CALGARY << 16)

/* we need these for register space address calculation */
#define START_ADDRESS 0xfe000000
#define CHASSIS_BASE 0
#define ONE_BASED_CHASSIS_NUM 1
/* register offsets inside the host bridge space */
#define CALGARY_CONFIG_REG 0x0108
#define PHB_CSR_OFFSET 0x0110 /* Channel Status */
#define PHB_PLSSR_OFFSET 0x0120
#define PHB_CONFIG_RW_OFFSET 0x0160
#define PHB_IOBASE_BAR_LOW 0x0170
#define PHB_IOBASE_BAR_HIGH 0x0180
#define PHB_MEM_1_LOW 0x0190
#define PHB_MEM_1_HIGH 0x01A0
#define PHB_IO_ADDR_SIZE 0x01B0
#define PHB_MEM_1_SIZE 0x01C0
#define PHB_MEM_ST_OFFSET 0x01D0
#define PHB_AER_OFFSET 0x0200
#define PHB_CONFIG_0_HIGH 0x0220
#define PHB_CONFIG_0_LOW 0x0230
#define PHB_CONFIG_0_END 0x0240
#define PHB_MEM_2_LOW 0x02B0
#define PHB_MEM_2_HIGH 0x02C0
#define PHB_MEM_2_SIZE_HIGH 0x02D0
#define PHB_MEM_2_SIZE_LOW 0x02E0
#define PHB_DOSHOLE_OFFSET 0x08E0

/* PHB_CONFIG_RW register bits */
#define PHB_TCE_ENABLE 0x20000000
#define PHB_SLOT_DISABLE 0x1C000000
#define PHB_DAC_DISABLE 0x01000000
#define PHB_MEM2_ENABLE 0x00400000
#define PHB_MCSR_ENABLE 0x00100000
/* TAR (Table Address Register) */
#define TAR_SW_BITS 0x0000ffffffff800fUL /* bits under software control */
#define TAR_VALID 0x0000000000000008UL
/* CSR (Channel/DMA Status Register) */
#define CSR_AGENT_MASK 0xffe0ffff
/* CCR (Calgary Configuration Register) */
#define CCR_2SEC_TIMEOUT 0x000000000000000EUL

#define MAX_NUM_OF_PHBS 8 /* how many PHBs in total? */
#define MAX_NUM_CHASSIS 8 /* max number of chassis */
/* MAX_PHB_BUS_NUM is the maximal possible dev->bus->number */
#define MAX_PHB_BUS_NUM (MAX_NUM_OF_PHBS * MAX_NUM_CHASSIS * 2)
#define PHBS_PER_CALGARY 4
/*
 * register offsets in Calgary's internal register space,
 * all indexed by PHB number (0..PHBS_PER_CALGARY-1)
 */
static const unsigned long tar_offsets[] = {
	0x0580 /* TAR0 */,
	0x0588 /* TAR1 */,
	0x0590 /* TAR2 */,
	0x0598 /* TAR3 */
};

static const unsigned long split_queue_offsets[] = {
	0x4870 /* SPLIT QUEUE 0 */,
	0x5870 /* SPLIT QUEUE 1 */,
	0x6870 /* SPLIT QUEUE 2 */,
	0x7870 /* SPLIT QUEUE 3 */
};

static const unsigned long phb_offsets[] = {
	0x8000 /* PHB0 */,
	0x9000 /* PHB1 */,
	0xA000 /* PHB2 */,
	0xB000 /* PHB3 */
};

/* PHB debug registers */
static const unsigned long phb_debug_offsets[] = {
	0x4000 /* PHB 0 DEBUG */,
	0x5000 /* PHB 1 DEBUG */,
	0x6000 /* PHB 2 DEBUG */,
	0x7000 /* PHB 3 DEBUG */
};
/*
 * STUFF register for each debug PHB,
 * byte 1 = start bus number, byte 2 = end bus number
 */
#define PHB_DEBUG_STUFF_OFFSET 0x0020

#define EMERGENCY_PAGES 32 /* = 128KB */

/* TCE table size; non-static, presumably set by option parsing elsewhere */
unsigned int specified_table_size = TCE_TABLE_SIZE_UNSPECIFIED;
static int translate_empty_slots __read_mostly = 0;
static int calgary_detected __read_mostly = 0;

/* RIO topology tables (parsed elsewhere); __initdata: discarded after boot */
static struct rio_table_hdr *rio_table_hdr __initdata;
static struct scal_detail *scal_devs[MAX_NUMNODES] __initdata;
static struct rio_detail *rio_devs[MAX_NUMNODES * 4] __initdata;
/* per-PCI-bus bookkeeping, indexed by bus number (see bus_info[] below) */
struct calgary_bus_info {
	void *tce_space;	/* TCE table backing this bus, if any */
	unsigned char translation_disabled;
	signed char phbid;	/* index into the per-chip *_offsets arrays */
	void __iomem *bbar;	/* mapped config space of the owning chip */
};

static void calgary_handle_quirks(struct iommu_table *tbl, struct pci_dev *dev);
static void calgary_tce_cache_blast(struct iommu_table *tbl);

/* chipset callbacks stored in each iommu_table by calgary_setup_tar() */
static struct cal_chipset_ops calgary_chip_ops = {
	.handle_quirks = calgary_handle_quirks,
	.tce_cache_blast = calgary_tce_cache_blast
};

static struct calgary_bus_info bus_info[MAX_PHB_BUS_NUM] = { { NULL, 0, 0 }, };
/* enable this to stress test the chip's TCE cache */
#ifdef CONFIG_IOMMU_DEBUG
int debugging __read_mostly = 1;

/*
 * Check that every bit in [start, end) of @bitmap equals @expected.
 * Returns the index of the first mismatching bit, or ~0UL if all match.
 */
static inline unsigned long verify_bit_range(unsigned long* bitmap,
	int expected, unsigned long start, unsigned long end)
{
	unsigned long idx = start;

	BUG_ON(start >= end);

	while (idx < end) {
		if (!!test_bit(idx, bitmap) != expected)
			return idx;
		++idx;
	}

	/* all bits have the expected value */
	return ~0UL;
}
#else /* debugging is disabled */
int debugging __read_mostly = 0;

/* debug build only: without CONFIG_IOMMU_DEBUG this always reports success */
static inline unsigned long verify_bit_range(unsigned long* bitmap,
	int expected, unsigned long start, unsigned long end)
{
	return ~0UL;
}
#endif /* CONFIG_IOMMU_DEBUG */
  169. static inline unsigned int num_dma_pages(unsigned long dma, unsigned int dmalen)
  170. {
  171. unsigned int npages;
  172. npages = PAGE_ALIGN(dma + dmalen) - (dma & PAGE_MASK);
  173. npages >>= PAGE_SHIFT;
  174. return npages;
  175. }
  176. static inline int translate_phb(struct pci_dev* dev)
  177. {
  178. int disabled = bus_info[dev->bus->number].translation_disabled;
  179. return !disabled;
  180. }
  181. static void iommu_range_reserve(struct iommu_table *tbl,
  182. unsigned long start_addr, unsigned int npages)
  183. {
  184. unsigned long index;
  185. unsigned long end;
  186. unsigned long badbit;
  187. index = start_addr >> PAGE_SHIFT;
  188. /* bail out if we're asked to reserve a region we don't cover */
  189. if (index >= tbl->it_size)
  190. return;
  191. end = index + npages;
  192. if (end > tbl->it_size) /* don't go off the table */
  193. end = tbl->it_size;
  194. badbit = verify_bit_range(tbl->it_map, 0, index, end);
  195. if (badbit != ~0UL) {
  196. if (printk_ratelimit())
  197. printk(KERN_ERR "Calgary: entry already allocated at "
  198. "0x%lx tbl %p dma 0x%lx npages %u\n",
  199. badbit, tbl, start_addr, npages);
  200. }
  201. set_bit_string(tbl->it_map, index, npages);
  202. }
/*
 * Find @npages consecutive free entries in the table bitmap and mark
 * them allocated, returning the first entry index.  On first failure
 * the chip's TCE cache is flushed and the search retried from slot 0;
 * a second failure returns bad_dma_address (or panics if
 * panic_on_overflow is set).  Caller must hold tbl->it_lock.
 */
static unsigned long iommu_range_alloc(struct iommu_table *tbl,
	unsigned int npages)
{
	unsigned long offset;

	BUG_ON(npages == 0);

	/* start searching from the hint left by the previous allocation */
	offset = find_next_zero_string(tbl->it_map, tbl->it_hint,
				       tbl->it_size, npages);
	if (offset == ~0UL) {
		/* wrap around: flush the TCE cache, retry from the start */
		tbl->chip_ops->tce_cache_blast(tbl);
		offset = find_next_zero_string(tbl->it_map, 0,
					       tbl->it_size, npages);
		if (offset == ~0UL) {
			printk(KERN_WARNING "Calgary: IOMMU full.\n");
			if (panic_on_overflow)
				panic("Calgary: fix the allocator.\n");
			else
				return bad_dma_address;
		}
	}

	set_bit_string(tbl->it_map, offset, npages);
	tbl->it_hint = offset + npages;
	BUG_ON(tbl->it_hint > tbl->it_size);

	return offset;
}
/*
 * Allocate IOMMU entries for @npages starting at @vaddr and program
 * the TCEs.  Returns the resulting DMA address, or bad_dma_address on
 * failure.  Takes tbl->it_lock internally.
 */
static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *vaddr,
	unsigned int npages, int direction)
{
	unsigned long entry, flags;
	dma_addr_t ret = bad_dma_address;

	spin_lock_irqsave(&tbl->it_lock, flags);

	entry = iommu_range_alloc(tbl, npages);

	if (unlikely(entry == bad_dma_address))
		goto error;

	/* set the return dma address: entry's page address + in-page offset */
	ret = (entry << PAGE_SHIFT) | ((unsigned long)vaddr & ~PAGE_MASK);

	/* put the TCEs in the HW table */
	tce_build(tbl, entry, npages, (unsigned long)vaddr & PAGE_MASK,
		  direction);

	spin_unlock_irqrestore(&tbl->it_lock, flags);
	return ret;

error:
	spin_unlock_irqrestore(&tbl->it_lock, flags);
	printk(KERN_WARNING "Calgary: failed to allocate %u pages in "
	       "iommu %p\n", npages, tbl);
	return bad_dma_address;
}
/*
 * Release @npages of IOMMU space at @dma_addr: clear the TCEs and free
 * the bitmap bits.  Lock-free; caller must hold tbl->it_lock.
 * Attempts to free inside the reserved EMERGENCY_PAGES window are
 * rejected with a warning (driver passed back bad_dma_address).
 */
static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
	unsigned int npages)
{
	unsigned long entry;
	unsigned long badbit;
	unsigned long badend;

	/* were we called with bad_dma_address? */
	badend = bad_dma_address + (EMERGENCY_PAGES * PAGE_SIZE);
	if (unlikely((dma_addr >= bad_dma_address) && (dma_addr < badend))) {
		printk(KERN_ERR "Calgary: driver tried unmapping bad DMA "
		       "address 0x%Lx\n", dma_addr);
		WARN_ON(1);
		return;
	}

	entry = dma_addr >> PAGE_SHIFT;

	BUG_ON(entry + npages > tbl->it_size);

	tce_free(tbl, entry, npages);

	/* debug builds verify that all bits being cleared were in fact set */
	badbit = verify_bit_range(tbl->it_map, 1, entry, entry + npages);
	if (badbit != ~0UL) {
		if (printk_ratelimit())
			printk(KERN_ERR "Calgary: bit is off at 0x%lx "
			       "tbl %p dma 0x%Lx entry 0x%lx npages %u\n",
			       badbit, tbl, dma_addr, entry, npages);
	}

	__clear_bit_string(tbl->it_map, entry, npages);
}
/* Locked wrapper around __iommu_free(). */
static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
	unsigned int npages)
{
	unsigned long flags;

	spin_lock_irqsave(&tbl->it_lock, flags);

	__iommu_free(tbl, dma_addr, npages);

	spin_unlock_irqrestore(&tbl->it_lock, flags);
}
  283. static void __calgary_unmap_sg(struct iommu_table *tbl,
  284. struct scatterlist *sglist, int nelems, int direction)
  285. {
  286. while (nelems--) {
  287. unsigned int npages;
  288. dma_addr_t dma = sglist->dma_address;
  289. unsigned int dmalen = sglist->dma_length;
  290. if (dmalen == 0)
  291. break;
  292. npages = num_dma_pages(dma, dmalen);
  293. __iommu_free(tbl, dma, npages);
  294. sglist++;
  295. }
  296. }
  297. void calgary_unmap_sg(struct device *dev, struct scatterlist *sglist,
  298. int nelems, int direction)
  299. {
  300. unsigned long flags;
  301. struct iommu_table *tbl = to_pci_dev(dev)->bus->self->sysdata;
  302. if (!translate_phb(to_pci_dev(dev)))
  303. return;
  304. spin_lock_irqsave(&tbl->it_lock, flags);
  305. __calgary_unmap_sg(tbl, sglist, nelems, direction);
  306. spin_unlock_irqrestore(&tbl->it_lock, flags);
  307. }
  308. static int calgary_nontranslate_map_sg(struct device* dev,
  309. struct scatterlist *sg, int nelems, int direction)
  310. {
  311. int i;
  312. for (i = 0; i < nelems; i++ ) {
  313. struct scatterlist *s = &sg[i];
  314. BUG_ON(!s->page);
  315. s->dma_address = virt_to_bus(page_address(s->page) +s->offset);
  316. s->dma_length = s->length;
  317. }
  318. return nelems;
  319. }
/*
 * dma_ops map_sg: map each scatterlist entry through the IOMMU.
 * Returns the number of mapped entries, or 0 on failure after undoing
 * any partial mappings and poisoning the list with bad_dma_address.
 */
int calgary_map_sg(struct device *dev, struct scatterlist *sg,
	int nelems, int direction)
{
	struct iommu_table *tbl = to_pci_dev(dev)->bus->self->sysdata;
	unsigned long flags;
	unsigned long vaddr;
	unsigned int npages;
	unsigned long entry;
	int i;

	/* translation off for this PHB: hand out 1:1 bus addresses */
	if (!translate_phb(to_pci_dev(dev)))
		return calgary_nontranslate_map_sg(dev, sg, nelems, direction);

	spin_lock_irqsave(&tbl->it_lock, flags);

	for (i = 0; i < nelems; i++ ) {
		struct scatterlist *s = &sg[i];

		BUG_ON(!s->page);

		vaddr = (unsigned long)page_address(s->page) + s->offset;
		npages = num_dma_pages(vaddr, s->length);

		entry = iommu_range_alloc(tbl, npages);
		if (entry == bad_dma_address) {
			/* makes sure unmap knows to stop */
			s->dma_length = 0;
			goto error;
		}

		s->dma_address = (entry << PAGE_SHIFT) | s->offset;

		/* insert into HW table */
		tce_build(tbl, entry, npages, vaddr & PAGE_MASK,
			  direction);

		s->dma_length = s->length;
	}

	spin_unlock_irqrestore(&tbl->it_lock, flags);

	return nelems;
error:
	/* the zeroed dma_length above stops the walk at the failed entry */
	__calgary_unmap_sg(tbl, sg, nelems, direction);
	for (i = 0; i < nelems; i++) {
		sg[i].dma_address = bad_dma_address;
		sg[i].dma_length = 0;
	}
	spin_unlock_irqrestore(&tbl->it_lock, flags);
	return 0;
}
  360. dma_addr_t calgary_map_single(struct device *dev, void *vaddr,
  361. size_t size, int direction)
  362. {
  363. dma_addr_t dma_handle = bad_dma_address;
  364. unsigned long uaddr;
  365. unsigned int npages;
  366. struct iommu_table *tbl = to_pci_dev(dev)->bus->self->sysdata;
  367. uaddr = (unsigned long)vaddr;
  368. npages = num_dma_pages(uaddr, size);
  369. if (translate_phb(to_pci_dev(dev)))
  370. dma_handle = iommu_alloc(tbl, vaddr, npages, direction);
  371. else
  372. dma_handle = virt_to_bus(vaddr);
  373. return dma_handle;
  374. }
  375. void calgary_unmap_single(struct device *dev, dma_addr_t dma_handle,
  376. size_t size, int direction)
  377. {
  378. struct iommu_table *tbl = to_pci_dev(dev)->bus->self->sysdata;
  379. unsigned int npages;
  380. if (!translate_phb(to_pci_dev(dev)))
  381. return;
  382. npages = num_dma_pages(dma_handle, size);
  383. iommu_free(tbl, dma_handle, npages);
  384. }
/*
 * dma_ops alloc_coherent: allocate zeroed, page-aligned memory and map
 * it through the IOMMU (or 1:1 when translation is off for this PHB).
 * Returns NULL on failure; on success *dma_handle is the bus address.
 */
void* calgary_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t flag)
{
	void *ret = NULL;
	dma_addr_t mapping;
	unsigned int npages, order;
	struct iommu_table *tbl;

	tbl = to_pci_dev(dev)->bus->self->sysdata;

	size = PAGE_ALIGN(size); /* size rounded up to full pages */
	npages = size >> PAGE_SHIFT;
	order = get_order(size);

	/* alloc enough pages (and possibly more) */
	ret = (void *)__get_free_pages(flag, order);
	if (!ret)
		goto error;
	memset(ret, 0, size);

	if (translate_phb(to_pci_dev(dev))) {
		/* set up tces to cover the allocated range */
		mapping = iommu_alloc(tbl, ret, npages, DMA_BIDIRECTIONAL);
		if (mapping == bad_dma_address)
			goto free;

		*dma_handle = mapping;
	} else /* non translated slot */
		*dma_handle = virt_to_bus(ret);

	return ret;

free:
	free_pages((unsigned long)ret, get_order(size));
	ret = NULL;
error:
	return ret;
}
/* dma_mapping_ops used for devices that sit behind a Calgary IOMMU */
static const struct dma_mapping_ops calgary_dma_ops = {
	.alloc_coherent = calgary_alloc_coherent,
	.map_single = calgary_map_single,
	.unmap_single = calgary_unmap_single,
	.map_sg = calgary_map_sg,
	.unmap_sg = calgary_unmap_sg,
};
/* bus number -> mapped config-space base of the Calgary owning that bus */
static inline void __iomem * busno_to_bbar(unsigned char num)
{
	return bus_info[num].bbar;
}

/* bus number -> PHB index within its chip (indexes the *_offsets arrays) */
static inline int busno_to_phbid(unsigned char num)
{
	return bus_info[num].phbid;
}

/* per-PHB register offsets, looked up via the bus's PHB id */
static inline unsigned long split_queue_offset(unsigned char num)
{
	size_t idx = busno_to_phbid(num);

	return split_queue_offsets[idx];
}

static inline unsigned long tar_offset(unsigned char num)
{
	size_t idx = busno_to_phbid(num);

	return tar_offsets[idx];
}

static inline unsigned long phb_offset(unsigned char num)
{
	size_t idx = busno_to_phbid(num);

	return phb_offsets[idx];
}
/*
 * Form a register address inside the chip's mapped space.  Uses OR
 * rather than addition — assumes the low bits of @bar never overlap
 * @offset (NOTE(review): relies on alignment of the ioremap'd BBAR;
 * confirm against the mapping in calgary_locate_bbars).
 */
static inline void __iomem* calgary_reg(void __iomem *bar, unsigned long offset)
{
	unsigned long target = ((unsigned long)bar) | offset;

	return (void __iomem*)target;
}
/*
 * Flush the chip's TCE cache for this PHB: quiesce DMA by disabling
 * bus arbitration, wait for the split queues to drain (bounded poll),
 * rewrite the TAR to invalidate the cache, then re-enable arbitration.
 * Register access order matters here.
 */
static void calgary_tce_cache_blast(struct iommu_table *tbl)
{
	u64 val;
	u32 aer;
	int i = 0;
	void __iomem *bbar = tbl->bbar;
	void __iomem *target;

	/* disable arbitration on the bus */
	target = calgary_reg(bbar, phb_offset(tbl->it_busno) | PHB_AER_OFFSET);
	aer = readl(target);
	writel(0, target);

	/* read plssr to ensure it got there */
	target = calgary_reg(bbar, phb_offset(tbl->it_busno) | PHB_PLSSR_OFFSET);
	val = readl(target);

	/* poll split queues until all DMA activity is done */
	target = calgary_reg(bbar, split_queue_offset(tbl->it_busno));
	do {
		val = readq(target);
		i++;
	} while ((val & 0xff) != 0xff && i < 100);
	if (i == 100)
		printk(KERN_WARNING "Calgary: PCI bus not quiesced, "
		       "continuing anyway\n");

	/* invalidate TCE cache */
	target = calgary_reg(bbar, tar_offset(tbl->it_busno));
	writeq(tbl->tar_val, target);

	/* enable arbitration */
	target = calgary_reg(bbar, phb_offset(tbl->it_busno) | PHB_AER_OFFSET);
	writel(aer, target);
	(void)readl(target); /* flush */
}
/*
 * Reserve the IOMMU pages covering [start, limit] in this device's
 * table.  @limit is first rounded up to the next 1MB boundary.
 */
static void __init calgary_reserve_mem_region(struct pci_dev *dev, u64 start,
	u64 limit)
{
	unsigned int numpages;

	/* round limit up to the next 1MB boundary (inclusive end -> count) */
	limit = limit | 0xfffff;
	limit++;

	numpages = ((limit - start) >> PAGE_SHIFT);
	iommu_range_reserve(dev->sysdata, start, numpages);
}
/*
 * Reserve the PHB's peripheral MEM_1 window in the IOMMU bitmap so its
 * addresses are never handed out for DMA.
 */
static void __init calgary_reserve_peripheral_mem_1(struct pci_dev *dev)
{
	void __iomem *target;
	u64 low, high, sizelow;
	u64 start, limit;
	struct iommu_table *tbl = dev->sysdata;
	unsigned char busnum = dev->bus->number;
	void __iomem *bbar = tbl->bbar;

	/* peripheral MEM_1 region */
	target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_1_LOW);
	low = be32_to_cpu(readl(target));
	target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_1_HIGH);
	high = be32_to_cpu(readl(target));
	target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_1_SIZE);
	sizelow = be32_to_cpu(readl(target));

	start = (high << 32) | low;
	/*
	 * NOTE(review): limit is the raw SIZE register value, unlike the
	 * MEM_2 variant which combines high/low size words — confirm
	 * against the chipset register spec.
	 */
	limit = sizelow;

	calgary_reserve_mem_region(dev, start, limit);
}
/*
 * Reserve the PHB's peripheral MEM_2 window in the IOMMU bitmap, but
 * only if MEM_2 is enabled in the PHB config register.
 */
static void __init calgary_reserve_peripheral_mem_2(struct pci_dev *dev)
{
	void __iomem *target;
	u32 val32;
	u64 low, high, sizelow, sizehigh;
	u64 start, limit;
	struct iommu_table *tbl = dev->sysdata;
	unsigned char busnum = dev->bus->number;
	void __iomem *bbar = tbl->bbar;

	/* is it enabled? */
	target = calgary_reg(bbar, phb_offset(busnum) | PHB_CONFIG_RW_OFFSET);
	val32 = be32_to_cpu(readl(target));
	if (!(val32 & PHB_MEM2_ENABLE))
		return;

	target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_2_LOW);
	low = be32_to_cpu(readl(target));
	target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_2_HIGH);
	high = be32_to_cpu(readl(target));
	target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_2_SIZE_LOW);
	sizelow = be32_to_cpu(readl(target));
	target = calgary_reg(bbar, phb_offset(busnum) | PHB_MEM_2_SIZE_HIGH);
	sizehigh = be32_to_cpu(readl(target));

	start = (high << 32) | low;
	limit = (sizehigh << 32) | sizelow;

	calgary_reserve_mem_region(dev, start, limit);
}
/*
 * some regions of the IO address space do not get translated, so we
 * must not give devices IO addresses in those regions. The regions
 * are the 640KB-1MB region and the two PCI peripheral memory holes.
 * Reserve all of them in the IOMMU bitmap to avoid giving them out
 * later.
 */
static void __init calgary_reserve_regions(struct pci_dev *dev)
{
	unsigned int npages;
	u64 start;
	struct iommu_table *tbl = dev->sysdata;

	/* reserve EMERGENCY_PAGES from bad_dma_address and up */
	iommu_range_reserve(tbl, bad_dma_address, EMERGENCY_PAGES);

	/* avoid the BIOS/VGA first 640KB-1MB region */
	start = (640 * 1024);
	npages = ((1024 - 640) * 1024) >> PAGE_SHIFT;
	iommu_range_reserve(tbl, start, npages);

	/* reserve the two PCI peripheral memory regions in IO space */
	calgary_reserve_peripheral_mem_1(dev);
	calgary_reserve_peripheral_mem_2(dev);
}
/*
 * Build this PHB's TCE table, reserve the untranslatable regions, and
 * point the chip's TAR (Table Address Register) at the table.  Returns
 * 0 on success or the error from build_tce_table().
 */
static int __init calgary_setup_tar(struct pci_dev *dev, void __iomem *bbar)
{
	u64 val64;
	u64 table_phys;
	void __iomem *target;
	int ret;
	struct iommu_table *tbl;

	/* build TCE tables for each PHB */
	ret = build_tce_table(dev, bbar);
	if (ret)
		return ret;

	tbl = dev->sysdata;
	tbl->it_base = (unsigned long)bus_info[dev->bus->number].tce_space;
	/* start from a clean slate: clear every TCE in the new table */
	tce_free(tbl, 0, tbl->it_size);

	tbl->chip_ops = &calgary_chip_ops;

	calgary_reserve_regions(dev);

	/* set TARs for each PHB */
	target = calgary_reg(bbar, tar_offset(dev->bus->number));
	val64 = be64_to_cpu(readq(target));

	/* zero out all TAR bits under sw control */
	val64 &= ~TAR_SW_BITS;

	tbl = dev->sysdata;

	table_phys = (u64)__pa(tbl->it_base);
	val64 |= table_phys;

	BUG_ON(specified_table_size > TCE_TABLE_SIZE_8M);
	val64 |= (u64) specified_table_size;

	/* cache the big-endian TAR value for later TCE cache flushes */
	tbl->tar_val = cpu_to_be64(val64);

	writeq(tbl->tar_val, target);
	readq(target); /* flush */

	return 0;
}
/*
 * Tear down a PHB's translation state: clear the software-controlled
 * TAR bits and free the allocation bitmap and iommu_table.  The TCE
 * table itself was bootmem-allocated and cannot be freed this late.
 */
static void __init calgary_free_bus(struct pci_dev *dev)
{
	u64 val64;
	struct iommu_table *tbl = dev->sysdata;
	void __iomem *target;
	unsigned int bitmapsz;

	target = calgary_reg(tbl->bbar, tar_offset(dev->bus->number));
	val64 = be64_to_cpu(readq(target));
	val64 &= ~TAR_SW_BITS;
	writeq(cpu_to_be64(val64), target);
	readq(target); /* flush */

	bitmapsz = tbl->it_size / BITS_PER_BYTE;
	free_pages((unsigned long)tbl->it_map, get_order(bitmapsz));
	tbl->it_map = NULL;

	kfree(tbl);
	dev->sysdata = NULL;

	/* Can't free bootmem allocated memory after system is up :-( */
	bus_info[dev->bus->number].tce_space = NULL;
}
/*
 * Timer callback: poll the PHB's CSR for DMA errors.  On error, report
 * it, clear the CSR and fence off the offending bus (the timer is NOT
 * re-armed); otherwise re-arm for 2 seconds from now.
 */
static void calgary_watchdog(unsigned long data)
{
	struct pci_dev *dev = (struct pci_dev *)data;
	struct iommu_table *tbl = dev->sysdata;
	void __iomem *bbar = tbl->bbar;
	u32 val32;
	void __iomem *target;

	target = calgary_reg(bbar, phb_offset(tbl->it_busno) | PHB_CSR_OFFSET);
	val32 = be32_to_cpu(readl(target));

	/* If no error, the agent ID in the CSR is not valid */
	if (val32 & CSR_AGENT_MASK) {
		printk(KERN_EMERG "calgary_watchdog: DMA error on PHB %#x, "
		       "CSR = %#x\n", dev->bus->number, val32);
		writel(0, target);

		/* Disable bus that caused the error */
		target = calgary_reg(bbar, phb_offset(tbl->it_busno) |
				     PHB_CONFIG_RW_OFFSET);
		val32 = be32_to_cpu(readl(target));
		val32 |= PHB_SLOT_DISABLE;
		writel(cpu_to_be32(val32), target);
		readl(target); /* flush */
	} else {
		/* Reset the timer */
		mod_timer(&tbl->watchdog_timer, jiffies + 2 * HZ);
	}
}
/*
 * Program the PCI split completion timeout for one PHB in the global
 * Calgary Configuration Register.  Each PHB owns a 4-bit field at a
 * PHB-specific bit position within the 64-bit register.
 */
static void __init calgary_set_split_completion_timeout(void __iomem *bbar,
	unsigned char busnum, unsigned long timeout)
{
	u64 val64;
	void __iomem *target;
	unsigned int phb_shift = ~0; /* silence gcc */
	u64 mask;

	switch (busno_to_phbid(busnum)) {
	case 0: phb_shift = (63 - 19);
		break;
	case 1: phb_shift = (63 - 23);
		break;
	case 2: phb_shift = (63 - 27);
		break;
	case 3: phb_shift = (63 - 35);
		break;
	default:
		BUG_ON(busno_to_phbid(busnum));
	}

	target = calgary_reg(bbar, CALGARY_CONFIG_REG);
	val64 = be64_to_cpu(readq(target));

	/* zero out this PHB's timer bits */
	mask = ~(0xFUL << phb_shift);
	val64 &= mask;
	val64 |= (timeout << phb_shift);
	writeq(cpu_to_be64(val64), target);
	readq(target); /* flush */
}
  662. static void __init calgary_handle_quirks(struct iommu_table *tbl,
  663. struct pci_dev *dev)
  664. {
  665. unsigned char busnum = dev->bus->number;
  666. /*
  667. * Give split completion a longer timeout on bus 1 for aic94xx
  668. * http://bugzilla.kernel.org/show_bug.cgi?id=7180
  669. */
  670. if (busnum == 1)
  671. calgary_set_split_completion_timeout(tbl->bbar, busnum,
  672. CCR_2SEC_TIMEOUT);
  673. }
/*
 * Switch on TCE translation for this PHB and kick off the DMA-error
 * watchdog timer (fires immediately, then every 2s while error-free).
 */
static void __init calgary_enable_translation(struct pci_dev *dev)
{
	u32 val32;
	unsigned char busnum;
	void __iomem *target;
	void __iomem *bbar;
	struct iommu_table *tbl;

	busnum = dev->bus->number;
	tbl = dev->sysdata;
	bbar = tbl->bbar;

	/* enable TCE in PHB Config Register */
	target = calgary_reg(bbar, phb_offset(busnum) | PHB_CONFIG_RW_OFFSET);
	val32 = be32_to_cpu(readl(target));
	val32 |= PHB_TCE_ENABLE | PHB_DAC_DISABLE | PHB_MCSR_ENABLE;

	printk(KERN_INFO "Calgary: enabling translation on PHB %#x\n", busnum);
	printk(KERN_INFO "Calgary: errant DMAs will now be prevented on this "
	       "bus.\n");

	writel(cpu_to_be32(val32), target);
	readl(target); /* flush */

	init_timer(&tbl->watchdog_timer);
	tbl->watchdog_timer.function = &calgary_watchdog;
	tbl->watchdog_timer.data = (unsigned long)dev;
	mod_timer(&tbl->watchdog_timer, jiffies);
}
/*
 * Switch off TCE translation for this PHB and stop its watchdog,
 * waiting for an in-flight timer callback to finish.
 */
static void __init calgary_disable_translation(struct pci_dev *dev)
{
	u32 val32;
	unsigned char busnum;
	void __iomem *target;
	void __iomem *bbar;
	struct iommu_table *tbl;

	busnum = dev->bus->number;
	tbl = dev->sysdata;
	bbar = tbl->bbar;

	/* disable TCE in PHB Config Register */
	target = calgary_reg(bbar, phb_offset(busnum) | PHB_CONFIG_RW_OFFSET);
	val32 = be32_to_cpu(readl(target));
	val32 &= ~(PHB_TCE_ENABLE | PHB_DAC_DISABLE | PHB_MCSR_ENABLE);

	printk(KERN_INFO "Calgary: disabling translation on PHB %#x!\n", busnum);
	writel(cpu_to_be32(val32), target);
	readl(target); /* flush */

	del_timer_sync(&tbl->watchdog_timer);
}
/*
 * Minimal setup for a PHB whose translation is disabled: no iommu
 * table, but bus->self must still point at the Calgary device so the
 * dma_ops can reach its sysdata.  (Name keeps a historical typo;
 * renaming would touch its caller.)
 */
static void __init calgary_init_one_nontraslated(struct pci_dev *dev)
{
	pci_dev_get(dev);
	dev->sysdata = NULL;
	dev->bus->self = dev;
}
/*
 * Fully initialize one translated Calgary PHB: build its TCE table and
 * TAR, apply chipset quirks, then enable translation.  Returns 0 on
 * success or the error from calgary_setup_tar().
 */
static int __init calgary_init_one(struct pci_dev *dev)
{
	void __iomem *bbar;
	struct iommu_table *tbl;
	int ret;

	BUG_ON(dev->bus->number >= MAX_PHB_BUS_NUM);

	bbar = busno_to_bbar(dev->bus->number);
	ret = calgary_setup_tar(dev, bbar);
	if (ret)
		goto done;

	/* pin the device and make the bus point at it (it is its own PHB) */
	pci_dev_get(dev);
	dev->bus->self = dev;

	tbl = dev->sysdata;
	tbl->chip_ops->handle_quirks(tbl, dev);

	calgary_enable_translation(dev);

	return 0;

done:
	return ret;
}
  742. static int __init calgary_locate_bbars(void)
  743. {
  744. int ret;
  745. int rioidx, phb, bus;
  746. void __iomem *bbar;
  747. void __iomem *target;
  748. unsigned long offset;
  749. u8 start_bus, end_bus;
  750. u32 val;
  751. ret = -ENODATA;
  752. for (rioidx = 0; rioidx < rio_table_hdr->num_rio_dev; rioidx++) {
  753. struct rio_detail *rio = rio_devs[rioidx];
  754. if ((rio->type != COMPAT_CALGARY) && (rio->type != ALT_CALGARY))
  755. continue;
  756. /* map entire 1MB of Calgary config space */
  757. bbar = ioremap_nocache(rio->BBAR, 1024 * 1024);
  758. if (!bbar)
  759. goto error;
  760. for (phb = 0; phb < PHBS_PER_CALGARY; phb++) {
  761. offset = phb_debug_offsets[phb] | PHB_DEBUG_STUFF_OFFSET;
  762. target = calgary_reg(bbar, offset);
  763. val = be32_to_cpu(readl(target));
  764. start_bus = (u8)((val & 0x00FF0000) >> 16);
  765. end_bus = (u8)((val & 0x0000FF00) >> 8);
  766. for (bus = start_bus; bus <= end_bus; bus++) {
  767. bus_info[bus].bbar = bbar;
  768. bus_info[bus].phbid = phb;
  769. }
  770. }
  771. }
  772. return 0;
  773. error:
  774. /* scan bus_info and iounmap any bbars we previously ioremap'd */
  775. for (bus = 0; bus < ARRAY_SIZE(bus_info); bus++)
  776. if (bus_info[bus].bbar)
  777. iounmap(bus_info[bus].bbar);
  778. return ret;
  779. }
/*
 * Find every IBM Calgary bridge in the system and enable translation
 * on the PHBs we are configured to translate for; the remaining PHBs
 * are recorded as non-translated.  On failure, walk back over the
 * bridges in reverse and undo the work done so far.
 *
 * Returns 0 on success or a negative error code.
 */
static int __init calgary_init(void)
{
	int ret;
	struct pci_dev *dev = NULL;

	ret = calgary_locate_bbars();
	if (ret)
		return ret;

	do {
		/*
		 * pci_get_device() drops the reference on the device
		 * passed as "from", so the "continue" paths below do
		 * not leak references across iterations.
		 */
		dev = pci_get_device(PCI_VENDOR_ID_IBM,
				     PCI_DEVICE_ID_IBM_CALGARY,
				     dev);
		if (!dev)
			break;
		if (!translate_phb(dev)) {
			calgary_init_one_nontraslated(dev);
			continue;
		}
		/* skip PHBs with no devices behind them, unless overridden */
		if (!bus_info[dev->bus->number].tce_space && !translate_empty_slots)
			continue;
		ret = calgary_init_one(dev);
		if (ret)
			goto error;
	} while (1);

	return ret;

error:
	/* unwind: revisit the already-processed bridges in reverse order */
	do {
		dev = pci_get_device_reverse(PCI_VENDOR_ID_IBM,
					     PCI_DEVICE_ID_IBM_CALGARY,
					     dev);
		if (!dev)
			break;
		if (!translate_phb(dev)) {
			/* presumably balances calgary_init_one_nontraslated()'s
			 * pci_dev_get() -- verify against refcount rules */
			pci_dev_put(dev);
			continue;
		}
		if (!bus_info[dev->bus->number].tce_space && !translate_empty_slots)
			continue;
		calgary_disable_translation(dev);
		calgary_free_bus(dev);
		pci_dev_put(dev); /* Undo calgary_init_one()'s pci_dev_get() */
	} while (1);

	return ret;
}
  823. static inline int __init determine_tce_table_size(u64 ram)
  824. {
  825. int ret;
  826. if (specified_table_size != TCE_TABLE_SIZE_UNSPECIFIED)
  827. return specified_table_size;
  828. /*
  829. * Table sizes are from 0 to 7 (TCE_TABLE_SIZE_64K to
  830. * TCE_TABLE_SIZE_8M). Table size 0 has 8K entries and each
  831. * larger table size has twice as many entries, so shift the
  832. * max ram address by 13 to divide by 8K and then look at the
  833. * order of the result to choose between 0-7.
  834. */
  835. ret = get_order(ram >> 13);
  836. if (ret > TCE_TABLE_SIZE_8M)
  837. ret = TCE_TABLE_SIZE_8M;
  838. return ret;
  839. }
/*
 * Parse the Rio Grande table headed by rio_table_hdr into the
 * scal_devs[] and rio_devs[] pointer arrays.  The detail records sit
 * packed back to back after the 3-byte header; their per-record sizes
 * depend on the table version.
 *
 * Returns 0 on success, -ENODEV when there are more scalability
 * devices than MAX_NUMNODES, -EPROTO on an unknown table version.
 *
 * NOTE(review): num_rio_dev is not range-checked against the size of
 * rio_devs[] here -- confirm that array is sized for the worst case.
 */
static int __init build_detail_arrays(void)
{
	unsigned long ptr;
	int i, scal_detail_size, rio_detail_size;

	if (rio_table_hdr->num_scal_dev > MAX_NUMNODES){
		printk(KERN_WARNING
			"Calgary: MAX_NUMNODES too low! Defined as %d, "
			"but system has %d nodes.\n",
			MAX_NUMNODES, rio_table_hdr->num_scal_dev);
		return -ENODEV;
	}

	/* record sizes (in bytes) differ between table versions */
	switch (rio_table_hdr->version){
	case 2:
		scal_detail_size = 11;
		rio_detail_size = 13;
		break;
	case 3:
		scal_detail_size = 12;
		rio_detail_size = 15;
		break;
	default:
		printk(KERN_WARNING
		       "Calgary: Invalid Rio Grande Table Version: %d\n",
		       rio_table_hdr->version);
		return -EPROTO;
	}

	/* scalability detail records follow the 3-byte header... */
	ptr = ((unsigned long)rio_table_hdr) + 3;
	for (i = 0; i < rio_table_hdr->num_scal_dev;
		     i++, ptr += scal_detail_size)
		scal_devs[i] = (struct scal_detail *)ptr;

	/* ...immediately followed by the rio detail records */
	for (i = 0; i < rio_table_hdr->num_rio_dev;
		     i++, ptr += rio_detail_size)
		rio_devs[i] = (struct rio_detail *)ptr;

	return 0;
}
/*
 * Boot-time detection of Calgary IOMMUs.  Locates the Rio Grande table
 * in the BIOS EBDA area, builds the detail arrays, and pre-allocates a
 * TCE table for every populated (or force-translated) PHB.  On success
 * sets iommu_detected and calgary_detected for calgary_iommu_init().
 */
void __init detect_calgary(void)
{
	u32 val;
	int bus;
	void *tbl;
	int calgary_found = 0;
	unsigned long ptr;
	unsigned int offset, prev_offset;
	int ret;

	/*
	 * if the user specified iommu=off or iommu=soft or we found
	 * another HW IOMMU already, bail out.
	 */
	if (swiotlb || no_iommu || iommu_detected)
		return;

	if (!use_calgary)
		return;

	if (!early_pci_allowed())
		return;

	printk(KERN_DEBUG "Calgary: detecting Calgary via BIOS EBDA area\n");

	ptr = (unsigned long)phys_to_virt(get_bios_ebda());

	rio_table_hdr = NULL;
	prev_offset = 0;
	offset = 0x180;
	/*
	 * The next offset is stored in the 1st word.
	 * Only parse up until the offset increases:
	 */
	while (offset > prev_offset) {
		/* The block id is stored in the 2nd word */
		if (*((unsigned short *)(ptr + offset + 2)) == 0x4752){
			/* set the pointer past the offset & block id */
			rio_table_hdr = (struct rio_table_hdr *)(ptr + offset + 4);
			break;
		}
		prev_offset = offset;
		offset = *((unsigned short *)(ptr + offset));
	}
	if (!rio_table_hdr) {
		printk(KERN_DEBUG "Calgary: Unable to locate Rio Grande table "
		       "in EBDA - bailing!\n");
		return;
	}

	ret = build_detail_arrays();
	if (ret) {
		printk(KERN_DEBUG "Calgary: build_detail_arrays ret %d\n", ret);
		return;
	}

	specified_table_size = determine_tce_table_size(end_pfn * PAGE_SIZE);

	for (bus = 0; bus < MAX_PHB_BUS_NUM; bus++) {
		int dev;
		struct calgary_bus_info *info = &bus_info[bus];

		/* only buses rooted by a Calgary bridge are of interest */
		if (read_pci_config(bus, 0, 0, 0) != PCI_VENDOR_DEVICE_ID_CALGARY)
			continue;

		if (info->translation_disabled)
			continue;

		/*
		 * Scan the slots of the PCI bus to see if there is a device present.
		 * The parent bus will be the zero-ith device, so start at 1.
		 */
		for (dev = 1; dev < 8; dev++) {
			val = read_pci_config(bus, dev, 0, 0);
			if (val != 0xffffffff || translate_empty_slots) {
				/* a device (or forced empty slot): reserve a table now */
				tbl = alloc_tce_table();
				if (!tbl)
					goto cleanup;
				info->tce_space = tbl;
				calgary_found = 1;
				break;
			}
		}
	}

	printk(KERN_DEBUG "Calgary: finished detection, Calgary %s\n",
	       calgary_found ? "found" : "not found");

	if (calgary_found) {
		iommu_detected = 1;
		calgary_detected = 1;
		printk(KERN_INFO "PCI-DMA: Calgary IOMMU detected.\n");
		printk(KERN_INFO "PCI-DMA: Calgary TCE table spec is %d, "
		       "CONFIG_IOMMU_DEBUG is %s.\n", specified_table_size,
		       debugging ? "enabled" : "disabled");
	}
	return;

cleanup:
	/* free the TCE tables allocated for earlier buses; the failing
	 * bus itself never had info->tce_space assigned */
	for (--bus; bus >= 0; --bus) {
		struct calgary_bus_info *info = &bus_info[bus];
		if (info->tce_space)
			free_tce_table(info->tce_space);
	}
}
  965. int __init calgary_iommu_init(void)
  966. {
  967. int ret;
  968. if (no_iommu || swiotlb)
  969. return -ENODEV;
  970. if (!calgary_detected)
  971. return -ENODEV;
  972. /* ok, we're trying to use Calgary - let's roll */
  973. printk(KERN_INFO "PCI-DMA: Using Calgary IOMMU\n");
  974. ret = calgary_init();
  975. if (ret) {
  976. printk(KERN_ERR "PCI-DMA: Calgary init failed %d, "
  977. "falling back to no_iommu\n", ret);
  978. if (end_pfn > MAX_DMA32_PFN)
  979. printk(KERN_ERR "WARNING more than 4GB of memory, "
  980. "32bit PCI may malfunction.\n");
  981. return ret;
  982. }
  983. force_iommu = 1;
  984. bad_dma_address = 0x0;
  985. dma_ops = &calgary_dma_ops;
  986. return 0;
  987. }
/*
 * Parse the "calgary=" boot option.  Comma-separated keywords:
 *   64k/128k/256k/512k/1M/2M/4M/8M - force the TCE table size
 *   translate_empty_slots          - also translate PHBs with no devices
 *   disable=<busnum>               - turn off translation for one PHB
 *
 * Always returns 1 (option consumed).  Note the strncmp() prefix
 * matching: a keyword followed by trailing garbage still matches.
 */
static int __init calgary_parse_options(char *p)
{
	unsigned int bridge;
	size_t len;
	char* endp;

	while (*p) {
		/* table-size keywords: first match wins */
		if (!strncmp(p, "64k", 3))
			specified_table_size = TCE_TABLE_SIZE_64K;
		else if (!strncmp(p, "128k", 4))
			specified_table_size = TCE_TABLE_SIZE_128K;
		else if (!strncmp(p, "256k", 4))
			specified_table_size = TCE_TABLE_SIZE_256K;
		else if (!strncmp(p, "512k", 4))
			specified_table_size = TCE_TABLE_SIZE_512K;
		else if (!strncmp(p, "1M", 2))
			specified_table_size = TCE_TABLE_SIZE_1M;
		else if (!strncmp(p, "2M", 2))
			specified_table_size = TCE_TABLE_SIZE_2M;
		else if (!strncmp(p, "4M", 2))
			specified_table_size = TCE_TABLE_SIZE_4M;
		else if (!strncmp(p, "8M", 2))
			specified_table_size = TCE_TABLE_SIZE_8M;

		len = strlen("translate_empty_slots");
		if (!strncmp(p, "translate_empty_slots", len))
			translate_empty_slots = 1;

		len = strlen("disable");
		if (!strncmp(p, "disable", len)) {
			/* accept both "disable=<n>" and "disable<n>" */
			p += len;
			if (*p == '=')
				++p;
			if (*p == '\0')
				break;
			bridge = simple_strtol(p, &endp, 0);
			if (p == endp)
				break;	/* no number after "disable" */

			if (bridge < MAX_PHB_BUS_NUM) {
				printk(KERN_INFO "Calgary: disabling "
				       "translation for PHB %#x\n", bridge);
				bus_info[bridge].translation_disabled = 1;
			}
		}

		/* advance to the next comma-separated keyword */
		p = strpbrk(p, ",");
		if (!p)
			break;
		p++; /* skip ',' */
	}
	return 1;
}
  1036. __setup("calgary=", calgary_parse_options);