atombios_dp.c

/*
 * Copyright 2007-8 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 */
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
#include "atom.h"
#include "atom-bits.h"
#include "drm_dp_helper.h"

/* move these to drm_dp_helper.c/h */
#define DP_LINK_CONFIGURATION_SIZE 9
#define DP_LINK_STATUS_SIZE 6
#define DP_DPCD_SIZE 8

static char *voltage_names[] = {
	"0.4V", "0.6V", "0.8V", "1.2V"
};
static char *pre_emph_names[] = {
	"0dB", "3.5dB", "6dB", "9.5dB"
};

static const int dp_clocks[] = {
	54000,  /* 1 lane, 1.62 GHz */
	90000,  /* 1 lane, 2.70 GHz */
	108000, /* 2 lane, 1.62 GHz */
	180000, /* 2 lane, 2.70 GHz */
	216000, /* 4 lane, 1.62 GHz */
	360000, /* 4 lane, 2.70 GHz */
};

static const int num_dp_clocks = sizeof(dp_clocks) / sizeof(int);

/* common helper functions */
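
/* Smallest lane count able to carry mode_clock given the maximum link rate
 * and lane count the sink advertises in its DPCD; returns 0 if no supported
 * lane/rate combination can carry the mode.
 */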
static int dp_lanes_for_mode_clock(u8 dpcd[DP_DPCD_SIZE], int mode_clock)
{
	int i;
	u8 max_link_bw;
	u8 max_lane_count;

	if (!dpcd)
		return 0;

	max_link_bw = dpcd[DP_MAX_LINK_RATE];
	max_lane_count = dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;

	switch (max_link_bw) {
	case DP_LINK_BW_1_62:
	default:
		for (i = 0; i < num_dp_clocks; i++) {
			if (i % 2)
				continue;
			switch (max_lane_count) {
			case 1:
				if (i > 1)
					return 0;
				break;
			case 2:
				if (i > 3)
					return 0;
				break;
			case 4:
			default:
				break;
			}
			if (dp_clocks[i] > mode_clock) {
				if (i < 2)
					return 1;
				else if (i < 4)
					return 2;
				else
					return 4;
			}
		}
		break;
	case DP_LINK_BW_2_7:
		for (i = 0; i < num_dp_clocks; i++) {
			switch (max_lane_count) {
			case 1:
				if (i > 1)
					return 0;
				break;
			case 2:
				if (i > 3)
					return 0;
				break;
			case 4:
			default:
				break;
			}
			if (dp_clocks[i] > mode_clock) {
				if (i < 2)
					return 1;
				else if (i < 4)
					return 2;
				else
					return 4;
			}
		}
		break;
	}

	return 0;
}
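
/* Link clock (162000 or 270000 kHz) needed to carry mode_clock, again limited
 * by the sink's maximum link rate and lane count; returns 0 if the mode
 * cannot be supported.
 */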
static int dp_link_clock_for_mode_clock(u8 dpcd[DP_DPCD_SIZE], int mode_clock)
{
	int i;
	u8 max_link_bw;
	u8 max_lane_count;

	if (!dpcd)
		return 0;

	max_link_bw = dpcd[DP_MAX_LINK_RATE];
	max_lane_count = dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;

	switch (max_link_bw) {
	case DP_LINK_BW_1_62:
	default:
		for (i = 0; i < num_dp_clocks; i++) {
			if (i % 2)
				continue;
			switch (max_lane_count) {
			case 1:
				if (i > 1)
					return 0;
				break;
			case 2:
				if (i > 3)
					return 0;
				break;
			case 4:
			default:
				break;
			}
			if (dp_clocks[i] > mode_clock)
				return 162000;
		}
		break;
	case DP_LINK_BW_2_7:
		for (i = 0; i < num_dp_clocks; i++) {
			switch (max_lane_count) {
			case 1:
				if (i > 1)
					return 0;
				break;
			case 2:
				if (i > 3)
					return 0;
				break;
			case 4:
			default:
				break;
			}
			if (dp_clocks[i] > mode_clock)
				return (i % 2) ? 270000 : 162000;
		}
	}

	return 0;
}
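
/* Mode validation helper: a mode is only usable if both a lane count and a
 * link clock can be found for it in the sink's DPCD.
 */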
int dp_mode_valid(u8 dpcd[DP_DPCD_SIZE], int mode_clock)
{
	int lanes = dp_lanes_for_mode_clock(dpcd, mode_clock);
	int dp_clock = dp_link_clock_for_mode_clock(dpcd, mode_clock);

	if ((lanes == 0) || (dp_clock == 0))
		return MODE_CLOCK_HIGH;

	return MODE_OK;
}
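
/* DPCD link status helpers: index the DP_LANE0_1_STATUS block read back from
 * the sink and extract the per-lane 4-bit status nibbles used during link
 * training.
 */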
static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r)
{
	return link_status[r - DP_LANE0_1_STATUS];
}

static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE],
			     int lane)
{
	int i = DP_LANE0_1_STATUS + (lane >> 1);
	int s = (lane & 1) * 4;
	u8 l = dp_link_status(link_status, i);

	return (l >> s) & 0xf;
}

static bool dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
				 int lane_count)
{
	int lane;
	u8 lane_status;

	for (lane = 0; lane < lane_count; lane++) {
		lane_status = dp_get_lane_status(link_status, lane);
		if ((lane_status & DP_LANE_CR_DONE) == 0)
			return false;
	}
	return true;
}

static bool dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
			     int lane_count)
{
	u8 lane_align;
	u8 lane_status;
	int lane;

	lane_align = dp_link_status(link_status,
				    DP_LANE_ALIGN_STATUS_UPDATED);
	if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
		return false;
	for (lane = 0; lane < lane_count; lane++) {
		lane_status = dp_get_lane_status(link_status, lane);
		if ((lane_status & DP_CHANNEL_EQ_BITS) != DP_CHANNEL_EQ_BITS)
			return false;
	}
	return true;
}
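
/* Decode the per-lane voltage swing and pre-emphasis adjustments the sink
 * requests in the DP_ADJUST_REQUEST registers, shifted into the format used
 * by the DP_TRAINING_LANEx_SET registers.
 */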
static u8 dp_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE],
					int lane)
{
	int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
	int s = ((lane & 1) ?
		 DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
		 DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
	u8 l = dp_link_status(link_status, i);

	return ((l >> s) & 0x3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
}

static u8 dp_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZE],
					     int lane)
{
	int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
	int s = ((lane & 1) ?
		 DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
		 DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
	u8 l = dp_link_status(link_status, i);

	return ((l >> s) & 0x3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
}

/* XXX fix me -- chip specific */
#define DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_1200
static u8 dp_pre_emphasis_max(u8 voltage_swing)
{
	switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
	case DP_TRAIN_VOLTAGE_SWING_400:
		return DP_TRAIN_PRE_EMPHASIS_6;
	case DP_TRAIN_VOLTAGE_SWING_600:
		return DP_TRAIN_PRE_EMPHASIS_6;
	case DP_TRAIN_VOLTAGE_SWING_800:
		return DP_TRAIN_PRE_EMPHASIS_3_5;
	case DP_TRAIN_VOLTAGE_SWING_1200:
	default:
		return DP_TRAIN_PRE_EMPHASIS_0;
	}
}
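
/* Build the next training set: take the highest voltage swing and
 * pre-emphasis requested across all active lanes, clamp them to the
 * maximums, and apply the same value to every lane.
 */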
static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE],
				int lane_count,
				u8 train_set[4])
{
	u8 v = 0;
	u8 p = 0;
	int lane;

	for (lane = 0; lane < lane_count; lane++) {
		u8 this_v = dp_get_adjust_request_voltage(link_status, lane);
		u8 this_p = dp_get_adjust_request_pre_emphasis(link_status, lane);

		DRM_DEBUG_KMS("requested signal parameters: lane %d voltage %s pre_emph %s\n",
			      lane,
			      voltage_names[this_v >> DP_TRAIN_VOLTAGE_SWING_SHIFT],
			      pre_emph_names[this_p >> DP_TRAIN_PRE_EMPHASIS_SHIFT]);

		if (this_v > v)
			v = this_v;
		if (this_p > p)
			p = this_p;
	}

	if (v >= DP_VOLTAGE_MAX)
		v = DP_VOLTAGE_MAX | DP_TRAIN_MAX_SWING_REACHED;

	if (p >= dp_pre_emphasis_max(v))
		p = dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;

	DRM_DEBUG_KMS("using signal parameters: voltage %s pre_emph %s\n",
		      voltage_names[(v & DP_TRAIN_VOLTAGE_SWING_MASK) >> DP_TRAIN_VOLTAGE_SWING_SHIFT],
		      pre_emph_names[(p & DP_TRAIN_PRE_EMPHASIS_MASK) >> DP_TRAIN_PRE_EMPHASIS_SHIFT]);

	for (lane = 0; lane < 4; lane++)
		train_set[lane] = v | p;
}

union aux_channel_transaction {
	PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1;
	PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2;
};

/* radeon aux chan functions */
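
/* Execute one AUX channel transaction through the ProcessAuxChannelTransaction
 * AtomBIOS table: the request is copied into the atom scratch buffer, the
 * table is executed, and any reply data is copied back out.  Returns the
 * number of bytes received, or a negative errno on timeout/busy/error replies.
 */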
static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
				 u8 *send, int send_bytes,
				 u8 *recv, int recv_size,
				 u8 delay, u8 *ack)
{
	struct drm_device *dev = chan->dev;
	struct radeon_device *rdev = dev->dev_private;
	union aux_channel_transaction args;
	int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
	unsigned char *base;
	int recv_bytes;

	memset(&args, 0, sizeof(args));

	base = (unsigned char *)rdev->mode_info.atom_context->scratch;

	memcpy(base, send, send_bytes);

	args.v1.lpAuxRequest = 0;
	args.v1.lpDataOut = 16;
	args.v1.ucDataOutLen = 0;
	args.v1.ucChannelID = chan->rec.i2c_id;
	args.v1.ucDelay = delay / 10;
	if (ASIC_IS_DCE4(rdev))
		args.v2.ucHPD_ID = chan->rec.hpd;

	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);

	*ack = args.v1.ucReplyStatus;

	/* timeout */
	if (args.v1.ucReplyStatus == 1) {
		DRM_DEBUG_KMS("dp_aux_ch timeout\n");
		return -ETIMEDOUT;
	}

	/* flags not zero */
	if (args.v1.ucReplyStatus == 2) {
		DRM_DEBUG_KMS("dp_aux_ch flags not zero\n");
		return -EBUSY;
	}

	/* error */
	if (args.v1.ucReplyStatus == 3) {
		DRM_DEBUG_KMS("dp_aux_ch error\n");
		return -EIO;
	}

	recv_bytes = args.v1.ucDataOutLen;
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	if (recv && recv_size)
		memcpy(recv, base + 16, recv_bytes);

	return recv_bytes;
}
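
/* Native AUX write of up to 16 bytes to a DPCD address, retrying while the
 * sink replies with AUX defer.
 */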
static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector,
				      u16 address, u8 *send, u8 send_bytes, u8 delay)
{
	struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
	int ret;
	u8 msg[20];
	int msg_bytes = send_bytes + 4;
	u8 ack;

	if (send_bytes > 16)
		return -1;

	msg[0] = address;
	msg[1] = address >> 8;
	msg[2] = AUX_NATIVE_WRITE << 4;
	msg[3] = (msg_bytes << 4) | (send_bytes - 1);
	memcpy(&msg[4], send, send_bytes);

	while (1) {
		ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
					    msg, msg_bytes, NULL, 0, delay, &ack);
		if (ret < 0)
			return ret;
		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
			break;
		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
			udelay(400);
		else
			return -EIO;
	}

	return send_bytes;
}
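
/* Native AUX read from a DPCD address into recv, retrying on AUX defer.
 * Returns the number of bytes read or a negative errno.
 */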
static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector,
				     u16 address, u8 *recv, int recv_bytes, u8 delay)
{
	struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
	u8 msg[4];
	int msg_bytes = 4;
	u8 ack;
	int ret;

	msg[0] = address;
	msg[1] = address >> 8;
	msg[2] = AUX_NATIVE_READ << 4;
	msg[3] = (msg_bytes << 4) | (recv_bytes - 1);

	while (1) {
		ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
					    msg, msg_bytes, recv, recv_bytes, delay, &ack);
		if (ret == 0)
			return -EPROTO;
		if (ret < 0)
			return ret;
		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
			return ret;
		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
			udelay(400);
		else
			return -EIO;
	}
}

/* radeon dp functions */
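
/* Thin wrapper around the DPEncoderService AtomBIOS table; the link clock is
 * passed in 10 kHz units and the table's status byte is returned.
 */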
static u8 radeon_dp_encoder_service(struct radeon_device *rdev,
				    int action, int dp_clock,
				    uint8_t ucconfig, uint8_t lane_num)
{
	DP_ENCODER_SERVICE_PARAMETERS args;
	int index = GetIndexIntoMasterTable(COMMAND, DPEncoderService);

	memset(&args, 0, sizeof(args));
	args.ucLinkClock = dp_clock / 10;
	args.ucConfig = ucconfig;
	args.ucAction = action;
	args.ucLaneNum = lane_num;
	args.ucStatus = 0;

	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
	return args.ucStatus;
}

u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector)
{
	struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
	struct drm_device *dev = radeon_connector->base.dev;
	struct radeon_device *rdev = dev->dev_private;

	return radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_GET_SINK_TYPE, 0,
					 dig_connector->dp_i2c_bus->rec.i2c_id, 0);
}
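
/* Read the first 8 bytes of the DPCD into the connector's cached copy.
 * Returns true on success; on failure the cached revision byte is cleared.
 */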
bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
{
	struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
	u8 msg[25];
	int ret;

	ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCD_REV, msg, 8, 0);
	if (ret > 0) {
		memcpy(dig_connector->dpcd, msg, 8);
		{
			int i;
			DRM_DEBUG_KMS("DPCD: ");
			for (i = 0; i < 8; i++)
				DRM_DEBUG_KMS("%02x ", msg[i]);
			DRM_DEBUG_KMS("\n");
		}
		return true;
	}
	dig_connector->dpcd[0] = 0;
	return false;
}

void radeon_dp_set_link_config(struct drm_connector *connector,
			       struct drm_display_mode *mode)
{
	struct radeon_connector *radeon_connector;
	struct radeon_connector_atom_dig *dig_connector;

	if ((connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) &&
	    (connector->connector_type != DRM_MODE_CONNECTOR_eDP))
		return;

	radeon_connector = to_radeon_connector(connector);
	if (!radeon_connector->con_priv)
		return;
	dig_connector = radeon_connector->con_priv;

	dig_connector->dp_clock =
		dp_link_clock_for_mode_clock(dig_connector->dpcd, mode->clock);
	dig_connector->dp_lane_count =
		dp_lanes_for_mode_clock(dig_connector->dpcd, mode->clock);
}

int radeon_dp_mode_valid_helper(struct radeon_connector *radeon_connector,
				struct drm_display_mode *mode)
{
	struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;

	return dp_mode_valid(dig_connector->dpcd, mode->clock);
}
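
/* Fetch the six DP_LANE0_1_STATUS bytes over AUX; used both to decide
 * whether retraining is needed and inside the training loops below.
 */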
static bool atom_dp_get_link_status(struct radeon_connector *radeon_connector,
				    u8 link_status[DP_LINK_STATUS_SIZE])
{
	int ret;

	ret = radeon_dp_aux_native_read(radeon_connector, DP_LANE0_1_STATUS,
					link_status, DP_LINK_STATUS_SIZE, 100);
	if (ret <= 0) {
		DRM_ERROR("displayport link status failed\n");
		return false;
	}

	DRM_DEBUG_KMS("link status %02x %02x %02x %02x %02x %02x\n",
		      link_status[0], link_status[1], link_status[2],
		      link_status[3], link_status[4], link_status[5]);
	return true;
}

bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector)
{
	struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
	u8 link_status[DP_LINK_STATUS_SIZE];

	if (!atom_dp_get_link_status(radeon_connector, link_status))
		return false;
	if (dp_channel_eq_ok(link_status, dig_connector->dp_lane_count))
		return false;
	return true;
}

static void dp_set_power(struct radeon_connector *radeon_connector, u8 power_state)
{
	struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;

	if (dig_connector->dpcd[0] >= 0x11) {
		radeon_dp_aux_native_write(radeon_connector, DP_SET_POWER,
					   &power_state, 1, 0);
	}
}

static void dp_set_downspread(struct radeon_connector *radeon_connector, u8 downspread)
{
	radeon_dp_aux_native_write(radeon_connector, DP_DOWNSPREAD_CTRL,
				   &downspread, 1, 0);
}

static void dp_set_link_bw_lanes(struct radeon_connector *radeon_connector,
				 u8 link_configuration[DP_LINK_CONFIGURATION_SIZE])
{
	radeon_dp_aux_native_write(radeon_connector, DP_LINK_BW_SET,
				   link_configuration, 2, 0);
}

static void dp_update_dpvs_emph(struct radeon_connector *radeon_connector,
				struct drm_encoder *encoder,
				u8 train_set[4])
{
	struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
	int i;

	for (i = 0; i < dig_connector->dp_lane_count; i++)
		atombios_dig_transmitter_setup(encoder,
					       ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH,
					       i, train_set[i]);

	radeon_dp_aux_native_write(radeon_connector, DP_TRAINING_LANE0_SET,
				   train_set, dig_connector->dp_lane_count, 0);
}

static void dp_set_training(struct radeon_connector *radeon_connector,
			    u8 training)
{
	radeon_dp_aux_native_write(radeon_connector, DP_TRAINING_PATTERN_SET,
				   &training, 1, 0);
}
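
/* Full DP link training sequence: power up and configure the sink, then run
 * the clock recovery loop with training pattern 1 followed by the channel
 * equalization loop with training pattern 2, adjusting voltage swing and
 * pre-emphasis from the sink's adjust requests after each status read.
 */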
void dp_link_train(struct drm_encoder *encoder,
		   struct drm_connector *connector)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct radeon_encoder_atom_dig *dig;
	struct radeon_connector *radeon_connector;
	struct radeon_connector_atom_dig *dig_connector;
	int enc_id = 0;
	bool clock_recovery, channel_eq;
	u8 link_status[DP_LINK_STATUS_SIZE];
	u8 link_configuration[DP_LINK_CONFIGURATION_SIZE];
	u8 tries, voltage;
	u8 train_set[4];
	int i;

	if ((connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) &&
	    (connector->connector_type != DRM_MODE_CONNECTOR_eDP))
		return;

	if (!radeon_encoder->enc_priv)
		return;
	dig = radeon_encoder->enc_priv;

	radeon_connector = to_radeon_connector(connector);
	if (!radeon_connector->con_priv)
		return;
	dig_connector = radeon_connector->con_priv;

	if (dig->dig_encoder)
		enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER;
	else
		enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER;
	if (dig->linkb)
		enc_id |= ATOM_DP_CONFIG_LINK_B;
	else
		enc_id |= ATOM_DP_CONFIG_LINK_A;

	memset(link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
	if (dig_connector->dp_clock == 270000)
		link_configuration[0] = DP_LINK_BW_2_7;
	else
		link_configuration[0] = DP_LINK_BW_1_62;
	link_configuration[1] = dig_connector->dp_lane_count;
	if (dig_connector->dpcd[0] >= 0x11)
		link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;

	/* power up the sink */
	dp_set_power(radeon_connector, DP_SET_POWER_D0);
	/* disable the training pattern on the sink */
	dp_set_training(radeon_connector, DP_TRAINING_PATTERN_DISABLE);
	/* set link bw and lanes on the sink */
	dp_set_link_bw_lanes(radeon_connector, link_configuration);
	/* disable downspread on the sink */
	dp_set_downspread(radeon_connector, 0);
	if (ASIC_IS_DCE4(rdev)) {
		/* start training on the source */
		atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_START, 0);
		/* set training pattern 1 on the source */
		atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1, 0);
	} else {
		/* start training on the source */
		radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_START,
					  dig_connector->dp_clock, enc_id, 0);
		/* set training pattern 1 on the source */
		radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
					  dig_connector->dp_clock, enc_id, 0);
	}

	/* set initial vs/emph */
	memset(train_set, 0, 4);
	udelay(400);
	/* set training pattern 1 on the sink */
	dp_set_training(radeon_connector, DP_TRAINING_PATTERN_1);

	dp_update_dpvs_emph(radeon_connector, encoder, train_set);

	/* clock recovery loop */
	clock_recovery = false;
	tries = 0;
	voltage = 0xff;
	for (;;) {
		udelay(100);
		if (!atom_dp_get_link_status(radeon_connector, link_status))
			break;

		if (dp_clock_recovery_ok(link_status, dig_connector->dp_lane_count)) {
			clock_recovery = true;
			break;
		}

		for (i = 0; i < dig_connector->dp_lane_count; i++) {
			if ((train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
				break;
		}
		if (i == dig_connector->dp_lane_count) {
			DRM_ERROR("clock recovery reached max voltage\n");
			break;
		}

		if ((train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
			++tries;
			if (tries == 5) {
				DRM_ERROR("clock recovery tried 5 times\n");
				break;
			}
		} else
			tries = 0;

		voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;

		/* Compute new train_set as requested by sink */
		dp_get_adjust_train(link_status, dig_connector->dp_lane_count, train_set);
		dp_update_dpvs_emph(radeon_connector, encoder, train_set);
	}
	if (!clock_recovery)
		DRM_ERROR("clock recovery failed\n");
	else
		DRM_DEBUG_KMS("clock recovery at voltage %d pre-emphasis %d\n",
			      train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
			      (train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK) >>
			      DP_TRAIN_PRE_EMPHASIS_SHIFT);

	/* set training pattern 2 on the sink */
	dp_set_training(radeon_connector, DP_TRAINING_PATTERN_2);
	/* set training pattern 2 on the source */
	if (ASIC_IS_DCE4(rdev))
		atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2, 0);
	else
		radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
					  dig_connector->dp_clock, enc_id, 1);

	/* channel equalization loop */
	tries = 0;
	channel_eq = false;
	for (;;) {
		udelay(400);
		if (!atom_dp_get_link_status(radeon_connector, link_status))
			break;

		if (dp_channel_eq_ok(link_status, dig_connector->dp_lane_count)) {
			channel_eq = true;
			break;
		}

		/* Try 5 times */
		if (tries > 5) {
			DRM_ERROR("channel eq failed: 5 tries\n");
			break;
		}

		/* Compute new train_set as requested by sink */
		dp_get_adjust_train(link_status, dig_connector->dp_lane_count, train_set);
		dp_update_dpvs_emph(radeon_connector, encoder, train_set);

		tries++;
	}

	if (!channel_eq)
		DRM_ERROR("channel eq failed\n");
	else
		DRM_DEBUG_KMS("channel eq at voltage %d pre-emphasis %d\n",
			      train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
			      (train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK)
			      >> DP_TRAIN_PRE_EMPHASIS_SHIFT);

	/* disable the training pattern on the sink */
	dp_set_training(radeon_connector, DP_TRAINING_PATTERN_DISABLE);

	/* disable the training pattern on the source */
	if (ASIC_IS_DCE4(rdev))
		atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE, 0);
	else
		radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE,
					  dig_connector->dp_clock, enc_id, 0);
}
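
/* I2C-over-AUX transfer callback used by the DP i2c algo: wraps one byte of
 * an I2C transaction in an AUX I2C request and retries up to four times on
 * native or I2C defer replies.
 */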
int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
			 u8 write_byte, u8 *read_byte)
{
	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
	struct radeon_i2c_chan *auxch = (struct radeon_i2c_chan *)adapter;
	u16 address = algo_data->address;
	u8 msg[5];
	u8 reply[2];
	unsigned retry;
	int msg_bytes;
	int reply_bytes = 1;
	int ret;
	u8 ack;

	/* Set up the command byte */
	if (mode & MODE_I2C_READ)
		msg[2] = AUX_I2C_READ << 4;
	else
		msg[2] = AUX_I2C_WRITE << 4;

	if (!(mode & MODE_I2C_STOP))
		msg[2] |= AUX_I2C_MOT << 4;

	msg[0] = address;
	msg[1] = address >> 8;

	switch (mode) {
	case MODE_I2C_WRITE:
		msg_bytes = 5;
		msg[3] = msg_bytes << 4;
		msg[4] = write_byte;
		break;
	case MODE_I2C_READ:
		msg_bytes = 4;
		msg[3] = msg_bytes << 4;
		break;
	default:
		msg_bytes = 4;
		msg[3] = 3 << 4;
		break;
	}

	for (retry = 0; retry < 4; retry++) {
		ret = radeon_process_aux_ch(auxch,
					    msg, msg_bytes, reply, reply_bytes, 0, &ack);
		if (ret < 0) {
			DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
			return ret;
		}

		switch (ack & AUX_NATIVE_REPLY_MASK) {
		case AUX_NATIVE_REPLY_ACK:
			/* I2C-over-AUX Reply field is only valid
			 * when paired with AUX ACK.
			 */
			break;
		case AUX_NATIVE_REPLY_NACK:
			DRM_DEBUG_KMS("aux_ch native nack\n");
			return -EREMOTEIO;
		case AUX_NATIVE_REPLY_DEFER:
			DRM_DEBUG_KMS("aux_ch native defer\n");
			udelay(400);
			continue;
		default:
			DRM_ERROR("aux_ch invalid native reply 0x%02x\n", ack);
			return -EREMOTEIO;
		}

		switch (ack & AUX_I2C_REPLY_MASK) {
		case AUX_I2C_REPLY_ACK:
			if (mode == MODE_I2C_READ)
				*read_byte = reply[0];
			return ret;
		case AUX_I2C_REPLY_NACK:
			DRM_DEBUG_KMS("aux_i2c nack\n");
			return -EREMOTEIO;
		case AUX_I2C_REPLY_DEFER:
			DRM_DEBUG_KMS("aux_i2c defer\n");
			udelay(400);
			break;
		default:
			DRM_ERROR("aux_i2c invalid reply 0x%02x\n", ack);
			return -EREMOTEIO;
		}
	}

	DRM_ERROR("aux i2c too many retries, giving up\n");
	return -EREMOTEIO;
}