// SPDX-License-Identifier: MIT
/*
 * Copyright © 2013-2021 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_iosf_mbi.h"
#include "i915_reg.h"
#include "vlv_iosf_sb.h"

/*
 * IOSF sideband, see VLV2_SidebandMsg_HAS.docx and
 * VLV_VLV2_PUNIT_HAS_0.8.docx
 */

/* Standard MMIO read, non-posted */
#define SB_MRD_NP	0x00
/* Standard MMIO write, non-posted */
#define SB_MWR_NP	0x01
/* Private register read, double-word addressing, non-posted */
#define SB_CRRDDA_NP	0x06
/* Private register write, double-word addressing, non-posted */
#define SB_CRWRDA_NP	0x07

/*
 * No-op IPI target; used by __vlv_punit_get() to kick every CPU out of its
 * idle state before the punit sideband is used.
 */
static void ping(void *info)
{
}

static void __vlv_punit_get(struct drm_i915_private *i915)
{
	iosf_mbi_punit_acquire();

	/*
	 * Prevent the cpu from sleeping while we use this sideband, otherwise
	 * the punit may cause a machine hang. The issue appears to be isolated
	 * to changing the power state of the CPU package while changing
	 * the power state via the punit, and we have only observed it
	 * reliably on 4-core Baytrail systems, suggesting the issue is in the
	 * power delivery mechanism and likely to be board/function
	 * specific. Hence we presume the workaround need only be applied
	 * to the Valleyview P-unit and not all sideband communications.
	 */
	if (IS_VALLEYVIEW(i915)) {
		cpu_latency_qos_update_request(&i915->vlv_iosf_sb.qos, 0);
		on_each_cpu(ping, NULL, 1);
	}
}

static void __vlv_punit_put(struct drm_i915_private *i915)
{
	if (IS_VALLEYVIEW(i915))
		cpu_latency_qos_update_request(&i915->vlv_iosf_sb.qos,
					       PM_QOS_DEFAULT_VALUE);

	iosf_mbi_punit_release();
}

void vlv_iosf_sb_get(struct drm_device *drm, unsigned long unit_mask)
{
	struct drm_i915_private *i915 = to_i915(drm);

	if (unit_mask & BIT(VLV_IOSF_SB_PUNIT))
		__vlv_punit_get(i915);

	mutex_lock(&i915->vlv_iosf_sb.lock);

	i915->vlv_iosf_sb.locked_unit_mask |= unit_mask;
}

void vlv_iosf_sb_put(struct drm_device *drm, unsigned long unit_mask)
{
	struct drm_i915_private *i915 = to_i915(drm);

	i915->vlv_iosf_sb.locked_unit_mask &= ~unit_mask;

	drm_WARN_ON(drm, i915->vlv_iosf_sb.locked_unit_mask);

	mutex_unlock(&i915->vlv_iosf_sb.lock);

	if (unit_mask & BIT(VLV_IOSF_SB_PUNIT))
		__vlv_punit_put(i915);
}
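
/*
 * Usage sketch: unit_mask is a bitmask, so a caller that needs several
 * sideband units can take them under one lock section. The CCK + punit
 * combination below is only an illustration of the pattern, not a
 * required pairing:
 *
 *	vlv_iosf_sb_get(drm, BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT));
 *	... vlv_iosf_sb_read()/vlv_iosf_sb_write() calls ...
 *	vlv_iosf_sb_put(drm, BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT));
 *
 * Including VLV_IOSF_SB_PUNIT in the mask is what triggers the punit
 * acquire/release and the VLV CPU latency workaround above.
 */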

/*
 * One IOSF sideband transaction: program the target address and (for writes)
 * the payload, kick off the request via the doorbell register with
 * IOSF_SB_BUSY set, then poll for the hardware to clear the busy bit and,
 * for reads, fetch the returned data.
 */
static int vlv_sideband_rw(struct drm_i915_private *i915,
			   u32 devfn, u32 port, u32 opcode,
			   u32 addr, u32 *val)
{
	struct intel_uncore *uncore = &i915->uncore;
	const bool is_read = (opcode == SB_MRD_NP || opcode == SB_CRRDDA_NP);
	int err;

	lockdep_assert_held(&i915->vlv_iosf_sb.lock);
	if (port == IOSF_PORT_PUNIT)
		iosf_mbi_assert_punit_acquired();

	/* Flush the previous comms, just in case it failed last time. */
	if (intel_wait_for_register(uncore,
				    VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0,
				    5)) {
		drm_dbg(&i915->drm, "IOSF sideband idle wait (%s) timed out\n",
			is_read ? "read" : "write");
		return -EAGAIN;
	}

	preempt_disable();

	intel_uncore_write_fw(uncore, VLV_IOSF_ADDR, addr);
	intel_uncore_write_fw(uncore, VLV_IOSF_DATA, is_read ? 0 : *val);
	intel_uncore_write_fw(uncore, VLV_IOSF_DOORBELL_REQ,
			      (devfn << IOSF_DEVFN_SHIFT) |
			      (opcode << IOSF_OPCODE_SHIFT) |
			      (port << IOSF_PORT_SHIFT) |
			      (0xf << IOSF_BYTE_ENABLES_SHIFT) |
			      (0 << IOSF_BAR_SHIFT) |
			      IOSF_SB_BUSY);

	if (__intel_wait_for_register_fw(uncore,
					 VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0,
					 10000, 0, NULL) == 0) {
		if (is_read)
			*val = intel_uncore_read_fw(uncore, VLV_IOSF_DATA);
		err = 0;
	} else {
		drm_dbg(&i915->drm, "IOSF sideband finish wait (%s) timed out\n",
			is_read ? "read" : "write");
		err = -ETIMEDOUT;
	}

	preempt_enable();

	return err;
}

static u32 unit_to_devfn(enum vlv_iosf_sb_unit unit)
{
	if (unit == VLV_IOSF_SB_DPIO || unit == VLV_IOSF_SB_DPIO_2 ||
	    unit == VLV_IOSF_SB_FLISDSI)
		return DPIO_DEVFN;
	else
		return PCI_DEVFN(0, 0);
}

static u32 unit_to_port(enum vlv_iosf_sb_unit unit)
{
	switch (unit) {
	case VLV_IOSF_SB_BUNIT:
		return IOSF_PORT_BUNIT;
	case VLV_IOSF_SB_CCK:
		return IOSF_PORT_CCK;
	case VLV_IOSF_SB_CCU:
		return IOSF_PORT_CCU;
	case VLV_IOSF_SB_DPIO:
		return IOSF_PORT_DPIO;
	case VLV_IOSF_SB_DPIO_2:
		return IOSF_PORT_DPIO_2;
	case VLV_IOSF_SB_FLISDSI:
		return IOSF_PORT_FLISDSI;
	case VLV_IOSF_SB_GPIO:
		return 0; /* FIXME: unused */
	case VLV_IOSF_SB_NC:
		return IOSF_PORT_NC;
	case VLV_IOSF_SB_PUNIT:
		return IOSF_PORT_PUNIT;
	default:
		return 0;
	}
}

static u32 unit_to_opcode(enum vlv_iosf_sb_unit unit, bool write)
{
	if (unit == VLV_IOSF_SB_DPIO || unit == VLV_IOSF_SB_DPIO_2)
		return write ? SB_MWR_NP : SB_MRD_NP;
	else
		return write ? SB_CRWRDA_NP : SB_CRRDDA_NP;
}

u32 vlv_iosf_sb_read(struct drm_device *drm, enum vlv_iosf_sb_unit unit, u32 addr)
{
	struct drm_i915_private *i915 = to_i915(drm);
	u32 devfn, port, opcode, val = 0;

	devfn = unit_to_devfn(unit);
	port = unit_to_port(unit);
	opcode = unit_to_opcode(unit, false);

	if (drm_WARN_ONCE(&i915->drm, !port, "invalid unit %d\n", unit))
		return 0;

	drm_WARN_ON(&i915->drm, !(i915->vlv_iosf_sb.locked_unit_mask & BIT(unit)));

	vlv_sideband_rw(i915, devfn, port, opcode, addr, &val);

	return val;
}

int vlv_iosf_sb_write(struct drm_device *drm, enum vlv_iosf_sb_unit unit, u32 addr, u32 val)
{
	struct drm_i915_private *i915 = to_i915(drm);
	u32 devfn, port, opcode;

	devfn = unit_to_devfn(unit);
	port = unit_to_port(unit);
	opcode = unit_to_opcode(unit, true);

	if (drm_WARN_ONCE(&i915->drm, !port, "invalid unit %d\n", unit))
		return -EINVAL;

	drm_WARN_ON(&i915->drm, !(i915->vlv_iosf_sb.locked_unit_mask & BIT(unit)));

	return vlv_sideband_rw(i915, devfn, port, opcode, addr, &val);
}
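
/*
 * A minimal read-modify-write sketch; 'reg' and 'bit' are placeholder names
 * for illustration, not real punit register definitions:
 *
 *	vlv_iosf_sb_get(drm, BIT(VLV_IOSF_SB_PUNIT));
 *	val = vlv_iosf_sb_read(drm, VLV_IOSF_SB_PUNIT, reg);
 *	val |= bit;
 *	err = vlv_iosf_sb_write(drm, VLV_IOSF_SB_PUNIT, reg, val);
 *	vlv_iosf_sb_put(drm, BIT(VLV_IOSF_SB_PUNIT));
 *
 * Note the asymmetric error reporting: vlv_iosf_sb_read() returns 0 if the
 * transaction fails, while vlv_iosf_sb_write() returns a negative error code.
 */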

void vlv_iosf_sb_init(struct drm_i915_private *i915)
{
	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
		mutex_init(&i915->vlv_iosf_sb.lock);

	if (IS_VALLEYVIEW(i915))
		cpu_latency_qos_add_request(&i915->vlv_iosf_sb.qos, PM_QOS_DEFAULT_VALUE);
}

void vlv_iosf_sb_fini(struct drm_i915_private *i915)
{
	if (IS_VALLEYVIEW(i915))
		cpu_latency_qos_remove_request(&i915->vlv_iosf_sb.qos);

	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
		mutex_destroy(&i915->vlv_iosf_sb.lock);
}
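
/*
 * Rough lifetime sketch; the exact probe/remove call sites are an assumption
 * for illustration and are not dictated by this file:
 *
 *	vlv_iosf_sb_init(i915);	// during driver probe, before any sideband access
 *	...
 *	vlv_iosf_sb_fini(i915);	// during driver remove, after the last access
 *
 * Only VLV/CHV get the lock, and only VLV gets the PM QoS request used by
 * the punit workaround above; on other platforms both calls are no-ops.
 */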