| @@ -1,1438 +1,1442 @@ | | | @@ -1,1438 +1,1442 @@ |
1 | /* | | 1 | /* |
2 | * Copyright (c) 2006 Luc Verhaegen (quirks list) | | 2 | * Copyright (c) 2006 Luc Verhaegen (quirks list) |
3 | * Copyright (c) 2007-2008 Intel Corporation | | 3 | * Copyright (c) 2007-2008 Intel Corporation |
4 | * Jesse Barnes <jesse.barnes@intel.com> | | 4 | * Jesse Barnes <jesse.barnes@intel.com> |
5 | * Copyright 2010 Red Hat, Inc. | | 5 | * Copyright 2010 Red Hat, Inc. |
6 | * | | 6 | * |
7 | * DDC probing routines (drm_ddc_read & drm_do_probe_ddc_edid) originally from | | 7 | * DDC probing routines (drm_ddc_read & drm_do_probe_ddc_edid) originally from |
8 | * FB layer. | | 8 | * FB layer. |
9 | * Copyright (C) 2006 Dennis Munsie <dmunsie@cecropia.com> | | 9 | * Copyright (C) 2006 Dennis Munsie <dmunsie@cecropia.com> |
10 | * | | 10 | * |
11 | * Permission is hereby granted, free of charge, to any person obtaining a | | 11 | * Permission is hereby granted, free of charge, to any person obtaining a |
12 | * copy of this software and associated documentation files (the "Software"), | | 12 | * copy of this software and associated documentation files (the "Software"), |
13 | * to deal in the Software without restriction, including without limitation | | 13 | * to deal in the Software without restriction, including without limitation |
14 | * the rights to use, copy, modify, merge, publish, distribute, sub license, | | 14 | * the rights to use, copy, modify, merge, publish, distribute, sub license, |
15 | * and/or sell copies of the Software, and to permit persons to whom the | | 15 | * and/or sell copies of the Software, and to permit persons to whom the |
16 | * Software is furnished to do so, subject to the following conditions: | | 16 | * Software is furnished to do so, subject to the following conditions: |
17 | * | | 17 | * |
18 | * The above copyright notice and this permission notice (including the | | 18 | * The above copyright notice and this permission notice (including the |
19 | * next paragraph) shall be included in all copies or substantial portions | | 19 | * next paragraph) shall be included in all copies or substantial portions |
20 | * of the Software. | | 20 | * of the Software. |
21 | * | | 21 | * |
22 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | | 22 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
23 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | | 23 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
24 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | | 24 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL |
25 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | | 25 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
26 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | | 26 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
27 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | | 27 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER |
28 | * DEALINGS IN THE SOFTWARE. | | 28 | * DEALINGS IN THE SOFTWARE. |
29 | */ | | 29 | */ |
30 | #include <linux/kernel.h> | | 30 | #include <linux/kernel.h> |
31 | #include <linux/slab.h> | | 31 | #include <linux/slab.h> |
32 | #include <linux/i2c.h> | | 32 | #include <linux/i2c.h> |
33 | #include <linux/module.h> | | 33 | #include <linux/module.h> |
34 | #include <linux/moduleparam.h> | | 34 | #include <linux/moduleparam.h> |
35 | #include <linux/export.h> | | 35 | #include <linux/export.h> |
36 | #include <linux/printk.h> | | 36 | #include <linux/printk.h> |
37 | #include <asm/byteorder.h> | | 37 | #include <asm/byteorder.h> |
38 | #include <drm/drmP.h> | | 38 | #include <drm/drmP.h> |
39 | #include <drm/drm_edid.h> | | 39 | #include <drm/drm_edid.h> |
40 | #include "drm_edid_modes.h" | | 40 | #include "drm_edid_modes.h" |
41 | | | 41 | |
42 | #define version_greater(edid, maj, min) \ | | 42 | #define version_greater(edid, maj, min) \ |
43 | (((edid)->version > (maj)) || \ | | 43 | (((edid)->version > (maj)) || \ |
44 | ((edid)->version == (maj) && (edid)->revision > (min))) | | 44 | ((edid)->version == (maj) && (edid)->revision > (min))) |
45 | | | 45 | |
46 | #define EDID_EST_TIMINGS 16 | | 46 | #define EDID_EST_TIMINGS 16 |
47 | #define EDID_STD_TIMINGS 8 | | 47 | #define EDID_STD_TIMINGS 8 |
48 | #define EDID_DETAILED_TIMINGS 4 | | 48 | #define EDID_DETAILED_TIMINGS 4 |
49 | | | 49 | |
50 | /* | | 50 | /* |
51 | * EDID blocks out in the wild have a variety of bugs, try to collect | | 51 | * EDID blocks out in the wild have a variety of bugs, try to collect |
52 | * them here (note that userspace may work around broken monitors first, | | 52 | * them here (note that userspace may work around broken monitors first, |
53 | * but fixes should make their way here so that the kernel "just works" | | 53 | * but fixes should make their way here so that the kernel "just works" |
54 | * on as many displays as possible). | | 54 | * on as many displays as possible). |
55 | */ | | 55 | */ |
56 | | | 56 | |
57 | /* First detailed mode wrong, use largest 60Hz mode */ | | 57 | /* First detailed mode wrong, use largest 60Hz mode */ |
58 | #define EDID_QUIRK_PREFER_LARGE_60 (1 << 0) | | 58 | #define EDID_QUIRK_PREFER_LARGE_60 (1 << 0) |
59 | /* Reported 135MHz pixel clock is too high, needs adjustment */ | | 59 | /* Reported 135MHz pixel clock is too high, needs adjustment */ |
60 | #define EDID_QUIRK_135_CLOCK_TOO_HIGH (1 << 1) | | 60 | #define EDID_QUIRK_135_CLOCK_TOO_HIGH (1 << 1) |
61 | /* Prefer the largest mode at 75 Hz */ | | 61 | /* Prefer the largest mode at 75 Hz */ |
62 | #define EDID_QUIRK_PREFER_LARGE_75 (1 << 2) | | 62 | #define EDID_QUIRK_PREFER_LARGE_75 (1 << 2) |
63 | /* Detail timing is in cm not mm */ | | 63 | /* Detail timing is in cm not mm */ |
64 | #define EDID_QUIRK_DETAILED_IN_CM (1 << 3) | | 64 | #define EDID_QUIRK_DETAILED_IN_CM (1 << 3) |
65 | /* Detailed timing descriptors have bogus size values, so just take the | | 65 | /* Detailed timing descriptors have bogus size values, so just take the |
66 | * maximum size and use that. | | 66 | * maximum size and use that. |
67 | */ | | 67 | */ |
68 | #define EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE (1 << 4) | | 68 | #define EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE (1 << 4) |
69 | /* Monitor forgot to set the first detailed is preferred bit. */ | | 69 | /* Monitor forgot to set the first detailed is preferred bit. */ |
70 | #define EDID_QUIRK_FIRST_DETAILED_PREFERRED (1 << 5) | | 70 | #define EDID_QUIRK_FIRST_DETAILED_PREFERRED (1 << 5) |
71 | /* use +hsync +vsync for detailed mode */ | | 71 | /* use +hsync +vsync for detailed mode */ |
72 | #define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6) | | 72 | #define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6) |
73 | /* Force reduced-blanking timings for detailed modes */ | | 73 | /* Force reduced-blanking timings for detailed modes */ |
74 | #define EDID_QUIRK_FORCE_REDUCED_BLANKING (1 << 7) | | 74 | #define EDID_QUIRK_FORCE_REDUCED_BLANKING (1 << 7) |
75 | | | 75 | |
76 | struct detailed_mode_closure { | | 76 | struct detailed_mode_closure { |
77 | struct drm_connector *connector; | | 77 | struct drm_connector *connector; |
78 | struct edid *edid; | | 78 | struct edid *edid; |
79 | bool preferred; | | 79 | bool preferred; |
80 | u32 quirks; | | 80 | u32 quirks; |
81 | int modes; | | 81 | int modes; |
82 | }; | | 82 | }; |
83 | | | 83 | |
84 | #define LEVEL_DMT 0 | | 84 | #define LEVEL_DMT 0 |
85 | #define LEVEL_GTF 1 | | 85 | #define LEVEL_GTF 1 |
86 | #define LEVEL_GTF2 2 | | 86 | #define LEVEL_GTF2 2 |
87 | #define LEVEL_CVT 3 | | 87 | #define LEVEL_CVT 3 |
88 | | | 88 | |
89 | static struct edid_quirk { | | 89 | static struct edid_quirk { |
90 | char vendor[4]; | | 90 | char vendor[4]; |
91 | int product_id; | | 91 | int product_id; |
92 | u32 quirks; | | 92 | u32 quirks; |
93 | } edid_quirk_list[] = { | | 93 | } edid_quirk_list[] = { |
94 | /* ASUS VW222S */ | | 94 | /* ASUS VW222S */ |
95 | { "ACI", 0x22a2, EDID_QUIRK_FORCE_REDUCED_BLANKING }, | | 95 | { "ACI", 0x22a2, EDID_QUIRK_FORCE_REDUCED_BLANKING }, |
96 | | | 96 | |
97 | /* Acer AL1706 */ | | 97 | /* Acer AL1706 */ |
98 | { "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 }, | | 98 | { "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 }, |
99 | /* Acer F51 */ | | 99 | /* Acer F51 */ |
100 | { "API", 0x7602, EDID_QUIRK_PREFER_LARGE_60 }, | | 100 | { "API", 0x7602, EDID_QUIRK_PREFER_LARGE_60 }, |
101 | /* Unknown Acer */ | | 101 | /* Unknown Acer */ |
102 | { "ACR", 2423, EDID_QUIRK_FIRST_DETAILED_PREFERRED }, | | 102 | { "ACR", 2423, EDID_QUIRK_FIRST_DETAILED_PREFERRED }, |
103 | | | 103 | |
104 | /* Belinea 10 15 55 */ | | 104 | /* Belinea 10 15 55 */ |
105 | { "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 }, | | 105 | { "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 }, |
106 | { "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 }, | | 106 | { "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 }, |
107 | | | 107 | |
108 | /* Envision Peripherals, Inc. EN-7100e */ | | 108 | /* Envision Peripherals, Inc. EN-7100e */ |
109 | { "EPI", 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH }, | | 109 | { "EPI", 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH }, |
110 | /* Envision EN2028 */ | | 110 | /* Envision EN2028 */ |
111 | { "EPI", 8232, EDID_QUIRK_PREFER_LARGE_60 }, | | 111 | { "EPI", 8232, EDID_QUIRK_PREFER_LARGE_60 }, |
112 | | | 112 | |
113 | /* Funai Electronics PM36B */ | | 113 | /* Funai Electronics PM36B */ |
114 | { "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 | | | 114 | { "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 | |
115 | EDID_QUIRK_DETAILED_IN_CM }, | | 115 | EDID_QUIRK_DETAILED_IN_CM }, |
116 | | | 116 | |
117 | /* LG Philips LCD LP154W01-A5 */ | | 117 | /* LG Philips LCD LP154W01-A5 */ |
118 | { "LPL", 0, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE }, | | 118 | { "LPL", 0, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE }, |
119 | { "LPL", 0x2a00, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE }, | | 119 | { "LPL", 0x2a00, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE }, |
120 | | | 120 | |
121 | /* Philips 107p5 CRT */ | | 121 | /* Philips 107p5 CRT */ |
122 | { "PHL", 57364, EDID_QUIRK_FIRST_DETAILED_PREFERRED }, | | 122 | { "PHL", 57364, EDID_QUIRK_FIRST_DETAILED_PREFERRED }, |
123 | | | 123 | |
124 | /* Proview AY765C */ | | 124 | /* Proview AY765C */ |
125 | { "PTS", 765, EDID_QUIRK_FIRST_DETAILED_PREFERRED }, | | 125 | { "PTS", 765, EDID_QUIRK_FIRST_DETAILED_PREFERRED }, |
126 | | | 126 | |
127 | /* Samsung SyncMaster 205BW. Note: irony */ | | 127 | /* Samsung SyncMaster 205BW. Note: irony */ |
128 | { "SAM", 541, EDID_QUIRK_DETAILED_SYNC_PP }, | | 128 | { "SAM", 541, EDID_QUIRK_DETAILED_SYNC_PP }, |
129 | /* Samsung SyncMaster 22[5-6]BW */ | | 129 | /* Samsung SyncMaster 22[5-6]BW */ |
130 | { "SAM", 596, EDID_QUIRK_PREFER_LARGE_60 }, | | 130 | { "SAM", 596, EDID_QUIRK_PREFER_LARGE_60 }, |
131 | { "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 }, | | 131 | { "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 }, |
132 | | | 132 | |
133 | /* ViewSonic VA2026w */ | | 133 | /* ViewSonic VA2026w */ |
134 | { "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING }, | | 134 | { "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING }, |
135 | }; | | 135 | }; |
136 | | | 136 | |
137 | /*** DDC fetch and block validation ***/ | | 137 | /*** DDC fetch and block validation ***/ |
138 | | | 138 | |
139 | static const u8 edid_header[] = { | | 139 | static const u8 edid_header[] = { |
140 | 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 | | 140 | 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 |
141 | }; | | 141 | }; |
142 | | | 142 | |
143 | /* | | 143 | /* |
144 | * Sanity check the header of the base EDID block. Return 8 if the header | | 144 | * Sanity check the header of the base EDID block. Return 8 if the header |
145 | * is perfect, down to 0 if it's totally wrong. | | 145 | * is perfect, down to 0 if it's totally wrong. |
146 | */ | | 146 | */ |
147 | int drm_edid_header_is_valid(const u8 *raw_edid) | | 147 | int drm_edid_header_is_valid(const u8 *raw_edid) |
148 | { | | 148 | { |
149 | int i, score = 0; | | 149 | int i, score = 0; |
150 | | | 150 | |
151 | for (i = 0; i < sizeof(edid_header); i++) | | 151 | for (i = 0; i < sizeof(edid_header); i++) |
152 | if (raw_edid[i] == edid_header[i]) | | 152 | if (raw_edid[i] == edid_header[i]) |
153 | score++; | | 153 | score++; |
154 | | | 154 | |
155 | return score; | | 155 | return score; |
156 | } | | 156 | } |
157 | EXPORT_SYMBOL(drm_edid_header_is_valid); | | 157 | EXPORT_SYMBOL(drm_edid_header_is_valid); |
158 | | | 158 | |
159 | static int edid_fixup __read_mostly = 6; | | 159 | static int edid_fixup __read_mostly = 6; |
160 | module_param_named(edid_fixup, edid_fixup, int, 0400); | | 160 | module_param_named(edid_fixup, edid_fixup, int, 0400); |
161 | MODULE_PARM_DESC(edid_fixup, | | 161 | MODULE_PARM_DESC(edid_fixup, |
162 | "Minimum number of valid EDID header bytes (0-8, default 6)"); | | 162 | "Minimum number of valid EDID header bytes (0-8, default 6)"); |
163 | | | 163 | |
164 | /* | | 164 | /* |
165 | * Sanity check the EDID block (base or extension). Return 0 if the block | | 165 | * Sanity check the EDID block (base or extension). Return 0 if the block |
166 | * doesn't check out, or 1 if it's valid. | | 166 | * doesn't check out, or 1 if it's valid. |
167 | */ | | 167 | */ |
168 | bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid) | | 168 | bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid) |
169 | { | | 169 | { |
170 | int i; | | 170 | int i; |
171 | u8 csum = 0; | | 171 | u8 csum = 0; |
172 | struct edid *edid = (struct edid *)raw_edid; | | 172 | struct edid *edid = (struct edid *)raw_edid; |
173 | | | 173 | |
174 | if (edid_fixup > 8 || edid_fixup < 0) | | 174 | if (edid_fixup > 8 || edid_fixup < 0) |
175 | edid_fixup = 6; | | 175 | edid_fixup = 6; |
176 | | | 176 | |
177 | if (block == 0) { | | 177 | if (block == 0) { |
178 | int score = drm_edid_header_is_valid(raw_edid); | | 178 | int score = drm_edid_header_is_valid(raw_edid); |
179 | if (score == 8) ; | | 179 | if (score == 8) ; |
180 | else if (score >= edid_fixup) { | | 180 | else if (score >= edid_fixup) { |
181 | DRM_DEBUG("Fixing EDID header, your hardware may be failing\n"); | | 181 | DRM_DEBUG("Fixing EDID header, your hardware may be failing\n"); |
182 | memcpy(raw_edid, edid_header, sizeof(edid_header)); | | 182 | memcpy(raw_edid, edid_header, sizeof(edid_header)); |
183 | } else { | | 183 | } else { |
184 | goto bad; | | 184 | goto bad; |
185 | } | | 185 | } |
186 | } | | 186 | } |
187 | | | 187 | |
188 | for (i = 0; i < EDID_LENGTH; i++) | | 188 | for (i = 0; i < EDID_LENGTH; i++) |
189 | csum += raw_edid[i]; | | 189 | csum += raw_edid[i]; |
190 | if (csum) { | | 190 | if (csum) { |
191 | if (print_bad_edid) { | | 191 | if (print_bad_edid) { |
192 | DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum); | | 192 | DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum); |
193 | } | | 193 | } |
194 | | | 194 | |
195 | /* allow CEA to slide through, switches mangle this */ | | 195 | /* allow CEA to slide through, switches mangle this */ |
196 | if (raw_edid[0] != 0x02) | | 196 | if (raw_edid[0] != 0x02) |
197 | goto bad; | | 197 | goto bad; |
198 | } | | 198 | } |
199 | | | 199 | |
200 | /* per-block-type checks */ | | 200 | /* per-block-type checks */ |
201 | switch (raw_edid[0]) { | | 201 | switch (raw_edid[0]) { |
202 | case 0: /* base */ | | 202 | case 0: /* base */ |
203 | if (edid->version != 1) { | | 203 | if (edid->version != 1) { |
204 | DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version); | | 204 | DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version); |
205 | goto bad; | | 205 | goto bad; |
206 | } | | 206 | } |
207 | | | 207 | |
208 | if (edid->revision > 4) | | 208 | if (edid->revision > 4) |
209 | DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n"); | | 209 | DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n"); |
210 | break; | | 210 | break; |
211 | | | 211 | |
212 | default: | | 212 | default: |
213 | break; | | 213 | break; |
214 | } | | 214 | } |
215 | | | 215 | |
216 | return 1; | | 216 | return 1; |
217 | | | 217 | |
218 | bad: | | 218 | bad: |
219 | if (raw_edid && print_bad_edid) { | | 219 | if (raw_edid && print_bad_edid) { |
220 | printk(KERN_ERR "Raw EDID:\n"); | | 220 | printk(KERN_ERR "Raw EDID:\n"); |
221 | #ifdef __NetBSD__ | | 221 | #ifdef __NetBSD__ |
222 | for (i = 0; i < EDID_LENGTH; i++) { | | 222 | for (i = 0; i < EDID_LENGTH; i++) { |
223 | printf("%02x", raw_edid[i]); | | 223 | printf("%02x", raw_edid[i]); |
224 | if ((i % 16) == 15) | | 224 | if ((i % 16) == 15) |
225 | printf("\n"); | | 225 | printf("\n"); |
226 | else | | 226 | else |
227 | printf(" "); | | 227 | printf(" "); |
228 | } | | 228 | } |
229 | #else | | 229 | #else |
230 | print_hex_dump(KERN_ERR, " \t", DUMP_PREFIX_NONE, 16, 1, | | 230 | print_hex_dump(KERN_ERR, " \t", DUMP_PREFIX_NONE, 16, 1, |
231 | raw_edid, EDID_LENGTH, false); | | 231 | raw_edid, EDID_LENGTH, false); |
232 | #endif | | 232 | #endif |
233 | } | | 233 | } |
234 | return 0; | | 234 | return 0; |
235 | } | | 235 | } |
236 | EXPORT_SYMBOL(drm_edid_block_valid); | | 236 | EXPORT_SYMBOL(drm_edid_block_valid); |
237 | | | 237 | |
238 | /** | | 238 | /** |
239 | * drm_edid_is_valid - sanity check EDID data | | 239 | * drm_edid_is_valid - sanity check EDID data |
240 | * @edid: EDID data | | 240 | * @edid: EDID data |
241 | * | | 241 | * |
242 | * Sanity-check an entire EDID record (including extensions) | | 242 | * Sanity-check an entire EDID record (including extensions) |
243 | */ | | 243 | */ |
244 | bool drm_edid_is_valid(struct edid *edid) | | 244 | bool drm_edid_is_valid(struct edid *edid) |
245 | { | | 245 | { |
246 | int i; | | 246 | int i; |
247 | u8 *raw = (u8 *)edid; | | 247 | u8 *raw = (u8 *)edid; |
248 | | | 248 | |
249 | if (!edid) | | 249 | if (!edid) |
250 | return false; | | 250 | return false; |
251 | | | 251 | |
252 | for (i = 0; i <= edid->extensions; i++) | | 252 | for (i = 0; i <= edid->extensions; i++) |
253 | if (!drm_edid_block_valid(raw + i * EDID_LENGTH, i, true)) | | 253 | if (!drm_edid_block_valid(raw + i * EDID_LENGTH, i, true)) |
254 | return false; | | 254 | return false; |
255 | | | 255 | |
256 | return true; | | 256 | return true; |
257 | } | | 257 | } |
258 | EXPORT_SYMBOL(drm_edid_is_valid); | | 258 | EXPORT_SYMBOL(drm_edid_is_valid); |
259 | | | 259 | |
| | | 260 | #ifndef __NetBSD__ /* XXX i2c */ |
| | | 261 | |
260 | #define DDC_SEGMENT_ADDR 0x30 | | 262 | #define DDC_SEGMENT_ADDR 0x30 |
261 | /** | | 263 | /** |
262 | * Get EDID information via I2C. | | 264 | * Get EDID information via I2C. |
263 | * | | 265 | * |
264 | * \param adapter : i2c device adaptor | | 266 | * \param adapter : i2c device adaptor |
265 | * \param buf : EDID data buffer to be filled | | 267 | * \param buf : EDID data buffer to be filled |
266 | * \param len : EDID data buffer length | | 268 | * \param len : EDID data buffer length |
267 | * \return 0 on success or -1 on failure. | | 269 | * \return 0 on success or -1 on failure. |
268 | * | | 270 | * |
269 | * Try to fetch EDID information by calling i2c driver function. | | 271 | * Try to fetch EDID information by calling i2c driver function. |
270 | */ | | 272 | */ |
271 | static int | | 273 | static int |
272 | drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf, | | 274 | drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf, |
273 | int block, int len) | | 275 | int block, int len) |
274 | { | | 276 | { |
275 | unsigned char start = block * EDID_LENGTH; | | 277 | unsigned char start = block * EDID_LENGTH; |
276 | unsigned char segment = block >> 1; | | 278 | unsigned char segment = block >> 1; |
277 | unsigned char xfers = segment ? 3 : 2; | | 279 | unsigned char xfers = segment ? 3 : 2; |
278 | int ret, retries = 5; | | 280 | int ret, retries = 5; |
279 | | | 281 | |
280 | /* The core i2c driver will automatically retry the transfer if the | | 282 | /* The core i2c driver will automatically retry the transfer if the |
281 | * adapter reports EAGAIN. However, we find that bit-banging transfers | | 283 | * adapter reports EAGAIN. However, we find that bit-banging transfers |
282 | * are susceptible to errors under a heavily loaded machine and | | 284 | * are susceptible to errors under a heavily loaded machine and |
283 | * generate spurious NAKs and timeouts. Retrying the transfer | | 285 | * generate spurious NAKs and timeouts. Retrying the transfer |
284 | * of the individual block a few times seems to overcome this. | | 286 | * of the individual block a few times seems to overcome this. |
285 | */ | | 287 | */ |
286 | do { | | 288 | do { |
287 | struct i2c_msg msgs[] = { | | 289 | struct i2c_msg msgs[] = { |
288 | { | | 290 | { |
289 | .addr = DDC_SEGMENT_ADDR, | | 291 | .addr = DDC_SEGMENT_ADDR, |
290 | .flags = 0, | | 292 | .flags = 0, |
291 | .len = 1, | | 293 | .len = 1, |
292 | .buf = &segment, | | 294 | .buf = &segment, |
293 | }, { | | 295 | }, { |
294 | .addr = DDC_ADDR, | | 296 | .addr = DDC_ADDR, |
295 | .flags = 0, | | 297 | .flags = 0, |
296 | .len = 1, | | 298 | .len = 1, |
297 | .buf = &start, | | 299 | .buf = &start, |
298 | }, { | | 300 | }, { |
299 | .addr = DDC_ADDR, | | 301 | .addr = DDC_ADDR, |
300 | .flags = I2C_M_RD, | | 302 | .flags = I2C_M_RD, |
301 | .len = len, | | 303 | .len = len, |
302 | .buf = buf, | | 304 | .buf = buf, |
303 | } | | 305 | } |
304 | }; | | 306 | }; |
305 | | | 307 | |
306 | /* | | 308 | /* |
307 | * Avoid sending the segment addr to not upset non-compliant ddc | | 309 | * Avoid sending the segment addr to not upset non-compliant ddc |
308 | * monitors. | | 310 | * monitors. |
309 | */ | | 311 | */ |
310 | ret = i2c_transfer(adapter, &msgs[3 - xfers], xfers); | | 312 | ret = i2c_transfer(adapter, &msgs[3 - xfers], xfers); |
311 | | | 313 | |
312 | if (ret == -ENXIO) { | | 314 | if (ret == -ENXIO) { |
313 | DRM_DEBUG_KMS("drm: skipping non-existent adapter %s\n", | | 315 | DRM_DEBUG_KMS("drm: skipping non-existent adapter %s\n", |
314 | adapter->name); | | 316 | adapter->name); |
315 | break; | | 317 | break; |
316 | } | | 318 | } |
317 | } while (ret != xfers && --retries); | | 319 | } while (ret != xfers && --retries); |
318 | | | 320 | |
319 | return ret == xfers ? 0 : -1; | | 321 | return ret == xfers ? 0 : -1; |
320 | } | | 322 | } |
321 | | | 323 | |
322 | static bool drm_edid_is_zero(u8 *in_edid, int length) | | 324 | static bool drm_edid_is_zero(u8 *in_edid, int length) |
323 | { | | 325 | { |
324 | if (memchr_inv(in_edid, 0, length)) | | 326 | if (memchr_inv(in_edid, 0, length)) |
325 | return false; | | 327 | return false; |
326 | | | 328 | |
327 | return true; | | 329 | return true; |
328 | } | | 330 | } |
329 | | | 331 | |
330 | static u8 * | | 332 | static u8 * |
331 | drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter) | | 333 | drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter) |
332 | { | | 334 | { |
333 | int i, j = 0, valid_extensions = 0; | | 335 | int i, j = 0, valid_extensions = 0; |
334 | u8 *block, *new; | | 336 | u8 *block, *new; |
335 | bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS); | | 337 | bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS); |
336 | | | 338 | |
337 | if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL) | | 339 | if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL) |
338 | return NULL; | | 340 | return NULL; |
339 | | | 341 | |
340 | /* base block fetch */ | | 342 | /* base block fetch */ |
341 | for (i = 0; i < 4; i++) { | | 343 | for (i = 0; i < 4; i++) { |
342 | if (drm_do_probe_ddc_edid(adapter, block, 0, EDID_LENGTH)) | | 344 | if (drm_do_probe_ddc_edid(adapter, block, 0, EDID_LENGTH)) |
343 | goto out; | | 345 | goto out; |
344 | if (drm_edid_block_valid(block, 0, print_bad_edid)) | | 346 | if (drm_edid_block_valid(block, 0, print_bad_edid)) |
345 | break; | | 347 | break; |
346 | if (i == 0 && drm_edid_is_zero(block, EDID_LENGTH)) { | | 348 | if (i == 0 && drm_edid_is_zero(block, EDID_LENGTH)) { |
347 | connector->null_edid_counter++; | | 349 | connector->null_edid_counter++; |
348 | goto carp; | | 350 | goto carp; |
349 | } | | 351 | } |
350 | } | | 352 | } |
351 | if (i == 4) | | 353 | if (i == 4) |
352 | goto carp; | | 354 | goto carp; |
353 | | | 355 | |
354 | /* if there's no extensions, we're done */ | | 356 | /* if there's no extensions, we're done */ |
355 | if (block[0x7e] == 0) | | 357 | if (block[0x7e] == 0) |
356 | return block; | | 358 | return block; |
357 | | | 359 | |
358 | new = krealloc(block, (block[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL); | | 360 | new = krealloc(block, (block[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL); |
359 | if (!new) | | 361 | if (!new) |
360 | goto out; | | 362 | goto out; |
361 | block = new; | | 363 | block = new; |
362 | | | 364 | |
363 | for (j = 1; j <= block[0x7e]; j++) { | | 365 | for (j = 1; j <= block[0x7e]; j++) { |
364 | for (i = 0; i < 4; i++) { | | 366 | for (i = 0; i < 4; i++) { |
365 | if (drm_do_probe_ddc_edid(adapter, | | 367 | if (drm_do_probe_ddc_edid(adapter, |
366 | block + (valid_extensions + 1) * EDID_LENGTH, | | 368 | block + (valid_extensions + 1) * EDID_LENGTH, |
367 | j, EDID_LENGTH)) | | 369 | j, EDID_LENGTH)) |
368 | goto out; | | 370 | goto out; |
369 | if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH, j, print_bad_edid)) { | | 371 | if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH, j, print_bad_edid)) { |
370 | valid_extensions++; | | 372 | valid_extensions++; |
371 | break; | | 373 | break; |
372 | } | | 374 | } |
373 | } | | 375 | } |
374 | if (i == 4) | | 376 | if (i == 4) |
375 | dev_warn(connector->dev->dev, | | 377 | dev_warn(connector->dev->dev, |
376 | "%s: Ignoring invalid EDID block %d.\n", | | 378 | "%s: Ignoring invalid EDID block %d.\n", |
377 | drm_get_connector_name(connector), j); | | 379 | drm_get_connector_name(connector), j); |
378 | } | | 380 | } |
379 | | | 381 | |
380 | if (valid_extensions != block[0x7e]) { | | 382 | if (valid_extensions != block[0x7e]) { |
381 | block[EDID_LENGTH-1] += block[0x7e] - valid_extensions; | | 383 | block[EDID_LENGTH-1] += block[0x7e] - valid_extensions; |
382 | block[0x7e] = valid_extensions; | | 384 | block[0x7e] = valid_extensions; |
383 | new = krealloc(block, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL); | | 385 | new = krealloc(block, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL); |
384 | if (!new) | | 386 | if (!new) |
385 | goto out; | | 387 | goto out; |
386 | block = new; | | 388 | block = new; |
387 | } | | 389 | } |
388 | | | 390 | |
389 | return block; | | 391 | return block; |
390 | | | 392 | |
391 | carp: | | 393 | carp: |
392 | if (print_bad_edid) { | | 394 | if (print_bad_edid) { |
393 | dev_warn(connector->dev->dev, "%s: EDID block %d invalid.\n", | | 395 | dev_warn(connector->dev->dev, "%s: EDID block %d invalid.\n", |
394 | drm_get_connector_name(connector), j); | | 396 | drm_get_connector_name(connector), j); |
395 | } | | 397 | } |
396 | connector->bad_edid_counter++; | | 398 | connector->bad_edid_counter++; |
397 | | | 399 | |
398 | out: | | 400 | out: |
399 | kfree(block); | | 401 | kfree(block); |
400 | return NULL; | | 402 | return NULL; |
401 | } | | 403 | } |
402 | | | 404 | |
403 | /** | | 405 | /** |
404 | * Probe DDC presence. | | 406 | * Probe DDC presence. |
405 | * | | 407 | * |
406 | * \param adapter : i2c device adaptor | | 408 | * \param adapter : i2c device adaptor |
407 | * \return 1 on success | | 409 | * \return 1 on success |
408 | */ | | 410 | */ |
409 | bool | | 411 | bool |
410 | drm_probe_ddc(struct i2c_adapter *adapter) | | 412 | drm_probe_ddc(struct i2c_adapter *adapter) |
411 | { | | 413 | { |
412 | unsigned char out; | | 414 | unsigned char out; |
413 | | | 415 | |
414 | return (drm_do_probe_ddc_edid(adapter, &out, 0, 1) == 0); | | 416 | return (drm_do_probe_ddc_edid(adapter, &out, 0, 1) == 0); |
415 | } | | 417 | } |
416 | EXPORT_SYMBOL(drm_probe_ddc); | | 418 | EXPORT_SYMBOL(drm_probe_ddc); |
417 | | | 419 | |
418 | /** | | 420 | /** |
419 | * drm_get_edid - get EDID data, if available | | 421 | * drm_get_edid - get EDID data, if available |
420 | * @connector: connector we're probing | | 422 | * @connector: connector we're probing |
421 | * @adapter: i2c adapter to use for DDC | | 423 | * @adapter: i2c adapter to use for DDC |
422 | * | | 424 | * |
423 | * Poke the given i2c channel to grab EDID data if possible. If found, | | 425 | * Poke the given i2c channel to grab EDID data if possible. If found, |
424 | * attach it to the connector. | | 426 | * attach it to the connector. |
425 | * | | 427 | * |
426 | * Return edid data or NULL if we couldn't find any. | | 428 | * Return edid data or NULL if we couldn't find any. |
427 | */ | | 429 | */ |
428 | struct edid *drm_get_edid(struct drm_connector *connector, | | 430 | struct edid *drm_get_edid(struct drm_connector *connector, |
429 | struct i2c_adapter *adapter) | | 431 | struct i2c_adapter *adapter) |
430 | { | | 432 | { |
431 | struct edid *edid = NULL; | | 433 | struct edid *edid = NULL; |
432 | | | 434 | |
433 | if (drm_probe_ddc(adapter)) | | 435 | if (drm_probe_ddc(adapter)) |
434 | edid = (struct edid *)drm_do_get_edid(connector, adapter); | | 436 | edid = (struct edid *)drm_do_get_edid(connector, adapter); |
435 | | | 437 | |
436 | return edid; | | 438 | return edid; |
437 | } | | 439 | } |
438 | EXPORT_SYMBOL(drm_get_edid); | | 440 | EXPORT_SYMBOL(drm_get_edid); |
439 | | | 441 | |
| | | 442 | #endif /* !defined(__NetBSD__) */ |
| | | 443 | |
440 | /*** EDID parsing ***/ | | 444 | /*** EDID parsing ***/ |
441 | | | 445 | |
442 | /** | | 446 | /** |
443 | * edid_vendor - match a string against EDID's obfuscated vendor field | | 447 | * edid_vendor - match a string against EDID's obfuscated vendor field |
444 | * @edid: EDID to match | | 448 | * @edid: EDID to match |
445 | * @vendor: vendor string | | 449 | * @vendor: vendor string |
446 | * | | 450 | * |
447 | * Returns true if @vendor is in @edid, false otherwise | | 451 | * Returns true if @vendor is in @edid, false otherwise |
448 | */ | | 452 | */ |
449 | static bool edid_vendor(struct edid *edid, char *vendor) | | 453 | static bool edid_vendor(struct edid *edid, char *vendor) |
450 | { | | 454 | { |
451 | char edid_vendor[3]; | | 455 | char edid_vendor[3]; |
452 | | | 456 | |
453 | edid_vendor[0] = ((edid->mfg_id[0] & 0x7c) >> 2) + '@'; | | 457 | edid_vendor[0] = ((edid->mfg_id[0] & 0x7c) >> 2) + '@'; |
454 | edid_vendor[1] = (((edid->mfg_id[0] & 0x3) << 3) | | | 458 | edid_vendor[1] = (((edid->mfg_id[0] & 0x3) << 3) | |
455 | ((edid->mfg_id[1] & 0xe0) >> 5)) + '@'; | | 459 | ((edid->mfg_id[1] & 0xe0) >> 5)) + '@'; |
456 | edid_vendor[2] = (edid->mfg_id[1] & 0x1f) + '@'; | | 460 | edid_vendor[2] = (edid->mfg_id[1] & 0x1f) + '@'; |
457 | | | 461 | |
458 | return !strncmp(edid_vendor, vendor, 3); | | 462 | return !strncmp(edid_vendor, vendor, 3); |
459 | } | | 463 | } |
460 | | | 464 | |
461 | /** | | 465 | /** |
462 | * edid_get_quirks - return quirk flags for a given EDID | | 466 | * edid_get_quirks - return quirk flags for a given EDID |
463 | * @edid: EDID to process | | 467 | * @edid: EDID to process |
464 | * | | 468 | * |
465 | * This tells subsequent routines what fixes they need to apply. | | 469 | * This tells subsequent routines what fixes they need to apply. |
466 | */ | | 470 | */ |
467 | static u32 edid_get_quirks(struct edid *edid) | | 471 | static u32 edid_get_quirks(struct edid *edid) |
468 | { | | 472 | { |
469 | struct edid_quirk *quirk; | | 473 | struct edid_quirk *quirk; |
470 | int i; | | 474 | int i; |
471 | | | 475 | |
472 | for (i = 0; i < ARRAY_SIZE(edid_quirk_list); i++) { | | 476 | for (i = 0; i < ARRAY_SIZE(edid_quirk_list); i++) { |
473 | quirk = &edid_quirk_list[i]; | | 477 | quirk = &edid_quirk_list[i]; |
474 | | | 478 | |
475 | if (edid_vendor(edid, quirk->vendor) && | | 479 | if (edid_vendor(edid, quirk->vendor) && |
476 | (EDID_PRODUCT_ID(edid) == quirk->product_id)) | | 480 | (EDID_PRODUCT_ID(edid) == quirk->product_id)) |
477 | return quirk->quirks; | | 481 | return quirk->quirks; |
478 | } | | 482 | } |
479 | | | 483 | |
480 | return 0; | | 484 | return 0; |
481 | } | | 485 | } |
482 | | | 486 | |
#define MODE_SIZE(m) ((m)->hdisplay * (m)->vdisplay)
/*
 * Distance between mode @m's vertical refresh and target rate @r.
 * The previous version ignored @r and silently captured a variable
 * named 'target_refresh' from the expansion site, which only worked
 * by accident; use the parameter so the macro is self-contained.
 */
#define MODE_REFRESH_DIFF(m, r) (abs((m)->vrefresh - (r)))
485 | | | 489 | |
486 | /** | | 490 | /** |
487 | * edid_fixup_preferred - set preferred modes based on quirk list | | 491 | * edid_fixup_preferred - set preferred modes based on quirk list |
488 | * @connector: has mode list to fix up | | 492 | * @connector: has mode list to fix up |
489 | * @quirks: quirks list | | 493 | * @quirks: quirks list |
490 | * | | 494 | * |
491 | * Walk the mode list for @connector, clearing the preferred status | | 495 | * Walk the mode list for @connector, clearing the preferred status |
492 | * on existing modes and setting it anew for the right mode ala @quirks. | | 496 | * on existing modes and setting it anew for the right mode ala @quirks. |
493 | */ | | 497 | */ |
494 | static void edid_fixup_preferred(struct drm_connector *connector, | | 498 | static void edid_fixup_preferred(struct drm_connector *connector, |
495 | u32 quirks) | | 499 | u32 quirks) |
496 | { | | 500 | { |
497 | struct drm_display_mode *t, *cur_mode, *preferred_mode; | | 501 | struct drm_display_mode *t, *cur_mode, *preferred_mode; |
498 | int target_refresh = 0; | | 502 | int target_refresh = 0; |
499 | | | 503 | |
500 | if (list_empty(&connector->probed_modes)) | | 504 | if (list_empty(&connector->probed_modes)) |
501 | return; | | 505 | return; |
502 | | | 506 | |
503 | if (quirks & EDID_QUIRK_PREFER_LARGE_60) | | 507 | if (quirks & EDID_QUIRK_PREFER_LARGE_60) |
504 | target_refresh = 60; | | 508 | target_refresh = 60; |
505 | if (quirks & EDID_QUIRK_PREFER_LARGE_75) | | 509 | if (quirks & EDID_QUIRK_PREFER_LARGE_75) |
506 | target_refresh = 75; | | 510 | target_refresh = 75; |
507 | | | 511 | |
508 | preferred_mode = list_first_entry(&connector->probed_modes, | | 512 | preferred_mode = list_first_entry(&connector->probed_modes, |
509 | struct drm_display_mode, head); | | 513 | struct drm_display_mode, head); |
510 | | | 514 | |
511 | list_for_each_entry_safe(cur_mode, t, &connector->probed_modes, head) { | | 515 | list_for_each_entry_safe(cur_mode, t, &connector->probed_modes, head) { |
512 | cur_mode->type &= ~DRM_MODE_TYPE_PREFERRED; | | 516 | cur_mode->type &= ~DRM_MODE_TYPE_PREFERRED; |
513 | | | 517 | |
514 | if (cur_mode == preferred_mode) | | 518 | if (cur_mode == preferred_mode) |
515 | continue; | | 519 | continue; |
516 | | | 520 | |
517 | /* Largest mode is preferred */ | | 521 | /* Largest mode is preferred */ |
518 | if (MODE_SIZE(cur_mode) > MODE_SIZE(preferred_mode)) | | 522 | if (MODE_SIZE(cur_mode) > MODE_SIZE(preferred_mode)) |
519 | preferred_mode = cur_mode; | | 523 | preferred_mode = cur_mode; |
520 | | | 524 | |
521 | /* At a given size, try to get closest to target refresh */ | | 525 | /* At a given size, try to get closest to target refresh */ |
522 | if ((MODE_SIZE(cur_mode) == MODE_SIZE(preferred_mode)) && | | 526 | if ((MODE_SIZE(cur_mode) == MODE_SIZE(preferred_mode)) && |
523 | MODE_REFRESH_DIFF(cur_mode, target_refresh) < | | 527 | MODE_REFRESH_DIFF(cur_mode, target_refresh) < |
524 | MODE_REFRESH_DIFF(preferred_mode, target_refresh)) { | | 528 | MODE_REFRESH_DIFF(preferred_mode, target_refresh)) { |
525 | preferred_mode = cur_mode; | | 529 | preferred_mode = cur_mode; |
526 | } | | 530 | } |
527 | } | | 531 | } |
528 | | | 532 | |
529 | preferred_mode->type |= DRM_MODE_TYPE_PREFERRED; | | 533 | preferred_mode->type |= DRM_MODE_TYPE_PREFERRED; |
530 | } | | 534 | } |
531 | | | 535 | |
532 | static bool | | 536 | static bool |
533 | mode_is_rb(const struct drm_display_mode *mode) | | 537 | mode_is_rb(const struct drm_display_mode *mode) |
534 | { | | 538 | { |
535 | return (mode->htotal - mode->hdisplay == 160) && | | 539 | return (mode->htotal - mode->hdisplay == 160) && |
536 | (mode->hsync_end - mode->hdisplay == 80) && | | 540 | (mode->hsync_end - mode->hdisplay == 80) && |
537 | (mode->hsync_end - mode->hsync_start == 32) && | | 541 | (mode->hsync_end - mode->hsync_start == 32) && |
538 | (mode->vsync_start - mode->vdisplay == 3); | | 542 | (mode->vsync_start - mode->vdisplay == 3); |
539 | } | | 543 | } |
540 | | | 544 | |
541 | /* | | 545 | /* |
542 | * drm_mode_find_dmt - Create a copy of a mode if present in DMT | | 546 | * drm_mode_find_dmt - Create a copy of a mode if present in DMT |
543 | * @dev: Device to duplicate against | | 547 | * @dev: Device to duplicate against |
544 | * @hsize: Mode width | | 548 | * @hsize: Mode width |
545 | * @vsize: Mode height | | 549 | * @vsize: Mode height |
546 | * @fresh: Mode refresh rate | | 550 | * @fresh: Mode refresh rate |
547 | * @rb: Mode reduced-blanking-ness | | 551 | * @rb: Mode reduced-blanking-ness |
548 | * | | 552 | * |
549 | * Walk the DMT mode list looking for a match for the given parameters. | | 553 | * Walk the DMT mode list looking for a match for the given parameters. |
550 | * Return a newly allocated copy of the mode, or NULL if not found. | | 554 | * Return a newly allocated copy of the mode, or NULL if not found. |
551 | */ | | 555 | */ |
552 | struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev, | | 556 | struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev, |
553 | int hsize, int vsize, int fresh, | | 557 | int hsize, int vsize, int fresh, |
554 | bool rb) | | 558 | bool rb) |
555 | { | | 559 | { |
556 | int i; | | 560 | int i; |
557 | | | 561 | |
558 | for (i = 0; i < drm_num_dmt_modes; i++) { | | 562 | for (i = 0; i < drm_num_dmt_modes; i++) { |
559 | const struct drm_display_mode *ptr = &drm_dmt_modes[i]; | | 563 | const struct drm_display_mode *ptr = &drm_dmt_modes[i]; |
560 | if (hsize != ptr->hdisplay) | | 564 | if (hsize != ptr->hdisplay) |
561 | continue; | | 565 | continue; |
562 | if (vsize != ptr->vdisplay) | | 566 | if (vsize != ptr->vdisplay) |
563 | continue; | | 567 | continue; |
564 | if (fresh != drm_mode_vrefresh(ptr)) | | 568 | if (fresh != drm_mode_vrefresh(ptr)) |
565 | continue; | | 569 | continue; |
566 | if (rb != mode_is_rb(ptr)) | | 570 | if (rb != mode_is_rb(ptr)) |
567 | continue; | | 571 | continue; |
568 | | | 572 | |
569 | return drm_mode_duplicate(dev, ptr); | | 573 | return drm_mode_duplicate(dev, ptr); |
570 | } | | 574 | } |
571 | | | 575 | |
572 | return NULL; | | 576 | return NULL; |
573 | } | | 577 | } |
574 | EXPORT_SYMBOL(drm_mode_find_dmt); | | 578 | EXPORT_SYMBOL(drm_mode_find_dmt); |
575 | | | 579 | |
576 | typedef void detailed_cb(struct detailed_timing *timing, void *closure); | | 580 | typedef void detailed_cb(struct detailed_timing *timing, void *closure); |
577 | | | 581 | |
578 | static void | | 582 | static void |
579 | cea_for_each_detailed_block(u8 *ext, detailed_cb *cb, void *closure) | | 583 | cea_for_each_detailed_block(u8 *ext, detailed_cb *cb, void *closure) |
580 | { | | 584 | { |
581 | int i, n = 0; | | 585 | int i, n = 0; |
582 | u8 d = ext[0x02]; | | 586 | u8 d = ext[0x02]; |
583 | u8 *det_base = ext + d; | | 587 | u8 *det_base = ext + d; |
584 | | | 588 | |
585 | n = (127 - d) / 18; | | 589 | n = (127 - d) / 18; |
586 | for (i = 0; i < n; i++) | | 590 | for (i = 0; i < n; i++) |
587 | cb((struct detailed_timing *)(det_base + 18 * i), closure); | | 591 | cb((struct detailed_timing *)(det_base + 18 * i), closure); |
588 | } | | 592 | } |
589 | | | 593 | |
590 | static void | | 594 | static void |
591 | vtb_for_each_detailed_block(u8 *ext, detailed_cb *cb, void *closure) | | 595 | vtb_for_each_detailed_block(u8 *ext, detailed_cb *cb, void *closure) |
592 | { | | 596 | { |
593 | unsigned int i, n = min((int)ext[0x02], 6); | | 597 | unsigned int i, n = min((int)ext[0x02], 6); |
594 | u8 *det_base = ext + 5; | | 598 | u8 *det_base = ext + 5; |
595 | | | 599 | |
596 | if (ext[0x01] != 1) | | 600 | if (ext[0x01] != 1) |
597 | return; /* unknown version */ | | 601 | return; /* unknown version */ |
598 | | | 602 | |
599 | for (i = 0; i < n; i++) | | 603 | for (i = 0; i < n; i++) |
600 | cb((struct detailed_timing *)(det_base + 18 * i), closure); | | 604 | cb((struct detailed_timing *)(det_base + 18 * i), closure); |
601 | } | | 605 | } |
602 | | | 606 | |
603 | static void | | 607 | static void |
604 | drm_for_each_detailed_block(u8 *raw_edid, detailed_cb *cb, void *closure) | | 608 | drm_for_each_detailed_block(u8 *raw_edid, detailed_cb *cb, void *closure) |
605 | { | | 609 | { |
606 | int i; | | 610 | int i; |
607 | struct edid *edid = (struct edid *)raw_edid; | | 611 | struct edid *edid = (struct edid *)raw_edid; |
608 | | | 612 | |
609 | if (edid == NULL) | | 613 | if (edid == NULL) |
610 | return; | | 614 | return; |
611 | | | 615 | |
612 | for (i = 0; i < EDID_DETAILED_TIMINGS; i++) | | 616 | for (i = 0; i < EDID_DETAILED_TIMINGS; i++) |
613 | cb(&(edid->detailed_timings[i]), closure); | | 617 | cb(&(edid->detailed_timings[i]), closure); |
614 | | | 618 | |
615 | for (i = 1; i <= raw_edid[0x7e]; i++) { | | 619 | for (i = 1; i <= raw_edid[0x7e]; i++) { |
616 | u8 *ext = raw_edid + (i * EDID_LENGTH); | | 620 | u8 *ext = raw_edid + (i * EDID_LENGTH); |
617 | switch (*ext) { | | 621 | switch (*ext) { |
618 | case CEA_EXT: | | 622 | case CEA_EXT: |
619 | cea_for_each_detailed_block(ext, cb, closure); | | 623 | cea_for_each_detailed_block(ext, cb, closure); |
620 | break; | | 624 | break; |
621 | case VTB_EXT: | | 625 | case VTB_EXT: |
622 | vtb_for_each_detailed_block(ext, cb, closure); | | 626 | vtb_for_each_detailed_block(ext, cb, closure); |
623 | break; | | 627 | break; |
624 | default: | | 628 | default: |
625 | break; | | 629 | break; |
626 | } | | 630 | } |
627 | } | | 631 | } |
628 | } | | 632 | } |
629 | | | 633 | |
630 | static void | | 634 | static void |
631 | is_rb(struct detailed_timing *t, void *data) | | 635 | is_rb(struct detailed_timing *t, void *data) |
632 | { | | 636 | { |
633 | u8 *r = (u8 *)t; | | 637 | u8 *r = (u8 *)t; |
634 | if (r[3] == EDID_DETAIL_MONITOR_RANGE) | | 638 | if (r[3] == EDID_DETAIL_MONITOR_RANGE) |
635 | if (r[15] & 0x10) | | 639 | if (r[15] & 0x10) |
636 | *(bool *)data = true; | | 640 | *(bool *)data = true; |
637 | } | | 641 | } |
638 | | | 642 | |
639 | /* EDID 1.4 defines this explicitly. For EDID 1.3, we guess, badly. */ | | 643 | /* EDID 1.4 defines this explicitly. For EDID 1.3, we guess, badly. */ |
640 | static bool | | 644 | static bool |
641 | drm_monitor_supports_rb(struct edid *edid) | | 645 | drm_monitor_supports_rb(struct edid *edid) |
642 | { | | 646 | { |
643 | if (edid->revision >= 4) { | | 647 | if (edid->revision >= 4) { |
644 | bool ret = false; | | 648 | bool ret = false; |
645 | drm_for_each_detailed_block((u8 *)edid, is_rb, &ret); | | 649 | drm_for_each_detailed_block((u8 *)edid, is_rb, &ret); |
646 | return ret; | | 650 | return ret; |
647 | } | | 651 | } |
648 | | | 652 | |
649 | return ((edid->input & DRM_EDID_INPUT_DIGITAL) != 0); | | 653 | return ((edid->input & DRM_EDID_INPUT_DIGITAL) != 0); |
650 | } | | 654 | } |
651 | | | 655 | |
652 | static void | | 656 | static void |
653 | find_gtf2(struct detailed_timing *t, void *data) | | 657 | find_gtf2(struct detailed_timing *t, void *data) |
654 | { | | 658 | { |
655 | u8 *r = (u8 *)t; | | 659 | u8 *r = (u8 *)t; |
656 | if (r[3] == EDID_DETAIL_MONITOR_RANGE && r[10] == 0x02) | | 660 | if (r[3] == EDID_DETAIL_MONITOR_RANGE && r[10] == 0x02) |
657 | *(u8 **)data = r; | | 661 | *(u8 **)data = r; |
658 | } | | 662 | } |
659 | | | 663 | |
660 | /* Secondary GTF curve kicks in above some break frequency */ | | 664 | /* Secondary GTF curve kicks in above some break frequency */ |
661 | static int | | 665 | static int |
662 | drm_gtf2_hbreak(struct edid *edid) | | 666 | drm_gtf2_hbreak(struct edid *edid) |
663 | { | | 667 | { |
664 | u8 *r = NULL; | | 668 | u8 *r = NULL; |
665 | drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r); | | 669 | drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r); |
666 | return r ? (r[12] * 2) : 0; | | 670 | return r ? (r[12] * 2) : 0; |
667 | } | | 671 | } |
668 | | | 672 | |
669 | static int | | 673 | static int |
670 | drm_gtf2_2c(struct edid *edid) | | 674 | drm_gtf2_2c(struct edid *edid) |
671 | { | | 675 | { |
672 | u8 *r = NULL; | | 676 | u8 *r = NULL; |
673 | drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r); | | 677 | drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r); |
674 | return r ? r[13] : 0; | | 678 | return r ? r[13] : 0; |
675 | } | | 679 | } |
676 | | | 680 | |
677 | static int | | 681 | static int |
678 | drm_gtf2_m(struct edid *edid) | | 682 | drm_gtf2_m(struct edid *edid) |
679 | { | | 683 | { |
680 | u8 *r = NULL; | | 684 | u8 *r = NULL; |
681 | drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r); | | 685 | drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r); |
682 | return r ? (r[15] << 8) + r[14] : 0; | | 686 | return r ? (r[15] << 8) + r[14] : 0; |
683 | } | | 687 | } |
684 | | | 688 | |
685 | static int | | 689 | static int |
686 | drm_gtf2_k(struct edid *edid) | | 690 | drm_gtf2_k(struct edid *edid) |
687 | { | | 691 | { |
688 | u8 *r = NULL; | | 692 | u8 *r = NULL; |
689 | drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r); | | 693 | drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r); |
690 | return r ? r[16] : 0; | | 694 | return r ? r[16] : 0; |
691 | } | | 695 | } |
692 | | | 696 | |
693 | static int | | 697 | static int |
694 | drm_gtf2_2j(struct edid *edid) | | 698 | drm_gtf2_2j(struct edid *edid) |
695 | { | | 699 | { |
696 | u8 *r = NULL; | | 700 | u8 *r = NULL; |
697 | drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r); | | 701 | drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r); |
698 | return r ? r[17] : 0; | | 702 | return r ? r[17] : 0; |
699 | } | | 703 | } |
700 | | | 704 | |
701 | /** | | 705 | /** |
702 | * standard_timing_level - get std. timing level(CVT/GTF/DMT) | | 706 | * standard_timing_level - get std. timing level(CVT/GTF/DMT) |
703 | * @edid: EDID block to scan | | 707 | * @edid: EDID block to scan |
704 | */ | | 708 | */ |
705 | static int standard_timing_level(struct edid *edid) | | 709 | static int standard_timing_level(struct edid *edid) |
706 | { | | 710 | { |
707 | if (edid->revision >= 2) { | | 711 | if (edid->revision >= 2) { |
708 | if (edid->revision >= 4 && (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF)) | | 712 | if (edid->revision >= 4 && (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF)) |
709 | return LEVEL_CVT; | | 713 | return LEVEL_CVT; |
710 | if (drm_gtf2_hbreak(edid)) | | 714 | if (drm_gtf2_hbreak(edid)) |
711 | return LEVEL_GTF2; | | 715 | return LEVEL_GTF2; |
712 | return LEVEL_GTF; | | 716 | return LEVEL_GTF; |
713 | } | | 717 | } |
714 | return LEVEL_DMT; | | 718 | return LEVEL_DMT; |
715 | } | | 719 | } |
716 | | | 720 | |
717 | /* | | 721 | /* |
718 | * 0 is reserved. The spec says 0x01 fill for unused timings. Some old | | 722 | * 0 is reserved. The spec says 0x01 fill for unused timings. Some old |
719 | * monitors fill with ascii space (0x20) instead. | | 723 | * monitors fill with ascii space (0x20) instead. |
720 | */ | | 724 | */ |
721 | static int | | 725 | static int |
722 | bad_std_timing(u8 a, u8 b) | | 726 | bad_std_timing(u8 a, u8 b) |
723 | { | | 727 | { |
724 | return (a == 0x00 && b == 0x00) || | | 728 | return (a == 0x00 && b == 0x00) || |
725 | (a == 0x01 && b == 0x01) || | | 729 | (a == 0x01 && b == 0x01) || |
726 | (a == 0x20 && b == 0x20); | | 730 | (a == 0x20 && b == 0x20); |
727 | } | | 731 | } |
728 | | | 732 | |
729 | /** | | 733 | /** |
730 | * drm_mode_std - convert standard mode info (width, height, refresh) into mode | | 734 | * drm_mode_std - convert standard mode info (width, height, refresh) into mode |
731 | * @t: standard timing params | | 735 | * @t: standard timing params |
732 | * @timing_level: standard timing level | | 736 | * @timing_level: standard timing level |
733 | * | | 737 | * |
734 | * Take the standard timing params (in this case width, aspect, and refresh) | | 738 | * Take the standard timing params (in this case width, aspect, and refresh) |
735 | * and convert them into a real mode using CVT/GTF/DMT. | | 739 | * and convert them into a real mode using CVT/GTF/DMT. |
736 | */ | | 740 | */ |
737 | static struct drm_display_mode * | | 741 | static struct drm_display_mode * |
738 | drm_mode_std(struct drm_connector *connector, struct edid *edid, | | 742 | drm_mode_std(struct drm_connector *connector, struct edid *edid, |
739 | struct std_timing *t, int revision) | | 743 | struct std_timing *t, int revision) |
740 | { | | 744 | { |
741 | struct drm_device *dev = connector->dev; | | 745 | struct drm_device *dev = connector->dev; |
742 | struct drm_display_mode *m, *mode = NULL; | | 746 | struct drm_display_mode *m, *mode = NULL; |
743 | int hsize, vsize; | | 747 | int hsize, vsize; |
744 | int vrefresh_rate; | | 748 | int vrefresh_rate; |
745 | unsigned aspect_ratio = (t->vfreq_aspect & EDID_TIMING_ASPECT_MASK) | | 749 | unsigned aspect_ratio = (t->vfreq_aspect & EDID_TIMING_ASPECT_MASK) |
746 | >> EDID_TIMING_ASPECT_SHIFT; | | 750 | >> EDID_TIMING_ASPECT_SHIFT; |
747 | unsigned vfreq = (t->vfreq_aspect & EDID_TIMING_VFREQ_MASK) | | 751 | unsigned vfreq = (t->vfreq_aspect & EDID_TIMING_VFREQ_MASK) |
748 | >> EDID_TIMING_VFREQ_SHIFT; | | 752 | >> EDID_TIMING_VFREQ_SHIFT; |
749 | int timing_level = standard_timing_level(edid); | | 753 | int timing_level = standard_timing_level(edid); |
750 | | | 754 | |
751 | if (bad_std_timing(t->hsize, t->vfreq_aspect)) | | 755 | if (bad_std_timing(t->hsize, t->vfreq_aspect)) |
752 | return NULL; | | 756 | return NULL; |
753 | | | 757 | |
754 | /* According to the EDID spec, the hdisplay = hsize * 8 + 248 */ | | 758 | /* According to the EDID spec, the hdisplay = hsize * 8 + 248 */ |
755 | hsize = t->hsize * 8 + 248; | | 759 | hsize = t->hsize * 8 + 248; |
756 | /* vrefresh_rate = vfreq + 60 */ | | 760 | /* vrefresh_rate = vfreq + 60 */ |
757 | vrefresh_rate = vfreq + 60; | | 761 | vrefresh_rate = vfreq + 60; |
758 | /* the vdisplay is calculated based on the aspect ratio */ | | 762 | /* the vdisplay is calculated based on the aspect ratio */ |
759 | if (aspect_ratio == 0) { | | 763 | if (aspect_ratio == 0) { |
760 | if (revision < 3) | | 764 | if (revision < 3) |
761 | vsize = hsize; | | 765 | vsize = hsize; |
762 | else | | 766 | else |
763 | vsize = (hsize * 10) / 16; | | 767 | vsize = (hsize * 10) / 16; |
764 | } else if (aspect_ratio == 1) | | 768 | } else if (aspect_ratio == 1) |
765 | vsize = (hsize * 3) / 4; | | 769 | vsize = (hsize * 3) / 4; |
766 | else if (aspect_ratio == 2) | | 770 | else if (aspect_ratio == 2) |
767 | vsize = (hsize * 4) / 5; | | 771 | vsize = (hsize * 4) / 5; |
768 | else | | 772 | else |
769 | vsize = (hsize * 9) / 16; | | 773 | vsize = (hsize * 9) / 16; |
770 | | | 774 | |
771 | /* HDTV hack, part 1 */ | | 775 | /* HDTV hack, part 1 */ |
772 | if (vrefresh_rate == 60 && | | 776 | if (vrefresh_rate == 60 && |
773 | ((hsize == 1360 && vsize == 765) || | | 777 | ((hsize == 1360 && vsize == 765) || |
774 | (hsize == 1368 && vsize == 769))) { | | 778 | (hsize == 1368 && vsize == 769))) { |
775 | hsize = 1366; | | 779 | hsize = 1366; |
776 | vsize = 768; | | 780 | vsize = 768; |
777 | } | | 781 | } |
778 | | | 782 | |
779 | /* | | 783 | /* |
780 | * If this connector already has a mode for this size and refresh | | 784 | * If this connector already has a mode for this size and refresh |
781 | * rate (because it came from detailed or CVT info), use that | | 785 | * rate (because it came from detailed or CVT info), use that |
782 | * instead. This way we don't have to guess at interlace or | | 786 | * instead. This way we don't have to guess at interlace or |
783 | * reduced blanking. | | 787 | * reduced blanking. |
784 | */ | | 788 | */ |
785 | list_for_each_entry(m, &connector->probed_modes, head) | | 789 | list_for_each_entry(m, &connector->probed_modes, head) |
786 | if (m->hdisplay == hsize && m->vdisplay == vsize && | | 790 | if (m->hdisplay == hsize && m->vdisplay == vsize && |
787 | drm_mode_vrefresh(m) == vrefresh_rate) | | 791 | drm_mode_vrefresh(m) == vrefresh_rate) |
788 | return NULL; | | 792 | return NULL; |
789 | | | 793 | |
790 | /* HDTV hack, part 2 */ | | 794 | /* HDTV hack, part 2 */ |
791 | if (hsize == 1366 && vsize == 768 && vrefresh_rate == 60) { | | 795 | if (hsize == 1366 && vsize == 768 && vrefresh_rate == 60) { |
792 | mode = drm_cvt_mode(dev, 1366, 768, vrefresh_rate, 0, 0, | | 796 | mode = drm_cvt_mode(dev, 1366, 768, vrefresh_rate, 0, 0, |
793 | false); | | 797 | false); |
794 | mode->hdisplay = 1366; | | 798 | mode->hdisplay = 1366; |
795 | mode->hsync_start = mode->hsync_start - 1; | | 799 | mode->hsync_start = mode->hsync_start - 1; |
796 | mode->hsync_end = mode->hsync_end - 1; | | 800 | mode->hsync_end = mode->hsync_end - 1; |
797 | return mode; | | 801 | return mode; |
798 | } | | 802 | } |
799 | | | 803 | |
800 | /* check whether it can be found in default mode table */ | | 804 | /* check whether it can be found in default mode table */ |
801 | if (drm_monitor_supports_rb(edid)) { | | 805 | if (drm_monitor_supports_rb(edid)) { |
802 | mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate, | | 806 | mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate, |
803 | true); | | 807 | true); |
804 | if (mode) | | 808 | if (mode) |
805 | return mode; | | 809 | return mode; |
806 | } | | 810 | } |
807 | mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate, false); | | 811 | mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate, false); |
808 | if (mode) | | 812 | if (mode) |
809 | return mode; | | 813 | return mode; |
810 | | | 814 | |
811 | /* okay, generate it */ | | 815 | /* okay, generate it */ |
812 | switch (timing_level) { | | 816 | switch (timing_level) { |
813 | case LEVEL_DMT: | | 817 | case LEVEL_DMT: |
814 | break; | | 818 | break; |
815 | case LEVEL_GTF: | | 819 | case LEVEL_GTF: |
816 | mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0); | | 820 | mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0); |
817 | break; | | 821 | break; |
818 | case LEVEL_GTF2: | | 822 | case LEVEL_GTF2: |
819 | /* | | 823 | /* |
820 | * This is potentially wrong if there's ever a monitor with | | 824 | * This is potentially wrong if there's ever a monitor with |
821 | * more than one ranges section, each claiming a different | | 825 | * more than one ranges section, each claiming a different |
822 | * secondary GTF curve. Please don't do that. | | 826 | * secondary GTF curve. Please don't do that. |
823 | */ | | 827 | */ |
824 | mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0); | | 828 | mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0); |
825 | if (!mode) | | 829 | if (!mode) |
826 | return NULL; | | 830 | return NULL; |
827 | if (drm_mode_hsync(mode) > drm_gtf2_hbreak(edid)) { | | 831 | if (drm_mode_hsync(mode) > drm_gtf2_hbreak(edid)) { |
828 | drm_mode_destroy(dev, mode); | | 832 | drm_mode_destroy(dev, mode); |
829 | mode = drm_gtf_mode_complex(dev, hsize, vsize, | | 833 | mode = drm_gtf_mode_complex(dev, hsize, vsize, |
830 | vrefresh_rate, 0, 0, | | 834 | vrefresh_rate, 0, 0, |
831 | drm_gtf2_m(edid), | | 835 | drm_gtf2_m(edid), |
832 | drm_gtf2_2c(edid), | | 836 | drm_gtf2_2c(edid), |
833 | drm_gtf2_k(edid), | | 837 | drm_gtf2_k(edid), |
834 | drm_gtf2_2j(edid)); | | 838 | drm_gtf2_2j(edid)); |
835 | } | | 839 | } |
836 | break; | | 840 | break; |
837 | case LEVEL_CVT: | | 841 | case LEVEL_CVT: |
838 | mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0, | | 842 | mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0, |
839 | false); | | 843 | false); |
840 | break; | | 844 | break; |
841 | } | | 845 | } |
842 | return mode; | | 846 | return mode; |
843 | } | | 847 | } |
844 | | | 848 | |
845 | /* | | 849 | /* |
846 | * EDID is delightfully ambiguous about how interlaced modes are to be | | 850 | * EDID is delightfully ambiguous about how interlaced modes are to be |
847 | * encoded. Our internal representation is of frame height, but some | | 851 | * encoded. Our internal representation is of frame height, but some |
848 | * HDTV detailed timings are encoded as field height. | | 852 | * HDTV detailed timings are encoded as field height. |
849 | * | | 853 | * |
850 | * The format list here is from CEA, in frame size. Technically we | | 854 | * The format list here is from CEA, in frame size. Technically we |
851 | * should be checking refresh rate too. Whatever. | | 855 | * should be checking refresh rate too. Whatever. |
852 | */ | | 856 | */ |
853 | static void | | 857 | static void |
854 | drm_mode_do_interlace_quirk(struct drm_display_mode *mode, | | 858 | drm_mode_do_interlace_quirk(struct drm_display_mode *mode, |
855 | struct detailed_pixel_timing *pt) | | 859 | struct detailed_pixel_timing *pt) |
856 | { | | 860 | { |
857 | int i; | | 861 | int i; |
858 | static const struct { | | 862 | static const struct { |
859 | int w, h; | | 863 | int w, h; |
860 | } cea_interlaced[] = { | | 864 | } cea_interlaced[] = { |
861 | { 1920, 1080 }, | | 865 | { 1920, 1080 }, |
862 | { 720, 480 }, | | 866 | { 720, 480 }, |
863 | { 1440, 480 }, | | 867 | { 1440, 480 }, |
864 | { 2880, 480 }, | | 868 | { 2880, 480 }, |
865 | { 720, 576 }, | | 869 | { 720, 576 }, |
866 | { 1440, 576 }, | | 870 | { 1440, 576 }, |
867 | { 2880, 576 }, | | 871 | { 2880, 576 }, |
868 | }; | | 872 | }; |
869 | | | 873 | |
870 | if (!(pt->misc & DRM_EDID_PT_INTERLACED)) | | 874 | if (!(pt->misc & DRM_EDID_PT_INTERLACED)) |
871 | return; | | 875 | return; |
872 | | | 876 | |
873 | for (i = 0; i < ARRAY_SIZE(cea_interlaced); i++) { | | 877 | for (i = 0; i < ARRAY_SIZE(cea_interlaced); i++) { |
874 | if ((mode->hdisplay == cea_interlaced[i].w) && | | 878 | if ((mode->hdisplay == cea_interlaced[i].w) && |
875 | (mode->vdisplay == cea_interlaced[i].h / 2)) { | | 879 | (mode->vdisplay == cea_interlaced[i].h / 2)) { |
876 | mode->vdisplay *= 2; | | 880 | mode->vdisplay *= 2; |
877 | mode->vsync_start *= 2; | | 881 | mode->vsync_start *= 2; |
878 | mode->vsync_end *= 2; | | 882 | mode->vsync_end *= 2; |
879 | mode->vtotal *= 2; | | 883 | mode->vtotal *= 2; |
880 | mode->vtotal |= 1; | | 884 | mode->vtotal |= 1; |
881 | } | | 885 | } |
882 | } | | 886 | } |
883 | | | 887 | |
884 | mode->flags |= DRM_MODE_FLAG_INTERLACE; | | 888 | mode->flags |= DRM_MODE_FLAG_INTERLACE; |
885 | } | | 889 | } |
886 | | | 890 | |
887 | /** | | 891 | /** |
888 | * drm_mode_detailed - create a new mode from an EDID detailed timing section | | 892 | * drm_mode_detailed - create a new mode from an EDID detailed timing section |
889 | * @dev: DRM device (needed to create new mode) | | 893 | * @dev: DRM device (needed to create new mode) |
890 | * @edid: EDID block | | 894 | * @edid: EDID block |
891 | * @timing: EDID detailed timing info | | 895 | * @timing: EDID detailed timing info |
892 | * @quirks: quirks to apply | | 896 | * @quirks: quirks to apply |
893 | * | | 897 | * |
894 | * An EDID detailed timing block contains enough info for us to create and | | 898 | * An EDID detailed timing block contains enough info for us to create and |
895 | * return a new struct drm_display_mode. | | 899 | * return a new struct drm_display_mode. |
896 | */ | | 900 | */ |
897 | static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev, | | 901 | static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev, |
898 | struct edid *edid, | | 902 | struct edid *edid, |
899 | struct detailed_timing *timing, | | 903 | struct detailed_timing *timing, |
900 | u32 quirks) | | 904 | u32 quirks) |
901 | { | | 905 | { |
902 | struct drm_display_mode *mode; | | 906 | struct drm_display_mode *mode; |
903 | struct detailed_pixel_timing *pt = &timing->data.pixel_data; | | 907 | struct detailed_pixel_timing *pt = &timing->data.pixel_data; |
904 | unsigned hactive = (pt->hactive_hblank_hi & 0xf0) << 4 | pt->hactive_lo; | | 908 | unsigned hactive = (pt->hactive_hblank_hi & 0xf0) << 4 | pt->hactive_lo; |
905 | unsigned vactive = (pt->vactive_vblank_hi & 0xf0) << 4 | pt->vactive_lo; | | 909 | unsigned vactive = (pt->vactive_vblank_hi & 0xf0) << 4 | pt->vactive_lo; |
906 | unsigned hblank = (pt->hactive_hblank_hi & 0xf) << 8 | pt->hblank_lo; | | 910 | unsigned hblank = (pt->hactive_hblank_hi & 0xf) << 8 | pt->hblank_lo; |
907 | unsigned vblank = (pt->vactive_vblank_hi & 0xf) << 8 | pt->vblank_lo; | | 911 | unsigned vblank = (pt->vactive_vblank_hi & 0xf) << 8 | pt->vblank_lo; |
908 | unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) << 2 | pt->hsync_offset_lo; | | 912 | unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) << 2 | pt->hsync_offset_lo; |
909 | unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) << 4 | pt->hsync_pulse_width_lo; | | 913 | unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) << 4 | pt->hsync_pulse_width_lo; |
910 | unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) >> 2 | pt->vsync_offset_pulse_width_lo >> 4; | | 914 | unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) >> 2 | pt->vsync_offset_pulse_width_lo >> 4; |
911 | unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 4 | (pt->vsync_offset_pulse_width_lo & 0xf); | | 915 | unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 4 | (pt->vsync_offset_pulse_width_lo & 0xf); |
912 | | | 916 | |
913 | /* ignore tiny modes */ | | 917 | /* ignore tiny modes */ |
914 | if (hactive < 64 || vactive < 64) | | 918 | if (hactive < 64 || vactive < 64) |
915 | return NULL; | | 919 | return NULL; |
916 | | | 920 | |
917 | if (pt->misc & DRM_EDID_PT_STEREO) { | | 921 | if (pt->misc & DRM_EDID_PT_STEREO) { |
918 | printk(KERN_WARNING "stereo mode not supported\n"); | | 922 | printk(KERN_WARNING "stereo mode not supported\n"); |
919 | return NULL; | | 923 | return NULL; |
920 | } | | 924 | } |
921 | if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) { | | 925 | if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) { |
922 | printk(KERN_WARNING "composite sync not supported\n"); | | 926 | printk(KERN_WARNING "composite sync not supported\n"); |
923 | } | | 927 | } |
924 | | | 928 | |
925 | /* it is incorrect if hsync/vsync width is zero */ | | 929 | /* it is incorrect if hsync/vsync width is zero */ |
926 | if (!hsync_pulse_width || !vsync_pulse_width) { | | 930 | if (!hsync_pulse_width || !vsync_pulse_width) { |
927 | DRM_DEBUG_KMS("Incorrect Detailed timing. " | | 931 | DRM_DEBUG_KMS("Incorrect Detailed timing. " |
928 | "Wrong Hsync/Vsync pulse width\n"); | | 932 | "Wrong Hsync/Vsync pulse width\n"); |
929 | return NULL; | | 933 | return NULL; |
930 | } | | 934 | } |
931 | | | 935 | |
932 | if (quirks & EDID_QUIRK_FORCE_REDUCED_BLANKING) { | | 936 | if (quirks & EDID_QUIRK_FORCE_REDUCED_BLANKING) { |
933 | mode = drm_cvt_mode(dev, hactive, vactive, 60, true, false, false); | | 937 | mode = drm_cvt_mode(dev, hactive, vactive, 60, true, false, false); |
934 | if (!mode) | | 938 | if (!mode) |
935 | return NULL; | | 939 | return NULL; |
936 | | | 940 | |
937 | goto set_size; | | 941 | goto set_size; |
938 | } | | 942 | } |
939 | | | 943 | |
940 | mode = drm_mode_create(dev); | | 944 | mode = drm_mode_create(dev); |
941 | if (!mode) | | 945 | if (!mode) |
942 | return NULL; | | 946 | return NULL; |
943 | | | 947 | |
944 | if (quirks & EDID_QUIRK_135_CLOCK_TOO_HIGH) | | 948 | if (quirks & EDID_QUIRK_135_CLOCK_TOO_HIGH) |
945 | timing->pixel_clock = cpu_to_le16(1088); | | 949 | timing->pixel_clock = cpu_to_le16(1088); |
946 | | | 950 | |
947 | mode->clock = le16_to_cpu(timing->pixel_clock) * 10; | | 951 | mode->clock = le16_to_cpu(timing->pixel_clock) * 10; |
948 | | | 952 | |
949 | mode->hdisplay = hactive; | | 953 | mode->hdisplay = hactive; |
950 | mode->hsync_start = mode->hdisplay + hsync_offset; | | 954 | mode->hsync_start = mode->hdisplay + hsync_offset; |
951 | mode->hsync_end = mode->hsync_start + hsync_pulse_width; | | 955 | mode->hsync_end = mode->hsync_start + hsync_pulse_width; |
952 | mode->htotal = mode->hdisplay + hblank; | | 956 | mode->htotal = mode->hdisplay + hblank; |
953 | | | 957 | |
954 | mode->vdisplay = vactive; | | 958 | mode->vdisplay = vactive; |
955 | mode->vsync_start = mode->vdisplay + vsync_offset; | | 959 | mode->vsync_start = mode->vdisplay + vsync_offset; |
956 | mode->vsync_end = mode->vsync_start + vsync_pulse_width; | | 960 | mode->vsync_end = mode->vsync_start + vsync_pulse_width; |
957 | mode->vtotal = mode->vdisplay + vblank; | | 961 | mode->vtotal = mode->vdisplay + vblank; |
958 | | | 962 | |
959 | /* Some EDIDs have bogus h/vtotal values */ | | 963 | /* Some EDIDs have bogus h/vtotal values */ |
960 | if (mode->hsync_end > mode->htotal) | | 964 | if (mode->hsync_end > mode->htotal) |
961 | mode->htotal = mode->hsync_end + 1; | | 965 | mode->htotal = mode->hsync_end + 1; |
962 | if (mode->vsync_end > mode->vtotal) | | 966 | if (mode->vsync_end > mode->vtotal) |
963 | mode->vtotal = mode->vsync_end + 1; | | 967 | mode->vtotal = mode->vsync_end + 1; |
964 | | | 968 | |
965 | drm_mode_do_interlace_quirk(mode, pt); | | 969 | drm_mode_do_interlace_quirk(mode, pt); |
966 | | | 970 | |
967 | if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) { | | 971 | if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) { |
968 | pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE; | | 972 | pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE; |
969 | } | | 973 | } |
970 | | | 974 | |
971 | mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ? | | 975 | mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ? |
972 | DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC; | | 976 | DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC; |
973 | mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ? | | 977 | mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ? |
974 | DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC; | | 978 | DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC; |
975 | | | 979 | |
976 | set_size: | | 980 | set_size: |
977 | mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4; | | 981 | mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4; |
978 | mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf) << 8; | | 982 | mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf) << 8; |
979 | | | 983 | |
980 | if (quirks & EDID_QUIRK_DETAILED_IN_CM) { | | 984 | if (quirks & EDID_QUIRK_DETAILED_IN_CM) { |
981 | mode->width_mm *= 10; | | 985 | mode->width_mm *= 10; |
982 | mode->height_mm *= 10; | | 986 | mode->height_mm *= 10; |
983 | } | | 987 | } |
984 | | | 988 | |
985 | if (quirks & EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE) { | | 989 | if (quirks & EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE) { |
986 | mode->width_mm = edid->width_cm * 10; | | 990 | mode->width_mm = edid->width_cm * 10; |
987 | mode->height_mm = edid->height_cm * 10; | | 991 | mode->height_mm = edid->height_cm * 10; |
988 | } | | 992 | } |
989 | | | 993 | |
990 | mode->type = DRM_MODE_TYPE_DRIVER; | | 994 | mode->type = DRM_MODE_TYPE_DRIVER; |
991 | drm_mode_set_name(mode); | | 995 | drm_mode_set_name(mode); |
992 | | | 996 | |
993 | return mode; | | 997 | return mode; |
994 | } | | 998 | } |
995 | | | 999 | |
996 | static bool | | 1000 | static bool |
997 | mode_in_hsync_range(const struct drm_display_mode *mode, | | 1001 | mode_in_hsync_range(const struct drm_display_mode *mode, |
998 | struct edid *edid, u8 *t) | | 1002 | struct edid *edid, u8 *t) |
999 | { | | 1003 | { |
1000 | int hsync, hmin, hmax; | | 1004 | int hsync, hmin, hmax; |
1001 | | | 1005 | |
1002 | hmin = t[7]; | | 1006 | hmin = t[7]; |
1003 | if (edid->revision >= 4) | | 1007 | if (edid->revision >= 4) |
1004 | hmin += ((t[4] & 0x04) ? 255 : 0); | | 1008 | hmin += ((t[4] & 0x04) ? 255 : 0); |
1005 | hmax = t[8]; | | 1009 | hmax = t[8]; |
1006 | if (edid->revision >= 4) | | 1010 | if (edid->revision >= 4) |
1007 | hmax += ((t[4] & 0x08) ? 255 : 0); | | 1011 | hmax += ((t[4] & 0x08) ? 255 : 0); |
1008 | hsync = drm_mode_hsync(mode); | | 1012 | hsync = drm_mode_hsync(mode); |
1009 | | | 1013 | |
1010 | return (hsync <= hmax && hsync >= hmin); | | 1014 | return (hsync <= hmax && hsync >= hmin); |
1011 | } | | 1015 | } |
1012 | | | 1016 | |
1013 | static bool | | 1017 | static bool |
1014 | mode_in_vsync_range(const struct drm_display_mode *mode, | | 1018 | mode_in_vsync_range(const struct drm_display_mode *mode, |
1015 | struct edid *edid, u8 *t) | | 1019 | struct edid *edid, u8 *t) |
1016 | { | | 1020 | { |
1017 | int vsync, vmin, vmax; | | 1021 | int vsync, vmin, vmax; |
1018 | | | 1022 | |
1019 | vmin = t[5]; | | 1023 | vmin = t[5]; |
1020 | if (edid->revision >= 4) | | 1024 | if (edid->revision >= 4) |
1021 | vmin += ((t[4] & 0x01) ? 255 : 0); | | 1025 | vmin += ((t[4] & 0x01) ? 255 : 0); |
1022 | vmax = t[6]; | | 1026 | vmax = t[6]; |
1023 | if (edid->revision >= 4) | | 1027 | if (edid->revision >= 4) |
1024 | vmax += ((t[4] & 0x02) ? 255 : 0); | | 1028 | vmax += ((t[4] & 0x02) ? 255 : 0); |
1025 | vsync = drm_mode_vrefresh(mode); | | 1029 | vsync = drm_mode_vrefresh(mode); |
1026 | | | 1030 | |
1027 | return (vsync <= vmax && vsync >= vmin); | | 1031 | return (vsync <= vmax && vsync >= vmin); |
1028 | } | | 1032 | } |
1029 | | | 1033 | |
1030 | static u32 | | 1034 | static u32 |
1031 | range_pixel_clock(struct edid *edid, u8 *t) | | 1035 | range_pixel_clock(struct edid *edid, u8 *t) |
1032 | { | | 1036 | { |
1033 | /* unspecified */ | | 1037 | /* unspecified */ |
1034 | if (t[9] == 0 || t[9] == 255) | | 1038 | if (t[9] == 0 || t[9] == 255) |
1035 | return 0; | | 1039 | return 0; |
1036 | | | 1040 | |
1037 | /* 1.4 with CVT support gives us real precision, yay */ | | 1041 | /* 1.4 with CVT support gives us real precision, yay */ |
1038 | if (edid->revision >= 4 && t[10] == 0x04) | | 1042 | if (edid->revision >= 4 && t[10] == 0x04) |
1039 | return (t[9] * 10000) - ((t[12] >> 2) * 250); | | 1043 | return (t[9] * 10000) - ((t[12] >> 2) * 250); |
1040 | | | 1044 | |
1041 | /* 1.3 is pathetic, so fuzz up a bit */ | | 1045 | /* 1.3 is pathetic, so fuzz up a bit */ |
1042 | return t[9] * 10000 + 5001; | | 1046 | return t[9] * 10000 + 5001; |
1043 | } | | 1047 | } |
1044 | | | 1048 | |
1045 | static bool | | 1049 | static bool |
1046 | mode_in_range(const struct drm_display_mode *mode, struct edid *edid, | | 1050 | mode_in_range(const struct drm_display_mode *mode, struct edid *edid, |
1047 | struct detailed_timing *timing) | | 1051 | struct detailed_timing *timing) |
1048 | { | | 1052 | { |
1049 | u32 max_clock; | | 1053 | u32 max_clock; |
1050 | u8 *t = (u8 *)timing; | | 1054 | u8 *t = (u8 *)timing; |
1051 | | | 1055 | |
1052 | if (!mode_in_hsync_range(mode, edid, t)) | | 1056 | if (!mode_in_hsync_range(mode, edid, t)) |
1053 | return false; | | 1057 | return false; |
1054 | | | 1058 | |
1055 | if (!mode_in_vsync_range(mode, edid, t)) | | 1059 | if (!mode_in_vsync_range(mode, edid, t)) |
1056 | return false; | | 1060 | return false; |
1057 | | | 1061 | |
1058 | if ((max_clock = range_pixel_clock(edid, t))) | | 1062 | if ((max_clock = range_pixel_clock(edid, t))) |
1059 | if (mode->clock > max_clock) | | 1063 | if (mode->clock > max_clock) |
1060 | return false; | | 1064 | return false; |
1061 | | | 1065 | |
1062 | /* 1.4 max horizontal check */ | | 1066 | /* 1.4 max horizontal check */ |
1063 | if (edid->revision >= 4 && t[10] == 0x04) | | 1067 | if (edid->revision >= 4 && t[10] == 0x04) |
1064 | if (t[13] && mode->hdisplay > 8 * (t[13] + (256 * (t[12]&0x3)))) | | 1068 | if (t[13] && mode->hdisplay > 8 * (t[13] + (256 * (t[12]&0x3)))) |
1065 | return false; | | 1069 | return false; |
1066 | | | 1070 | |
1067 | if (mode_is_rb(mode) && !drm_monitor_supports_rb(edid)) | | 1071 | if (mode_is_rb(mode) && !drm_monitor_supports_rb(edid)) |
1068 | return false; | | 1072 | return false; |
1069 | | | 1073 | |
1070 | return true; | | 1074 | return true; |
1071 | } | | 1075 | } |
1072 | | | 1076 | |
1073 | static bool valid_inferred_mode(const struct drm_connector *connector, | | 1077 | static bool valid_inferred_mode(const struct drm_connector *connector, |
1074 | const struct drm_display_mode *mode) | | 1078 | const struct drm_display_mode *mode) |
1075 | { | | 1079 | { |
1076 | struct drm_display_mode *m; | | 1080 | struct drm_display_mode *m; |
1077 | bool ok = false; | | 1081 | bool ok = false; |
1078 | | | 1082 | |
1079 | list_for_each_entry(m, &connector->probed_modes, head) { | | 1083 | list_for_each_entry(m, &connector->probed_modes, head) { |
1080 | if (mode->hdisplay == m->hdisplay && | | 1084 | if (mode->hdisplay == m->hdisplay && |
1081 | mode->vdisplay == m->vdisplay && | | 1085 | mode->vdisplay == m->vdisplay && |
1082 | drm_mode_vrefresh(mode) == drm_mode_vrefresh(m)) | | 1086 | drm_mode_vrefresh(mode) == drm_mode_vrefresh(m)) |
1083 | return false; /* duplicated */ | | 1087 | return false; /* duplicated */ |
1084 | if (mode->hdisplay <= m->hdisplay && | | 1088 | if (mode->hdisplay <= m->hdisplay && |
1085 | mode->vdisplay <= m->vdisplay) | | 1089 | mode->vdisplay <= m->vdisplay) |
1086 | ok = true; | | 1090 | ok = true; |
1087 | } | | 1091 | } |
1088 | return ok; | | 1092 | return ok; |
1089 | } | | 1093 | } |
1090 | | | 1094 | |
1091 | static int | | 1095 | static int |
1092 | drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid, | | 1096 | drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid, |
1093 | struct detailed_timing *timing) | | 1097 | struct detailed_timing *timing) |
1094 | { | | 1098 | { |
1095 | int i, modes = 0; | | 1099 | int i, modes = 0; |
1096 | struct drm_display_mode *newmode; | | 1100 | struct drm_display_mode *newmode; |
1097 | struct drm_device *dev = connector->dev; | | 1101 | struct drm_device *dev = connector->dev; |
1098 | | | 1102 | |
1099 | for (i = 0; i < drm_num_dmt_modes; i++) { | | 1103 | for (i = 0; i < drm_num_dmt_modes; i++) { |
1100 | if (mode_in_range(drm_dmt_modes + i, edid, timing) && | | 1104 | if (mode_in_range(drm_dmt_modes + i, edid, timing) && |
1101 | valid_inferred_mode(connector, drm_dmt_modes + i)) { | | 1105 | valid_inferred_mode(connector, drm_dmt_modes + i)) { |
1102 | newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]); | | 1106 | newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]); |
1103 | if (newmode) { | | 1107 | if (newmode) { |
1104 | drm_mode_probed_add(connector, newmode); | | 1108 | drm_mode_probed_add(connector, newmode); |
1105 | modes++; | | 1109 | modes++; |
1106 | } | | 1110 | } |
1107 | } | | 1111 | } |
1108 | } | | 1112 | } |
1109 | | | 1113 | |
1110 | return modes; | | 1114 | return modes; |
1111 | } | | 1115 | } |
1112 | | | 1116 | |
1113 | /* fix up 1366x768 mode from 1368x768; | | 1117 | /* fix up 1366x768 mode from 1368x768; |
1114 | * GFT/CVT can't express 1366 width which isn't dividable by 8 | | 1118 | * GFT/CVT can't express 1366 width which isn't dividable by 8 |
1115 | */ | | 1119 | */ |
1116 | static void fixup_mode_1366x768(struct drm_display_mode *mode) | | 1120 | static void fixup_mode_1366x768(struct drm_display_mode *mode) |
1117 | { | | 1121 | { |
1118 | if (mode->hdisplay == 1368 && mode->vdisplay == 768) { | | 1122 | if (mode->hdisplay == 1368 && mode->vdisplay == 768) { |
1119 | mode->hdisplay = 1366; | | 1123 | mode->hdisplay = 1366; |
1120 | mode->hsync_start--; | | 1124 | mode->hsync_start--; |
1121 | mode->hsync_end--; | | 1125 | mode->hsync_end--; |
1122 | drm_mode_set_name(mode); | | 1126 | drm_mode_set_name(mode); |
1123 | } | | 1127 | } |
1124 | } | | 1128 | } |
1125 | | | 1129 | |
1126 | static int | | 1130 | static int |
1127 | drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid, | | 1131 | drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid, |
1128 | struct detailed_timing *timing) | | 1132 | struct detailed_timing *timing) |
1129 | { | | 1133 | { |
1130 | int i, modes = 0; | | 1134 | int i, modes = 0; |
1131 | struct drm_display_mode *newmode; | | 1135 | struct drm_display_mode *newmode; |
1132 | struct drm_device *dev = connector->dev; | | 1136 | struct drm_device *dev = connector->dev; |
1133 | | | 1137 | |
1134 | for (i = 0; i < num_extra_modes; i++) { | | 1138 | for (i = 0; i < num_extra_modes; i++) { |
1135 | const struct minimode *m = &extra_modes[i]; | | 1139 | const struct minimode *m = &extra_modes[i]; |
1136 | newmode = drm_gtf_mode(dev, m->w, m->h, m->r, 0, 0); | | 1140 | newmode = drm_gtf_mode(dev, m->w, m->h, m->r, 0, 0); |
1137 | if (!newmode) | | 1141 | if (!newmode) |
1138 | return modes; | | 1142 | return modes; |
1139 | | | 1143 | |
1140 | fixup_mode_1366x768(newmode); | | 1144 | fixup_mode_1366x768(newmode); |
1141 | if (!mode_in_range(newmode, edid, timing) || | | 1145 | if (!mode_in_range(newmode, edid, timing) || |
1142 | !valid_inferred_mode(connector, newmode)) { | | 1146 | !valid_inferred_mode(connector, newmode)) { |
1143 | drm_mode_destroy(dev, newmode); | | 1147 | drm_mode_destroy(dev, newmode); |
1144 | continue; | | 1148 | continue; |
1145 | } | | 1149 | } |
1146 | | | 1150 | |
1147 | drm_mode_probed_add(connector, newmode); | | 1151 | drm_mode_probed_add(connector, newmode); |
1148 | modes++; | | 1152 | modes++; |
1149 | } | | 1153 | } |
1150 | | | 1154 | |
1151 | return modes; | | 1155 | return modes; |
1152 | } | | 1156 | } |
1153 | | | 1157 | |
1154 | static int | | 1158 | static int |
1155 | drm_cvt_modes_for_range(struct drm_connector *connector, struct edid *edid, | | 1159 | drm_cvt_modes_for_range(struct drm_connector *connector, struct edid *edid, |
1156 | struct detailed_timing *timing) | | 1160 | struct detailed_timing *timing) |
1157 | { | | 1161 | { |
1158 | int i, modes = 0; | | 1162 | int i, modes = 0; |
1159 | struct drm_display_mode *newmode; | | 1163 | struct drm_display_mode *newmode; |
1160 | struct drm_device *dev = connector->dev; | | 1164 | struct drm_device *dev = connector->dev; |
1161 | bool rb = drm_monitor_supports_rb(edid); | | 1165 | bool rb = drm_monitor_supports_rb(edid); |
1162 | | | 1166 | |
1163 | for (i = 0; i < num_extra_modes; i++) { | | 1167 | for (i = 0; i < num_extra_modes; i++) { |
1164 | const struct minimode *m = &extra_modes[i]; | | 1168 | const struct minimode *m = &extra_modes[i]; |
1165 | newmode = drm_cvt_mode(dev, m->w, m->h, m->r, rb, 0, 0); | | 1169 | newmode = drm_cvt_mode(dev, m->w, m->h, m->r, rb, 0, 0); |
1166 | if (!newmode) | | 1170 | if (!newmode) |
1167 | return modes; | | 1171 | return modes; |
1168 | | | 1172 | |
1169 | fixup_mode_1366x768(newmode); | | 1173 | fixup_mode_1366x768(newmode); |
1170 | if (!mode_in_range(newmode, edid, timing) || | | 1174 | if (!mode_in_range(newmode, edid, timing) || |
1171 | !valid_inferred_mode(connector, newmode)) { | | 1175 | !valid_inferred_mode(connector, newmode)) { |
1172 | drm_mode_destroy(dev, newmode); | | 1176 | drm_mode_destroy(dev, newmode); |
1173 | continue; | | 1177 | continue; |
1174 | } | | 1178 | } |
1175 | | | 1179 | |
1176 | drm_mode_probed_add(connector, newmode); | | 1180 | drm_mode_probed_add(connector, newmode); |
1177 | modes++; | | 1181 | modes++; |
1178 | } | | 1182 | } |
1179 | | | 1183 | |
1180 | return modes; | | 1184 | return modes; |
1181 | } | | 1185 | } |
1182 | | | 1186 | |
/*
 * drm_for_each_detailed_block() callback: for a monitor range-limits
 * descriptor, infer additional modes (DMT, then GTF or CVT depending on
 * the descriptor's formula flag and EDID version).  @c is a
 * struct detailed_mode_closure; modes added are accumulated in
 * closure->modes.
 */
static void
do_inferred_modes(struct detailed_timing *timing, void *c)
{
	struct detailed_mode_closure *closure = c;
	struct detailed_non_pixel *data = &timing->data.other_data;
	struct detailed_data_monitor_range *range = &data->data.range;

	/* only range-limits descriptors carry inferable timing info */
	if (data->type != EDID_DETAIL_MONITOR_RANGE)
		return;

	/* DMT modes are always fair game within the advertised range */
	closure->modes += drm_dmt_modes_for_range(closure->connector,
						  closure->edid,
						  timing);

	if (!version_greater(closure->edid, 1, 1))
		return; /* GTF not defined yet */

	switch (range->flags) {
	case 0x02: /* secondary gtf, XXX could do more */
	case 0x00: /* default gtf */
		/* fallthrough above is intentional: both use the GTF formula */
		closure->modes += drm_gtf_modes_for_range(closure->connector,
							  closure->edid,
							  timing);
		break;
	case 0x04: /* cvt, only in 1.4+ */
		if (!version_greater(closure->edid, 1, 3))
			break;

		closure->modes += drm_cvt_modes_for_range(closure->connector,
							  closure->edid,
							  timing);
		break;
	case 0x01: /* just the ranges, no formula */
	default:
		break;
	}
}
1220 | | | 1224 | |
1221 | static int | | 1225 | static int |
1222 | add_inferred_modes(struct drm_connector *connector, struct edid *edid) | | 1226 | add_inferred_modes(struct drm_connector *connector, struct edid *edid) |
1223 | { | | 1227 | { |
1224 | struct detailed_mode_closure closure = { | | 1228 | struct detailed_mode_closure closure = { |
1225 | connector, edid, 0, 0, 0 | | 1229 | connector, edid, 0, 0, 0 |
1226 | }; | | 1230 | }; |
1227 | | | 1231 | |
1228 | if (version_greater(edid, 1, 0)) | | 1232 | if (version_greater(edid, 1, 0)) |
1229 | drm_for_each_detailed_block((u8 *)edid, do_inferred_modes, | | 1233 | drm_for_each_detailed_block((u8 *)edid, do_inferred_modes, |
1230 | &closure); | | 1234 | &closure); |
1231 | | | 1235 | |
1232 | return closure.modes; | | 1236 | return closure.modes; |
1233 | } | | 1237 | } |
1234 | | | 1238 | |
1235 | static int | | 1239 | static int |
1236 | drm_est3_modes(struct drm_connector *connector, struct detailed_timing *timing) | | 1240 | drm_est3_modes(struct drm_connector *connector, struct detailed_timing *timing) |
1237 | { | | 1241 | { |
1238 | int i, j, m, modes = 0; | | 1242 | int i, j, m, modes = 0; |
1239 | struct drm_display_mode *mode; | | 1243 | struct drm_display_mode *mode; |
1240 | u8 *est = ((u8 *)timing) + 5; | | 1244 | u8 *est = ((u8 *)timing) + 5; |
1241 | | | 1245 | |
1242 | for (i = 0; i < 6; i++) { | | 1246 | for (i = 0; i < 6; i++) { |
1243 | for (j = 7; j > 0; j--) { | | 1247 | for (j = 7; j > 0; j--) { |
1244 | m = (i * 8) + (7 - j); | | 1248 | m = (i * 8) + (7 - j); |
1245 | if (m >= ARRAY_SIZE(est3_modes)) | | 1249 | if (m >= ARRAY_SIZE(est3_modes)) |
1246 | break; | | 1250 | break; |
1247 | if (est[i] & (1 << j)) { | | 1251 | if (est[i] & (1 << j)) { |
1248 | mode = drm_mode_find_dmt(connector->dev, | | 1252 | mode = drm_mode_find_dmt(connector->dev, |
1249 | est3_modes[m].w, | | 1253 | est3_modes[m].w, |
1250 | est3_modes[m].h, | | 1254 | est3_modes[m].h, |
1251 | est3_modes[m].r, | | 1255 | est3_modes[m].r, |
1252 | est3_modes[m].rb); | | 1256 | est3_modes[m].rb); |
1253 | if (mode) { | | 1257 | if (mode) { |
1254 | drm_mode_probed_add(connector, mode); | | 1258 | drm_mode_probed_add(connector, mode); |
1255 | modes++; | | 1259 | modes++; |
1256 | } | | 1260 | } |
1257 | } | | 1261 | } |
1258 | } | | 1262 | } |
1259 | } | | 1263 | } |
1260 | | | 1264 | |
1261 | return modes; | | 1265 | return modes; |
1262 | } | | 1266 | } |
1263 | | | 1267 | |
1264 | static void | | 1268 | static void |
1265 | do_established_modes(struct detailed_timing *timing, void *c) | | 1269 | do_established_modes(struct detailed_timing *timing, void *c) |
1266 | { | | 1270 | { |
1267 | struct detailed_mode_closure *closure = c; | | 1271 | struct detailed_mode_closure *closure = c; |
1268 | struct detailed_non_pixel *data = &timing->data.other_data; | | 1272 | struct detailed_non_pixel *data = &timing->data.other_data; |
1269 | | | 1273 | |
1270 | if (data->type == EDID_DETAIL_EST_TIMINGS) | | 1274 | if (data->type == EDID_DETAIL_EST_TIMINGS) |
1271 | closure->modes += drm_est3_modes(closure->connector, timing); | | 1275 | closure->modes += drm_est3_modes(closure->connector, timing); |
1272 | } | | 1276 | } |
1273 | | | 1277 | |
1274 | /** | | 1278 | /** |
1275 | * add_established_modes - get est. modes from EDID and add them | | 1279 | * add_established_modes - get est. modes from EDID and add them |
1276 | * @edid: EDID block to scan | | 1280 | * @edid: EDID block to scan |
1277 | * | | 1281 | * |
1278 | * Each EDID block contains a bitmap of the supported "established modes" list | | 1282 | * Each EDID block contains a bitmap of the supported "established modes" list |
1279 | * (defined above). Tease them out and add them to the global modes list. | | 1283 | * (defined above). Tease them out and add them to the global modes list. |
1280 | */ | | 1284 | */ |
1281 | static int | | 1285 | static int |
1282 | add_established_modes(struct drm_connector *connector, struct edid *edid) | | 1286 | add_established_modes(struct drm_connector *connector, struct edid *edid) |
1283 | { | | 1287 | { |
1284 | struct drm_device *dev = connector->dev; | | 1288 | struct drm_device *dev = connector->dev; |
1285 | unsigned long est_bits = edid->established_timings.t1 | | | 1289 | unsigned long est_bits = edid->established_timings.t1 | |
1286 | (edid->established_timings.t2 << 8) | | | 1290 | (edid->established_timings.t2 << 8) | |
1287 | ((edid->established_timings.mfg_rsvd & 0x80) << 9); | | 1291 | ((edid->established_timings.mfg_rsvd & 0x80) << 9); |
1288 | int i, modes = 0; | | 1292 | int i, modes = 0; |
1289 | struct detailed_mode_closure closure = { | | 1293 | struct detailed_mode_closure closure = { |
1290 | connector, edid, 0, 0, 0 | | 1294 | connector, edid, 0, 0, 0 |
1291 | }; | | 1295 | }; |
1292 | | | 1296 | |
1293 | for (i = 0; i <= EDID_EST_TIMINGS; i++) { | | 1297 | for (i = 0; i <= EDID_EST_TIMINGS; i++) { |
1294 | if (est_bits & (1<<i)) { | | 1298 | if (est_bits & (1<<i)) { |
1295 | struct drm_display_mode *newmode; | | 1299 | struct drm_display_mode *newmode; |
1296 | newmode = drm_mode_duplicate(dev, &edid_est_modes[i]); | | 1300 | newmode = drm_mode_duplicate(dev, &edid_est_modes[i]); |
1297 | if (newmode) { | | 1301 | if (newmode) { |
1298 | drm_mode_probed_add(connector, newmode); | | 1302 | drm_mode_probed_add(connector, newmode); |
1299 | modes++; | | 1303 | modes++; |
1300 | } | | 1304 | } |
1301 | } | | 1305 | } |
1302 | } | | 1306 | } |
1303 | | | 1307 | |
1304 | if (version_greater(edid, 1, 0)) | | 1308 | if (version_greater(edid, 1, 0)) |
1305 | drm_for_each_detailed_block((u8 *)edid, | | 1309 | drm_for_each_detailed_block((u8 *)edid, |
1306 | do_established_modes, &closure); | | 1310 | do_established_modes, &closure); |
1307 | | | 1311 | |
1308 | return modes + closure.modes; | | 1312 | return modes + closure.modes; |
1309 | } | | 1313 | } |
1310 | | | 1314 | |
1311 | static void | | 1315 | static void |
1312 | do_standard_modes(struct detailed_timing *timing, void *c) | | 1316 | do_standard_modes(struct detailed_timing *timing, void *c) |
1313 | { | | 1317 | { |
1314 | struct detailed_mode_closure *closure = c; | | 1318 | struct detailed_mode_closure *closure = c; |
1315 | struct detailed_non_pixel *data = &timing->data.other_data; | | 1319 | struct detailed_non_pixel *data = &timing->data.other_data; |
1316 | struct drm_connector *connector = closure->connector; | | 1320 | struct drm_connector *connector = closure->connector; |
1317 | struct edid *edid = closure->edid; | | 1321 | struct edid *edid = closure->edid; |
1318 | | | 1322 | |
1319 | if (data->type == EDID_DETAIL_STD_MODES) { | | 1323 | if (data->type == EDID_DETAIL_STD_MODES) { |
1320 | int i; | | 1324 | int i; |
1321 | for (i = 0; i < 6; i++) { | | 1325 | for (i = 0; i < 6; i++) { |
1322 | struct std_timing *std; | | 1326 | struct std_timing *std; |
1323 | struct drm_display_mode *newmode; | | 1327 | struct drm_display_mode *newmode; |
1324 | | | 1328 | |
1325 | std = &data->data.timings[i]; | | 1329 | std = &data->data.timings[i]; |
1326 | newmode = drm_mode_std(connector, edid, std, | | 1330 | newmode = drm_mode_std(connector, edid, std, |
1327 | edid->revision); | | 1331 | edid->revision); |
1328 | if (newmode) { | | 1332 | if (newmode) { |
1329 | drm_mode_probed_add(connector, newmode); | | 1333 | drm_mode_probed_add(connector, newmode); |
1330 | closure->modes++; | | 1334 | closure->modes++; |
1331 | } | | 1335 | } |
1332 | } | | 1336 | } |
1333 | } | | 1337 | } |
1334 | } | | 1338 | } |
1335 | | | 1339 | |
1336 | /** | | 1340 | /** |
1337 | * add_standard_modes - get std. modes from EDID and add them | | 1341 | * add_standard_modes - get std. modes from EDID and add them |
1338 | * @edid: EDID block to scan | | 1342 | * @edid: EDID block to scan |
1339 | * | | 1343 | * |
1340 | * Standard modes can be calculated using the appropriate standard (DMT, | | 1344 | * Standard modes can be calculated using the appropriate standard (DMT, |
1341 | * GTF or CVT. Grab them from @edid and add them to the list. | | 1345 | * GTF or CVT. Grab them from @edid and add them to the list. |
1342 | */ | | 1346 | */ |
1343 | static int | | 1347 | static int |
1344 | add_standard_modes(struct drm_connector *connector, struct edid *edid) | | 1348 | add_standard_modes(struct drm_connector *connector, struct edid *edid) |
1345 | { | | 1349 | { |
1346 | int i, modes = 0; | | 1350 | int i, modes = 0; |
1347 | struct detailed_mode_closure closure = { | | 1351 | struct detailed_mode_closure closure = { |
1348 | connector, edid, 0, 0, 0 | | 1352 | connector, edid, 0, 0, 0 |
1349 | }; | | 1353 | }; |
1350 | | | 1354 | |
1351 | for (i = 0; i < EDID_STD_TIMINGS; i++) { | | 1355 | for (i = 0; i < EDID_STD_TIMINGS; i++) { |
1352 | struct drm_display_mode *newmode; | | 1356 | struct drm_display_mode *newmode; |
1353 | | | 1357 | |
1354 | newmode = drm_mode_std(connector, edid, | | 1358 | newmode = drm_mode_std(connector, edid, |
1355 | &edid->standard_timings[i], | | 1359 | &edid->standard_timings[i], |
1356 | edid->revision); | | 1360 | edid->revision); |
1357 | if (newmode) { | | 1361 | if (newmode) { |
1358 | drm_mode_probed_add(connector, newmode); | | 1362 | drm_mode_probed_add(connector, newmode); |
1359 | modes++; | | 1363 | modes++; |
1360 | } | | 1364 | } |
1361 | } | | 1365 | } |
1362 | | | 1366 | |
1363 | if (version_greater(edid, 1, 0)) | | 1367 | if (version_greater(edid, 1, 0)) |
1364 | drm_for_each_detailed_block((u8 *)edid, do_standard_modes, | | 1368 | drm_for_each_detailed_block((u8 *)edid, do_standard_modes, |
1365 | &closure); | | 1369 | &closure); |
1366 | | | 1370 | |
1367 | /* XXX should also look for standard codes in VTB blocks */ | | 1371 | /* XXX should also look for standard codes in VTB blocks */ |
1368 | | | 1372 | |
1369 | return modes + closure.modes; | | 1373 | return modes + closure.modes; |
1370 | } | | 1374 | } |
1371 | | | 1375 | |
/*
 * Decode a CVT 3-byte-code descriptor and add the modes it advertises.
 * Each descriptor carries up to four 3-byte CVT codes; byte layout per
 * the descriptor bits used below: code[0] plus the high nibble of code[1]
 * encode (vactive/2 - 1), bits 3:2 of code[1] select the aspect ratio,
 * and bits 4:1 of code[2] select the supported refresh rates.
 * Returns the number of modes added.
 */
static int drm_cvt_modes(struct drm_connector *connector,
			 struct detailed_timing *timing)
{
	int i, j, modes = 0;
	struct drm_display_mode *newmode;
	struct drm_device *dev = connector->dev;
	struct cvt_timing *cvt;
	/* indexed by rate bit j; rates[0] pairs with the RB-60Hz bit */
	const int rates[] = { 60, 85, 75, 60, 50 };
	const u8 empty[3] = { 0, 0, 0 };

	for (i = 0; i < 4; i++) {
		/* the switch below covers all values of code[1] & 0x0c,
		 * so width is always assigned before use */
		int uninitialized_var(width), height;
		cvt = &(timing->data.other_data.data.cvt[i]);

		/* an all-zero code means an unused slot */
		if (!memcmp(cvt->code, empty, 3))
			continue;

		height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 4) + 1) * 2;
		switch (cvt->code[1] & 0x0c) {
		case 0x00:
			width = height * 4 / 3;
			break;
		case 0x04:
			width = height * 16 / 9;
			break;
		case 0x08:
			width = height * 16 / 10;
			break;
		case 0x0c:
			width = height * 15 / 9;
			break;
		}

		/* NOTE(review): j starts at 1, so "j == 0" (the reduced-
		 * blanking flag for rates[0]) is always false and the
		 * RB-60Hz bit (code[2] bit 0) is never examined —
		 * presumably a deliberate limitation; confirm before
		 * changing */
		for (j = 1; j < 5; j++) {
			if (cvt->code[2] & (1 << j)) {
				newmode = drm_cvt_mode(dev, width, height,
						       rates[j], j == 0,
						       false, false);
				if (newmode) {
					drm_mode_probed_add(connector, newmode);
					modes++;
				}
			}
		}
	}

	return modes;
}
1420 | | | 1424 | |
1421 | static void | | 1425 | static void |
1422 | do_cvt_mode(struct detailed_timing *timing, void *c) | | 1426 | do_cvt_mode(struct detailed_timing *timing, void *c) |
1423 | { | | 1427 | { |
1424 | struct detailed_mode_closure *closure = c; | | 1428 | struct detailed_mode_closure *closure = c; |
1425 | struct detailed_non_pixel *data = &timing->data.other_data; | | 1429 | struct detailed_non_pixel *data = &timing->data.other_data; |
1426 | | | 1430 | |
1427 | if (data->type == EDID_DETAIL_CVT_3BYTE) | | 1431 | if (data->type == EDID_DETAIL_CVT_3BYTE) |
1428 | closure->modes += drm_cvt_modes(closure->connector, timing); | | 1432 | closure->modes += drm_cvt_modes(closure->connector, timing); |
1429 | } | | 1433 | } |
1430 | | | 1434 | |
1431 | static int | | 1435 | static int |
1432 | add_cvt_modes(struct drm_connector *connector, struct edid *edid) | | 1436 | add_cvt_modes(struct drm_connector *connector, struct edid *edid) |
1433 | { | | 1437 | { |
1434 | struct detailed_mode_closure closure = { | | 1438 | struct detailed_mode_closure closure = { |
1435 | connector, edid, 0, 0, 0 | | 1439 | connector, edid, 0, 0, 0 |
1436 | }; | | 1440 | }; |
1437 | | | 1441 | |
1438 | if (version_greater(edid, 1, 2)) | | 1442 | if (version_greater(edid, 1, 2)) |