texture.cxx
1/**
2 * PANDA 3D SOFTWARE
3 * Copyright (c) Carnegie Mellon University. All rights reserved.
4 *
5 * All use of this software is subject to the terms of the revised BSD
6 * license. You should have received a copy of this license along
7 * with this source code in a file named "LICENSE."
8 *
9 * @file texture.cxx
10 * @author mike
11 * @date 1997-01-09
12 * @author fperazzi, PandaSE
13 * @date 2010-04-29
14 */
15
16#include "pandabase.h"
17#include "texture.h"
18#include "config_gobj.h"
19#include "config_putil.h"
20#include "texturePool.h"
21#include "textureContext.h"
22#include "bamCache.h"
23#include "bamCacheRecord.h"
24#include "datagram.h"
25#include "datagramIterator.h"
26#include "bamReader.h"
27#include "bamWriter.h"
28#include "string_utils.h"
30#include "pnmImage.h"
31#include "pnmReader.h"
32#include "pfmFile.h"
33#include "pnmFileTypeRegistry.h"
34#include "virtualFileSystem.h"
35#include "datagramInputFile.h"
36#include "datagramOutputFile.h"
37#include "bam.h"
38#include "zStream.h"
39#include "indent.h"
40#include "cmath.h"
41#include "pStatTimer.h"
42#include "pbitops.h"
43#include "streamReader.h"
44#include "texturePeeker.h"
45#include "convert_srgb.h"
46
47#ifdef HAVE_SQUISH
48#include <squish.h>
49#endif // HAVE_SQUISH
50
51#include <stddef.h>
52
53using std::endl;
54using std::istream;
55using std::max;
56using std::min;
57using std::ostream;
58using std::string;
59using std::swap;
60
62("texture-quality-level", Texture::QL_normal,
63 PRC_DESC("This specifies a global quality level for all textures. You "
64 "may specify either fastest, normal, or best. This actually "
65 "affects the meaning of Texture::set_quality_level(QL_default), "
66 "so it may be overridden on a per-texture basis. This generally "
67 "only has an effect when using the tinydisplay software renderer; "
68 "it has little or no effect on normal, hardware-accelerated "
69 "renderers. See Texture::set_quality_level()."));
70
71PStatCollector Texture::_texture_read_pcollector("*:Texture:Read");
72TypeHandle Texture::_type_handle;
73TypeHandle Texture::CData::_type_handle;
74AutoTextureScale Texture::_textures_power_2 = ATS_unspecified;
75
76// Stuff to read and write DDS files.
77
78// little-endian, of course
79#define DDS_MAGIC 0x20534444
80
81
82// DDS_header.dwFlags
83#define DDSD_CAPS 0x00000001
84#define DDSD_HEIGHT 0x00000002
85#define DDSD_WIDTH 0x00000004
86#define DDSD_PITCH 0x00000008
87#define DDSD_PIXELFORMAT 0x00001000
88#define DDSD_MIPMAPCOUNT 0x00020000
89#define DDSD_LINEARSIZE 0x00080000
90#define DDSD_DEPTH 0x00800000
91
92// DDS_header.sPixelFormat.dwFlags
93#define DDPF_ALPHAPIXELS 0x00000001
94#define DDPF_FOURCC 0x00000004
95#define DDPF_INDEXED 0x00000020
96#define DDPF_RGB 0x00000040
97
98// DDS_header.sCaps.dwCaps1
99#define DDSCAPS_COMPLEX 0x00000008
100#define DDSCAPS_TEXTURE 0x00001000
101#define DDSCAPS_MIPMAP 0x00400000
102
103// DDS_header.sCaps.dwCaps2
104#define DDSCAPS2_CUBEMAP 0x00000200
105#define DDSCAPS2_CUBEMAP_POSITIVEX 0x00000400
106#define DDSCAPS2_CUBEMAP_NEGATIVEX 0x00000800
107#define DDSCAPS2_CUBEMAP_POSITIVEY 0x00001000
108#define DDSCAPS2_CUBEMAP_NEGATIVEY 0x00002000
109#define DDSCAPS2_CUBEMAP_POSITIVEZ 0x00004000
110#define DDSCAPS2_CUBEMAP_NEGATIVEZ 0x00008000
111#define DDSCAPS2_VOLUME 0x00200000
112
113struct DDSPixelFormat {
114 unsigned int pf_size;
115 unsigned int pf_flags;
116 unsigned int four_cc;
117 unsigned int rgb_bitcount;
118 unsigned int r_mask;
119 unsigned int g_mask;
120 unsigned int b_mask;
121 unsigned int a_mask;
122};
123
124struct DDSCaps2 {
125 unsigned int caps1;
126 unsigned int caps2;
127 unsigned int ddsx;
128};
129
130struct DDSHeader {
131 unsigned int dds_magic;
132 unsigned int dds_size;
133 unsigned int dds_flags;
134 unsigned int height;
135 unsigned int width;
136 unsigned int pitch;
137 unsigned int depth;
138 unsigned int num_levels;
139
140 DDSPixelFormat pf;
141 DDSCaps2 caps;
142};
143
144// Stuff to read KTX files.
145enum KTXType {
146 KTX_BYTE = 0x1400,
147 KTX_UNSIGNED_BYTE = 0x1401,
148 KTX_SHORT = 0x1402,
149 KTX_UNSIGNED_SHORT = 0x1403,
150 KTX_INT = 0x1404,
151 KTX_UNSIGNED_INT = 0x1405,
152 KTX_FLOAT = 0x1406,
153 KTX_HALF_FLOAT = 0x140B,
154 KTX_UNSIGNED_BYTE_3_3_2 = 0x8032,
155 KTX_UNSIGNED_SHORT_4_4_4_4 = 0x8033,
156 KTX_UNSIGNED_SHORT_5_5_5_1 = 0x8034,
157 KTX_UNSIGNED_INT_8_8_8_8 = 0x8035,
158 KTX_UNSIGNED_INT_10_10_10_2 = 0x8036,
159 KTX_UNSIGNED_BYTE_2_3_3_REV = 0x8362,
160 KTX_UNSIGNED_SHORT_5_6_5 = 0x8363,
161 KTX_UNSIGNED_SHORT_5_6_5_REV = 0x8364,
162 KTX_UNSIGNED_SHORT_4_4_4_4_REV = 0x8365,
163 KTX_UNSIGNED_SHORT_1_5_5_5_REV = 0x8366,
164 KTX_UNSIGNED_INT_8_8_8_8_REV = 0x8367,
165 KTX_UNSIGNED_INT_2_10_10_10_REV = 0x8368,
166 KTX_UNSIGNED_INT_24_8 = 0x84FA,
167 KTX_UNSIGNED_INT_10F_11F_11F_REV = 0x8C3B,
168 KTX_UNSIGNED_INT_5_9_9_9_REV = 0x8C3E,
169 KTX_FLOAT_32_UNSIGNED_INT_24_8_REV = 0x8DAD,
170};
171
172enum KTXFormat {
173 KTX_ALPHA = 0x1906,
174 KTX_ALPHA12 = 0x803D,
175 KTX_ALPHA16 = 0x803E,
176 KTX_ALPHA16_SNORM = 0x9018,
177 KTX_ALPHA4 = 0x803B,
178 KTX_ALPHA8 = 0x803C,
179 KTX_ALPHA8_SNORM = 0x9014,
180 KTX_ALPHA_SNORM = 0x9010,
181 KTX_BGR = 0x80E0,
182 KTX_BGR_INTEGER = 0x8D9A,
183 KTX_BGRA = 0x80E1,
184 KTX_BGRA_INTEGER = 0x8D9B,
185 KTX_BLUE = 0x1905,
186 KTX_BLUE_INTEGER = 0x8D96,
187 KTX_COLOR_INDEX = 0x1900,
188 KTX_DEPTH24_STENCIL8 = 0x88F0,
189 KTX_DEPTH32F_STENCIL8 = 0x8CAD,
190 KTX_DEPTH_COMPONENT = 0x1902,
191 KTX_DEPTH_COMPONENT16 = 0x81A5,
192 KTX_DEPTH_COMPONENT24 = 0x81A6,
193 KTX_DEPTH_COMPONENT32 = 0x81A7,
194 KTX_DEPTH_COMPONENT32F = 0x8CAC,
195 KTX_DEPTH_STENCIL = 0x84F9,
196 KTX_GREEN = 0x1904,
197 KTX_GREEN_INTEGER = 0x8D95,
198 KTX_INTENSITY = 0x8049,
199 KTX_INTENSITY12 = 0x804C,
200 KTX_INTENSITY16 = 0x804D,
201 KTX_INTENSITY16_SNORM = 0x901B,
202 KTX_INTENSITY4 = 0x804A,
203 KTX_INTENSITY8 = 0x804B,
204 KTX_INTENSITY8_SNORM = 0x9017,
205 KTX_INTENSITY_SNORM = 0x9013,
206 KTX_LUMINANCE = 0x1909,
207 KTX_LUMINANCE12 = 0x8041,
208 KTX_LUMINANCE12_ALPHA12 = 0x8047,
209 KTX_LUMINANCE12_ALPHA4 = 0x8046,
210 KTX_LUMINANCE16 = 0x8042,
211 KTX_LUMINANCE16_ALPHA16 = 0x8048,
212 KTX_LUMINANCE16_ALPHA16_SNORM = 0x901A,
213 KTX_LUMINANCE16_SNORM = 0x9019,
214 KTX_LUMINANCE4 = 0x803F,
215 KTX_LUMINANCE4_ALPHA4 = 0x8043,
216 KTX_LUMINANCE6_ALPHA2 = 0x8044,
217 KTX_LUMINANCE8 = 0x8040,
218 KTX_LUMINANCE8_ALPHA8 = 0x8045,
219 KTX_LUMINANCE8_ALPHA8_SNORM = 0x9016,
220 KTX_LUMINANCE8_SNORM = 0x9015,
221 KTX_LUMINANCE_ALPHA = 0x190A,
222 KTX_LUMINANCE_ALPHA_SNORM = 0x9012,
223 KTX_LUMINANCE_SNORM = 0x9011,
224 KTX_R11F_G11F_B10F = 0x8C3A,
225 KTX_R16 = 0x822A,
226 KTX_R16_SNORM = 0x8F98,
227 KTX_R16F = 0x822D,
228 KTX_R16I = 0x8233,
229 KTX_R16UI = 0x8234,
230 KTX_R32F = 0x822E,
231 KTX_R32I = 0x8235,
232 KTX_R32UI = 0x8236,
233 KTX_R3_G3_B2 = 0x2A10,
234 KTX_R8 = 0x8229,
235 KTX_R8_SNORM = 0x8F94,
236 KTX_R8I = 0x8231,
237 KTX_R8UI = 0x8232,
238 KTX_RED = 0x1903,
239 KTX_RED_INTEGER = 0x8D94,
240 KTX_RED_SNORM = 0x8F90,
241 KTX_RG = 0x8227,
242 KTX_RG16 = 0x822C,
243 KTX_RG16_SNORM = 0x8F99,
244 KTX_RG16F = 0x822F,
245 KTX_RG16I = 0x8239,
246 KTX_RG16UI = 0x823A,
247 KTX_RG32F = 0x8230,
248 KTX_RG32I = 0x823B,
249 KTX_RG32UI = 0x823C,
250 KTX_RG8 = 0x822B,
251 KTX_RG8_SNORM = 0x8F95,
252 KTX_RG8I = 0x8237,
253 KTX_RG8UI = 0x8238,
254 KTX_RG_INTEGER = 0x8228,
255 KTX_RG_SNORM = 0x8F91,
256 KTX_RGB = 0x1907,
257 KTX_RGB10 = 0x8052,
258 KTX_RGB10_A2 = 0x8059,
259 KTX_RGB12 = 0x8053,
260 KTX_RGB16 = 0x8054,
261 KTX_RGB16_SNORM = 0x8F9A,
262 KTX_RGB16F = 0x881B,
263 KTX_RGB16I = 0x8D89,
264 KTX_RGB16UI = 0x8D77,
265 KTX_RGB2 = 0x804E,
266 KTX_RGB32F = 0x8815,
267 KTX_RGB32I = 0x8D83,
268 KTX_RGB32UI = 0x8D71,
269 KTX_RGB4 = 0x804F,
270 KTX_RGB5 = 0x8050,
271 KTX_RGB5_A1 = 0x8057,
272 KTX_RGB8 = 0x8051,
273 KTX_RGB8_SNORM = 0x8F96,
274 KTX_RGB8I = 0x8D8F,
275 KTX_RGB8UI = 0x8D7D,
276 KTX_RGB9_E5 = 0x8C3D,
277 KTX_RGB_INTEGER = 0x8D98,
278 KTX_RGB_SNORM = 0x8F92,
279 KTX_RGBA = 0x1908,
280 KTX_RGBA12 = 0x805A,
281 KTX_RGBA16 = 0x805B,
282 KTX_RGBA16_SNORM = 0x8F9B,
283 KTX_RGBA16F = 0x881A,
284 KTX_RGBA16I = 0x8D88,
285 KTX_RGBA16UI = 0x8D76,
286 KTX_RGBA2 = 0x8055,
287 KTX_RGBA32F = 0x8814,
288 KTX_RGBA32I = 0x8D82,
289 KTX_RGBA32UI = 0x8D70,
290 KTX_RGBA4 = 0x8056,
291 KTX_RGBA8 = 0x8058,
292 KTX_RGBA8_SNORM = 0x8F97,
293 KTX_RGBA8I = 0x8D8E,
294 KTX_RGBA8UI = 0x8D7C,
295 KTX_RGBA_INTEGER = 0x8D99,
296 KTX_RGBA_SNORM = 0x8F93,
297 KTX_SLUMINANCE = 0x8C46,
298 KTX_SLUMINANCE8 = 0x8C47,
299 KTX_SLUMINANCE8_ALPHA8 = 0x8C45,
300 KTX_SLUMINANCE_ALPHA = 0x8C44,
301 KTX_SRGB = 0x8C40,
302 KTX_SRGB8 = 0x8C41,
303 KTX_SRGB8_ALPHA8 = 0x8C43,
304 KTX_SRGB_ALPHA = 0x8C42,
305 KTX_STENCIL_INDEX = 0x1901,
306 KTX_STENCIL_INDEX1 = 0x8D46,
307 KTX_STENCIL_INDEX16 = 0x8D49,
308 KTX_STENCIL_INDEX4 = 0x8D47,
309 KTX_STENCIL_INDEX8 = 0x8D48,
310};
311
312enum KTXCompressedFormat {
313 KTX_COMPRESSED_LUMINANCE_ALPHA_LATC2 = 0x8C72,
314 KTX_COMPRESSED_LUMINANCE_LATC1 = 0x8C70,
315 KTX_COMPRESSED_R11_EAC = 0x9270,
316 KTX_COMPRESSED_RED = 0x8225,
317 KTX_COMPRESSED_RED_RGTC1 = 0x8DBB,
318 KTX_COMPRESSED_RG = 0x8226,
319 KTX_COMPRESSED_RG11_EAC = 0x9272,
320 KTX_COMPRESSED_RG_RGTC2 = 0x8DBD,
321 KTX_COMPRESSED_RGB = 0x84ED,
322 KTX_COMPRESSED_RGB8_ETC2 = 0x9274,
323 KTX_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2 = 0x9276,
324 KTX_COMPRESSED_RGB_BPTC_SIGNED_FLOAT = 0x8E8E,
325 KTX_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT = 0x8E8F,
326 KTX_COMPRESSED_RGB_FXT1_3DFX = 0x86B0,
327 KTX_COMPRESSED_RGB_PVRTC_2BPPV1_IMG = 0x8C01,
328 KTX_COMPRESSED_RGB_PVRTC_4BPPV1_IMG = 0x8C00,
329 KTX_COMPRESSED_RGB_S3TC_DXT1 = 0x83F0,
330 KTX_COMPRESSED_RGBA = 0x84EE,
331 KTX_COMPRESSED_RGBA8_ETC2_EAC = 0x9278,
332 KTX_COMPRESSED_RGBA_BPTC_UNORM = 0x8E8C,
333 KTX_COMPRESSED_RGBA_FXT1_3DFX = 0x86B1,
334 KTX_COMPRESSED_RGBA_PVRTC_2BPPV1_IMG = 0x8C03,
335 KTX_COMPRESSED_RGBA_PVRTC_2BPPV2_IMG = 0x9137,
336 KTX_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG = 0x8C02,
337 KTX_COMPRESSED_RGBA_PVRTC_4BPPV2_IMG = 0x9138,
338 KTX_COMPRESSED_RGBA_S3TC_DXT1 = 0x83F1,
339 KTX_COMPRESSED_RGBA_S3TC_DXT3 = 0x83F2,
340 KTX_COMPRESSED_RGBA_S3TC_DXT5 = 0x83F3,
341 KTX_COMPRESSED_SIGNED_LUMINANCE_ALPHA_LATC2 = 0x8C73,
342 KTX_COMPRESSED_SIGNED_LUMINANCE_LATC1 = 0x8C71,
343 KTX_COMPRESSED_SIGNED_R11_EAC = 0x9271,
344 KTX_COMPRESSED_SIGNED_RED_RGTC1 = 0x8DBC,
345 KTX_COMPRESSED_SIGNED_RG11_EAC = 0x9273,
346 KTX_COMPRESSED_SIGNED_RG_RGTC2 = 0x8DBE,
347 KTX_COMPRESSED_SLUMINANCE = 0x8C4A,
348 KTX_COMPRESSED_SLUMINANCE_ALPHA = 0x8C4B,
349 KTX_COMPRESSED_SRGB = 0x8C48,
350 KTX_COMPRESSED_SRGB8_ALPHA8_ETC2_EAC = 0x9279,
351 KTX_COMPRESSED_SRGB8_ETC2 = 0x9275,
352 KTX_COMPRESSED_SRGB8_PUNCHTHROUGH_ALPHA1_ETC2 = 0x9277,
353 KTX_COMPRESSED_SRGB_ALPHA = 0x8C49,
354 KTX_COMPRESSED_SRGB_ALPHA_BPTC_UNORM = 0x8E8D,
355 KTX_COMPRESSED_SRGB_ALPHA_PVRTC_2BPPV1 = 0x8A56,
356 KTX_COMPRESSED_SRGB_ALPHA_PVRTC_2BPPV2 = 0x93F0,
357 KTX_COMPRESSED_SRGB_ALPHA_PVRTC_4BPPV1 = 0x8A57,
358 KTX_COMPRESSED_SRGB_ALPHA_PVRTC_4BPPV2 = 0x93F1,
359 KTX_COMPRESSED_SRGB_ALPHA_S3TC_DXT1 = 0x8C4D,
360 KTX_COMPRESSED_SRGB_ALPHA_S3TC_DXT3 = 0x8C4E,
361 KTX_COMPRESSED_SRGB_ALPHA_S3TC_DXT5 = 0x8C4F,
362 KTX_COMPRESSED_SRGB_PVRTC_2BPPV1 = 0x8A54,
363 KTX_COMPRESSED_SRGB_PVRTC_4BPPV1 = 0x8A55,
364 KTX_COMPRESSED_SRGB_S3TC_DXT1 = 0x8C4C,
365 KTX_ETC1_RGB8 = 0x8D64,
366 KTX_ETC1_SRGB8 = 0x88EE,
367};
368
369/**
370 * Constructs an empty texture. The default is to set up the texture as an
371 * empty 2-d texture; follow up with one of the variants of setup_texture() if
372 * this is not what you want.
373 */
374Texture::
375Texture(const string &name) :
376 Namable(name),
377 _lock(name),
378 _cvar(_lock)
379{
380 _reloading = false;
381
382 CDWriter cdata(_cycler, true);
383 do_set_format(cdata, F_rgb);
384 do_set_component_type(cdata, T_unsigned_byte);
385}
386
387/**
388 * Use Texture::make_copy() to make a duplicate copy of an existing Texture.
389 */
390Texture::
391Texture(const Texture &copy) :
392 Namable(copy),
393 _cycler(copy._cycler),
394 _lock(copy.get_name()),
395 _cvar(_lock)
396{
397 _reloading = false;
398}
399
400/**
401 * Use Texture::make_copy() to make a duplicate copy of an existing Texture.
402 */
403void Texture::
404operator = (const Texture &copy) {
405 Namable::operator = (copy);
406 _cycler = copy._cycler;
407}
408
409/**
410 *
411 */
412Texture::
413~Texture() {
414 release_all();
415 nassertv(!_reloading);
416}
417
418/**
419 * Generates a special cube map image in the texture that can be used to apply
420 * bump mapping effects: for each texel in the cube map that is indexed by the
421 * 3-d texture coordinates (x, y, z), the resulting value is the normalized
422 * vector (x, y, z) (compressed from -1..1 into 0..1).
423 */
424void Texture::
425generate_normalization_cube_map(int size) {
426 CDWriter cdata(_cycler, true);
427 do_setup_texture(cdata, TT_cube_map, size, size, 6, T_unsigned_byte, F_rgb);
428 PTA_uchar image = do_make_ram_image(cdata);
429 cdata->_keep_ram_image = true;
430
431 cdata->inc_image_modified();
432 cdata->inc_properties_modified();
433
434 PN_stdfloat half_size = (PN_stdfloat)size * 0.5f;
435 PN_stdfloat center = half_size - 0.5f;
436
437 LMatrix4 scale
438 (127.5f, 0.0f, 0.0f, 0.0f,
439 0.0f, 127.5f, 0.0f, 0.0f,
440 0.0f, 0.0f, 127.5f, 0.0f,
441 127.5f, 127.5f, 127.5f, 1.0f);
442
443 unsigned char *p = image;
444 int xi, yi;
445
446 // Page 0: positive X.
447 for (yi = 0; yi < size; ++yi) {
448 for (xi = 0; xi < size; ++xi) {
449 LVector3 vec(half_size, center - yi, center - xi);
450 vec.normalize();
451 vec = scale.xform_point(vec);
452
453 *p++ = (unsigned char)vec[2];
454 *p++ = (unsigned char)vec[1];
455 *p++ = (unsigned char)vec[0];
456 }
457 }
458
459 // Page 1: negative X.
460 for (yi = 0; yi < size; ++yi) {
461 for (xi = 0; xi < size; ++xi) {
462 LVector3 vec(-half_size, center - yi, xi - center);
463 vec.normalize();
464 vec = scale.xform_point(vec);
465 *p++ = (unsigned char)vec[2];
466 *p++ = (unsigned char)vec[1];
467 *p++ = (unsigned char)vec[0];
468 }
469 }
470
471 // Page 2: positive Y.
472 for (yi = 0; yi < size; ++yi) {
473 for (xi = 0; xi < size; ++xi) {
474 LVector3 vec(xi - center, half_size, yi - center);
475 vec.normalize();
476 vec = scale.xform_point(vec);
477 *p++ = (unsigned char)vec[2];
478 *p++ = (unsigned char)vec[1];
479 *p++ = (unsigned char)vec[0];
480 }
481 }
482
483 // Page 3: negative Y.
484 for (yi = 0; yi < size; ++yi) {
485 for (xi = 0; xi < size; ++xi) {
486 LVector3 vec(xi - center, -half_size, center - yi);
487 vec.normalize();
488 vec = scale.xform_point(vec);
489 *p++ = (unsigned char)vec[2];
490 *p++ = (unsigned char)vec[1];
491 *p++ = (unsigned char)vec[0];
492 }
493 }
494
495 // Page 4: positive Z.
496 for (yi = 0; yi < size; ++yi) {
497 for (xi = 0; xi < size; ++xi) {
498 LVector3 vec(xi - center, center - yi, half_size);
499 vec.normalize();
500 vec = scale.xform_point(vec);
501 *p++ = (unsigned char)vec[2];
502 *p++ = (unsigned char)vec[1];
503 *p++ = (unsigned char)vec[0];
504 }
505 }
506
507 // Page 5: negative Z.
508 for (yi = 0; yi < size; ++yi) {
509 for (xi = 0; xi < size; ++xi) {
510 LVector3 vec(center - xi, center - yi, -half_size);
511 vec.normalize();
512 vec = scale.xform_point(vec);
513 *p++ = (unsigned char)vec[2];
514 *p++ = (unsigned char)vec[1];
515 *p++ = (unsigned char)vec[0];
516 }
517 }
518}
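// A minimal usage sketch for the method above; the texture name and the
// per-face size of 64 texels are arbitrary choices, not values mandated by
// the API.
//
//   PT(Texture) cube = new Texture("normalization_cube_map");
//   cube->generate_normalization_cube_map(64);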
519
520/**
521 * Generates a special 256x1 1-d texture that can be used to apply an
522 * arbitrary alpha scale to objects by judicious use of the texture matrix.  The
523 * texture is a gradient, with an alpha of 0 on the left (U = 0), and 255 on
524 * the right (U = 1).
525 */
526void Texture::
527generate_alpha_scale_map() {
528 CDWriter cdata(_cycler, true);
529 do_setup_texture(cdata, TT_1d_texture, 256, 1, 1, T_unsigned_byte, F_alpha);
530 cdata->_default_sampler.set_wrap_u(SamplerState::WM_clamp);
531 cdata->_default_sampler.set_minfilter(SamplerState::FT_nearest);
532 cdata->_default_sampler.set_magfilter(SamplerState::FT_nearest);
533
534 cdata->_compression = CM_off;
535
536 cdata->inc_image_modified();
537 cdata->inc_properties_modified();
538
539 PTA_uchar image = do_make_ram_image(cdata);
540 cdata->_keep_ram_image = true;
541
542 unsigned char *p = image;
543 for (int xi = 0; xi < 256; ++xi) {
544 *p++ = xi;
545 }
546}
547
548/**
549 * Reads the named filename into the texture.
550 */
551bool Texture::
552read(const Filename &fullpath, const LoaderOptions &options) {
553 CDWriter cdata(_cycler, true);
554 do_clear(cdata);
555 cdata->inc_properties_modified();
556 cdata->inc_image_modified();
557 return do_read(cdata, fullpath, Filename(), 0, 0, 0, 0, false, false,
558 options, nullptr);
559}
560
561/**
562 * Combine a 3-component image with a grayscale image to get a 4-component
563 * image.
564 *
565 * See the description of the full-parameter read() method for the meaning of
566 * the primary_file_num_channels and alpha_file_channel parameters.
567 */
568bool Texture::
569read(const Filename &fullpath, const Filename &alpha_fullpath,
570 int primary_file_num_channels, int alpha_file_channel,
571 const LoaderOptions &options) {
572 CDWriter cdata(_cycler, true);
573 do_clear(cdata);
574 cdata->inc_properties_modified();
575 cdata->inc_image_modified();
576 return do_read(cdata, fullpath, alpha_fullpath, primary_file_num_channels,
577 alpha_file_channel, 0, 0, false, false,
578 options, nullptr);
579}
580
581/**
582 * Reads a single file into a single page or mipmap level, or automatically
583 * reads a series of files into a series of pages and/or mipmap levels.
584 *
585 * See the description of the full-parameter read() method for the meaning of
586 * the various parameters.
587 */
588bool Texture::
589read(const Filename &fullpath, int z, int n,
590 bool read_pages, bool read_mipmaps,
591 const LoaderOptions &options) {
592 CDWriter cdata(_cycler, true);
593 cdata->inc_properties_modified();
594 cdata->inc_image_modified();
595 return do_read(cdata, fullpath, Filename(), 0, 0, z, n, read_pages, read_mipmaps,
596 options, nullptr);
597}
598
599/**
600 * Reads the texture from the indicated filename. If
601 * primary_file_num_channels is not 0, it specifies the number of components
602 * to downgrade the image to if it is greater than this number.
603 *
604 * If the filename has the extension .txo, this implicitly reads a texture
605 * object instead of a filename (which replaces all of the texture
606 * properties). In this case, all the rest of the parameters are ignored, and
607 * the filename should not contain any hash marks; just the one named file
608 * will be read, since a single .txo file can contain all pages and mipmaps
609 * necessary to define a texture.
610 *
611 * If alpha_fullpath is not empty, it specifies the name of a file from which
612 * to retrieve the alpha. In this case, alpha_file_channel represents the
613 * numeric channel of this image file to use as the resulting texture's alpha
614 * channel; usually, this is 0 to indicate the grayscale combination of r, g,
615 * b; or it may be a one-based channel number, e.g. 1 for the red channel, 2
616 * for the green channel, and so on.
617 *
618 * If read_pages is false, then z indicates the page number into which this
619 * image will be assigned. Normally this is 0 for the first (or only) page of
620 * the texture. 3-D textures have one page for each level of depth, and cube
621 * map textures always have six pages.
622 *
623 * If read_pages is true, multiple images will be read at once, one for each
624 * page of a cube map or a 3-D texture. In this case, the filename should
625 * contain a sequence of one or more hash marks ("#") which will be filled in
626 * with the z value of each page, zero-based. In this case, the z parameter
627 * indicates the maximum z value that will be loaded, or 0 to load all
628 * filenames that exist.
629 *
630 * If read_mipmaps is false, then n indicates the mipmap level to which this
631 * image will be assigned. Normally this is 0 for the base texture image, but
632 * it is possible to load custom mipmap levels into the later images. After
633 * the base texture image is loaded (thus defining the size of the texture),
634 * you can call get_expected_num_mipmap_levels() to determine the maximum
635 * sensible value for n.
636 *
637 * If read_mipmaps is true, multiple images will be read as above, but this
638 * time the images represent the different mipmap levels of the texture image.
639 * In this case, the n parameter indicates the maximum n value that will be
640 * loaded, or 0 to load all filenames that exist (up to the expected number of
641 * mipmap levels).
642 *
643 * If both read_pages and read_mipmaps are true, then both sequences will be
644 * read; the filename should contain two sequences of hash marks, separated by
645 * some character such as a hyphen, underscore, or dot. The first hash mark
646 * sequence will be filled in with the mipmap level, while the second hash
647 * mark sequence will be the page index.
648 *
649 * This method implicitly sets keep_ram_image to false.
650 */
651bool Texture::
652read(const Filename &fullpath, const Filename &alpha_fullpath,
653 int primary_file_num_channels, int alpha_file_channel,
654 int z, int n, bool read_pages, bool read_mipmaps,
655 BamCacheRecord *record,
656 const LoaderOptions &options) {
657 CDWriter cdata(_cycler, true);
658 cdata->inc_properties_modified();
659 cdata->inc_image_modified();
660 return do_read(cdata, fullpath, alpha_fullpath, primary_file_num_channels,
661 alpha_file_channel, z, n, read_pages, read_mipmaps,
662 options, record);
663}
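// A usage sketch of the hash-mark convention described above, reading a
// hypothetical series of page files "slices_#.png" (slices_0.png,
// slices_1.png, ...) into successive pages of a 3-d texture; z = 0 loads as
// many files as exist.
//
//   PT(Texture) tex = new Texture("slices");
//   tex->setup_3d_texture();
//   tex->read(Filename("slices_#.png"), Filename(), 0, 0,
//             0, 0, true, false, nullptr, LoaderOptions());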
664
665/**
666 * Estimates the amount of texture memory that will be consumed by loading
667 * this texture. This returns a value that is not specific to any particular
668 * graphics card or driver; it tries to make a reasonable assumption about how
669 * a driver will load the texture. It does not account for texture
670 * compression or anything fancy. This is mainly useful for debugging and
671 * reporting purposes.
672 *
673 * Returns a value in bytes.
674 */
675size_t Texture::
676estimate_texture_memory() const {
677 CDReader cdata(_cycler);
678 size_t pixels = cdata->_x_size * cdata->_y_size * cdata->_z_size;
679
680 size_t bpp = 0;
681 switch (cdata->_format) {
682 case Texture::F_rgb332:
683 bpp = 1;
684 break;
685
686 case Texture::F_alpha:
687 case Texture::F_red:
688 case Texture::F_green:
689 case Texture::F_blue:
690 case Texture::F_luminance:
691 case Texture::F_sluminance:
692 case Texture::F_r8i:
693 bpp = 1;
694 break;
695
696 case Texture::F_luminance_alpha:
697 case Texture::F_luminance_alphamask:
698 case Texture::F_sluminance_alpha:
699 case Texture::F_rgba4:
700 case Texture::F_rgb5:
701 case Texture::F_rgba5:
702 case Texture::F_rg:
703 bpp = 2;
704 break;
705
706 case Texture::F_rgba:
707 case Texture::F_rgbm:
708 case Texture::F_rgb:
709 case Texture::F_srgb:
710 // Most of the above formats use only 3 bytes per pixel, but they are
711 // likely to be padded to 4 bytes by the driver.
712 bpp = 4;
713 break;
714
715 case Texture::F_color_index:
716 case Texture::F_rgb8:
717 case Texture::F_rgba8:
718 case Texture::F_srgb_alpha:
719 case Texture::F_rgb8i:
720 case Texture::F_rgba8i:
721 bpp = 4;
722 break;
723
724 case Texture::F_depth_stencil:
725 bpp = 4;
726 break;
727
728 case Texture::F_depth_component:
729 case Texture::F_depth_component16:
730 bpp = 2;
731 break;
732
733 case Texture::F_depth_component24: // Gets padded
734 case Texture::F_depth_component32:
735 bpp = 4;
736 break;
737
738 case Texture::F_rgba12:
739 case Texture::F_rgb12:
740 bpp = 8;
741 break;
742
743 case Texture::F_rgba32:
744 case Texture::F_rgba32i:
745 bpp = 16;
746 break;
747
748 case Texture::F_r16:
749 case Texture::F_r16i:
750 case Texture::F_rg8i:
751 bpp = 2;
752 break;
753 case Texture::F_rg16:
754 case Texture::F_rg16i:
755 bpp = 4;
756 break;
757 case Texture::F_rgb16:
758 case Texture::F_rgb16i:
759 case Texture::F_rgba16:
760 case Texture::F_rgba16i:
761 bpp = 8;
762 break;
763
764 case Texture::F_r32i:
765 case Texture::F_r32:
766 bpp = 4;
767 break;
768
769 case Texture::F_rg32:
770 case Texture::F_rg32i:
771 bpp = 8;
772 break;
773
774 case Texture::F_rgb32:
775 case Texture::F_rgb32i:
776 bpp = 16;
777 break;
778
779 case Texture::F_r11_g11_b10:
780 case Texture::F_rgb9_e5:
781 case Texture::F_rgb10_a2:
782 bpp = 4;
783 break;
784 }
785
786 if (bpp == 0) {
787 bpp = 4;
788 gobj_cat.warning() << "Unhandled format in estimate_texture_memory(): "
789 << cdata->_format << "\n";
790 }
791
792 size_t bytes = pixels * bpp;
793 if (uses_mipmaps()) {
794 bytes = (bytes * 4) / 3;
795 }
796
797 return bytes;
798}
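// A worked example of the estimate above: a 256 x 256 F_rgba texture is
// assumed to take 4 bytes per pixel, i.e. 256 * 256 * 4 = 262144 bytes;
// with mipmapping enabled the estimate grows by a third, to
// 262144 * 4 / 3 = 349525 bytes.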
799
800/**
801 * Records an arbitrary object in the Texture, associated with a specified
802 * key. The object may later be retrieved by calling get_aux_data() with the
803 * same key.
804 *
805 * These data objects are not recorded to a bam or txo file.
806 */
807void Texture::
808set_aux_data(const string &key, TypedReferenceCount *aux_data) {
809 MutexHolder holder(_lock);
810 _aux_data[key] = aux_data;
811}
812
813/**
814 * Removes a record previously recorded via set_aux_data().
815 */
816void Texture::
817clear_aux_data(const string &key) {
818 MutexHolder holder(_lock);
819 _aux_data.erase(key);
820}
821
822/**
823 * Returns a record previously recorded via set_aux_data(). Returns NULL if
824 * there was no record associated with the indicated key.
825 */
826TypedReferenceCount *Texture::
827get_aux_data(const string &key) const {
828 MutexHolder holder(_lock);
829 AuxData::const_iterator di;
830 di = _aux_data.find(key);
831 if (di != _aux_data.end()) {
832 return (*di).second;
833 }
834 return nullptr;
835}
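// A brief usage sketch: "movie-sync" is an arbitrary key and "sync_data" a
// hypothetical TypedReferenceCount-derived object supplied by the caller.
//
//   tex->set_aux_data("movie-sync", sync_data);
//   PT(TypedReferenceCount) data = tex->get_aux_data("movie-sync");
//   tex->clear_aux_data("movie-sync");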
836
837/**
838 * Reads the texture from a Panda texture object. This defines the complete
839 * Texture specification, including the image data as well as all texture
840 * properties. This only works if the txo file contains a static Texture
841 * image, as opposed to a subclass of Texture such as a movie texture.
842 *
843 * Pass a real filename if it is available, or empty string if it is not.
844 */
845bool Texture::
846read_txo(istream &in, const string &filename) {
847 CDWriter cdata(_cycler, true);
848 cdata->inc_properties_modified();
849 cdata->inc_image_modified();
850 return do_read_txo(cdata, in, filename);
851}
852
853/**
854 * Constructs a new Texture object from the txo file. This is similar to
855 * Texture::read_txo(), but it constructs and returns a new object, which
856 * allows it to return a subclass of Texture (for instance, a movie texture).
857 *
858 * Pass a real filename if it is available, or empty string if it is not.
859 */
860PT(Texture) Texture::
861make_from_txo(istream &in, const string &filename) {
862 DatagramInputFile din;
863
864 if (!din.open(in, filename)) {
865 gobj_cat.error()
866 << "Could not read texture object: " << filename << "\n";
867 return nullptr;
868 }
869
870 string head;
871 if (!din.read_header(head, _bam_header.size())) {
872 gobj_cat.error()
873 << filename << " is not a texture object file.\n";
874 return nullptr;
875 }
876
877 if (head != _bam_header) {
878 gobj_cat.error()
879 << filename << " is not a texture object file.\n";
880 return nullptr;
881 }
882
883 BamReader reader(&din);
884 if (!reader.init()) {
885 return nullptr;
886 }
887
888 TypedWritable *object = reader.read_object();
889
890 if (object != nullptr &&
891 object->is_exact_type(BamCacheRecord::get_class_type())) {
892 // Here's a special case: if the first object in the file is a
893 // BamCacheRecord, it's really a cache data file and not a true txo file;
894 // but skip over the cache data record and let the user treat it like an
895 // ordinary txo file.
896 object = reader.read_object();
897 }
898
899 if (object == nullptr) {
900 gobj_cat.error()
901 << "Texture object " << filename << " is empty.\n";
902 return nullptr;
903
904 } else if (!object->is_of_type(Texture::get_class_type())) {
905 gobj_cat.error()
906 << "Texture object " << filename << " contains a "
907 << object->get_type() << ", not a Texture.\n";
908 return nullptr;
909 }
910
911 PT(Texture) other = DCAST(Texture, object);
912 if (!reader.resolve()) {
913 gobj_cat.error()
914 << "Unable to fully resolve texture object file.\n";
915 return nullptr;
916 }
917
918 return other;
919}
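// A minimal sketch of reading a texture object from disk with the method
// above; "tex.txo" is a hypothetical, uncompressed txo file (a .txo.pz file
// would first need to be routed through a decompression stream).
//
//   std::ifstream in("tex.txo", std::ios::binary);
//   PT(Texture) tex = Texture::make_from_txo(in, "tex.txo");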
920
921/**
922 * Writes the texture to a Panda texture object. This defines the complete
923 * Texture specification, including the image data as well as all texture
924 * properties.
925 *
926 * The filename is just for reference.
927 */
928bool Texture::
929write_txo(ostream &out, const string &filename) const {
930 CDReader cdata(_cycler);
931 return do_write_txo(cdata, out, filename);
932}
933
934/**
935 * Reads the texture from a DDS file object. This is a Microsoft-defined file
936 * format; it is similar in principle to a txo object, in that it is designed
937 * to contain the texture image in a form as similar as possible to its
938 * runtime image, and it can contain mipmaps, pre-compressed textures, and so
939 * on.
940 *
941 * As with read_txo, the filename is just for reference.
942 */
943bool Texture::
944read_dds(istream &in, const string &filename, bool header_only) {
945 CDWriter cdata(_cycler, true);
946 cdata->inc_properties_modified();
947 cdata->inc_image_modified();
948 return do_read_dds(cdata, in, filename, header_only);
949}
950
951/**
952 * Reads the texture from a KTX file object. This is a Khronos-defined file
953 * format; it is similar in principle to a dds object, in that it is designed
954 * to contain the texture image in a form as similar as possible to its
955 * runtime image, and it can contain mipmaps, pre-compressed textures, and so
956 * on.
957 *
958 * As with read_dds, the filename is just for reference.
959 */
960bool Texture::
961read_ktx(istream &in, const string &filename, bool header_only) {
962 CDWriter cdata(_cycler, true);
963 cdata->inc_properties_modified();
964 cdata->inc_image_modified();
965 return do_read_ktx(cdata, in, filename, header_only);
966}
967
968/**
969 * Loads a texture whose filename is derived by concatenating a suffix to the
970 * filename of this texture. May return NULL, for example, if this texture
971 * doesn't have a filename.
972 */
973Texture *Texture::
974load_related(const InternalName *suffix) const {
975 MutexHolder holder(_lock);
976 CDReader cdata(_cycler);
977
978 RelatedTextures::const_iterator ti;
979 ti = _related_textures.find(suffix);
980 if (ti != _related_textures.end()) {
981 return (*ti).second;
982 }
983 if (cdata->_fullpath.empty()) {
984 return nullptr;
985 }
986 Filename main = cdata->_fullpath;
987 main.set_basename_wo_extension(main.get_basename_wo_extension() +
988 suffix->get_name());
989 PT(Texture) res;
990 if (!cdata->_alpha_fullpath.empty()) {
991 Filename alph = cdata->_alpha_fullpath;
992 alph.set_basename_wo_extension(alph.get_basename_wo_extension() +
993 suffix->get_name());
994 VirtualFileSystem *vfs = VirtualFileSystem::get_global_ptr();
995 if (vfs->exists(alph)) {
996 // The alpha variant of the filename, with the suffix, exists. Use it
997 // to load the texture.
998 res = TexturePool::load_texture(main, alph,
999 cdata->_primary_file_num_channels,
1000 cdata->_alpha_file_channel, false);
1001 } else {
1002 // If the alpha variant of the filename doesn't exist, just go ahead and
1003 // load the related texture without alpha.
1004 res = TexturePool::load_texture(main);
1005 }
1006
1007 } else {
1008 // No alpha filename--just load the single file. It doesn't necessarily
1009 // have the same number of channels as this one.
1010 res = TexturePool::load_texture(main);
1011 }
1012
1013 // I'm casting away the const-ness of 'this' because this field is only a
1014 // cache.
1015 ((Texture *)this)->_related_textures.insert(RelatedTextures::value_type(suffix, res));
1016 return res;
1017}
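// A usage sketch: loading a companion texture whose filename differs only by
// a suffix, e.g. "brick.png" -> "brick_n.png"; the "_n" naming convention is
// just an example, not something this class prescribes.
//
//   PT(Texture) normal_map = tex->load_related(InternalName::make("_n"));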
1018
1019/**
1020 * Replaces the current system-RAM image with the new data, converting it
1021 * first if necessary from the indicated component-order format. See
1022 * get_ram_image_as() for specifications about the format. This method cannot
1023 * support compressed image data or sub-pages; use set_ram_image() for that.
1024 */
1025void Texture::
1026set_ram_image_as(CPTA_uchar image, const string &supplied_format) {
1027 CDWriter cdata(_cycler, true);
1028
1029 string format = upcase(supplied_format);
1030
1031 // Make sure we can grab something that's uncompressed.
1032 size_t imgsize = (size_t)cdata->_x_size * (size_t)cdata->_y_size *
1033 (size_t)cdata->_z_size * (size_t)cdata->_num_views;
1034 nassertv(image.size() == (size_t)(cdata->_component_width * format.size() * imgsize));
1035
1036 // Check if the format is already what we have internally.
1037 if ((cdata->_num_components == 1 && format.size() == 1) ||
1038 (cdata->_num_components == 2 && format.size() == 2 && format.at(1) == 'A' && format.at(0) != 'A') ||
1039 (cdata->_num_components == 3 && format == "BGR") ||
1040 (cdata->_num_components == 4 && format == "BGRA")) {
1041 // The format string is already our format, so we just need to copy it.
1042 do_set_ram_image(cdata, image);
1043 return;
1044 }
1045
1046 // Create a new empty array that can hold our image.
1047 PTA_uchar newdata = PTA_uchar::empty_array(imgsize * cdata->_num_components * cdata->_component_width, get_class_type());
1048
1049 // These ifs are for optimization of commonly used image types.
1050 if (cdata->_component_width == 1) {
1051 if (format == "RGBA" && cdata->_num_components == 4) {
1052 imgsize *= 4;
1053 for (int p = 0; p < imgsize; p += 4) {
1054 newdata[p + 2] = image[p ];
1055 newdata[p + 1] = image[p + 1];
1056 newdata[p ] = image[p + 2];
1057 newdata[p + 3] = image[p + 3];
1058 }
1059 do_set_ram_image(cdata, newdata);
1060 return;
1061 }
1062 if (format == "RGB" && cdata->_num_components == 3) {
1063 imgsize *= 3;
1064 for (int p = 0; p < imgsize; p += 3) {
1065 newdata[p + 2] = image[p ];
1066 newdata[p + 1] = image[p + 1];
1067 newdata[p ] = image[p + 2];
1068 }
1069 do_set_ram_image(cdata, newdata);
1070 return;
1071 }
1072 if (format == "A" && cdata->_num_components != 3) {
1073 // We can generally rely on alpha to be the last component.
1074 int component = cdata->_num_components - 1;
1075 for (int p = 0; p < imgsize; ++p) {
1076 newdata[p * cdata->_num_components + component] = image[p];
1077 }
1078 do_set_ram_image(cdata, newdata);
1079 return;
1080 }
1081 for (int p = 0; p < imgsize; ++p) {
1082 for (uchar s = 0; s < format.size(); ++s) {
1083 signed char component = -1;
1084 if (format.at(s) == 'B' || (cdata->_num_components <= 2 && format.at(s) != 'A')) {
1085 component = 0;
1086 } else if (format.at(s) == 'G') {
1087 component = 1;
1088 } else if (format.at(s) == 'R') {
1089 component = 2;
1090 } else if (format.at(s) == 'A') {
1091 if (cdata->_num_components != 3) {
1092 component = cdata->_num_components - 1;
1093 } else {
1094 // Ignore.
1095 }
1096 } else if (format.at(s) == '0') {
1097 // Ignore.
1098 } else if (format.at(s) == '1') {
1099 // Ignore.
1100 } else {
1101 gobj_cat.error() << "Unexpected component character '"
1102 << format.at(s) << "', expected one of RGBA!\n";
1103 return;
1104 }
1105 if (component >= 0) {
1106 newdata[p * cdata->_num_components + component] = image[p * format.size() + s];
1107 }
1108 }
1109 }
1110 do_set_ram_image(cdata, newdata);
1111 return;
1112 }
1113 for (int p = 0; p < imgsize; ++p) {
1114 for (uchar s = 0; s < format.size(); ++s) {
1115 signed char component = -1;
1116 if (format.at(s) == 'B' || (cdata->_num_components <= 2 && format.at(s) != 'A')) {
1117 component = 0;
1118 } else if (format.at(s) == 'G') {
1119 component = 1;
1120 } else if (format.at(s) == 'R') {
1121 component = 2;
1122 } else if (format.at(s) == 'A') {
1123 if (cdata->_num_components != 3) {
1124 component = cdata->_num_components - 1;
1125 } else {
1126 // Ignore.
1127 }
1128 } else if (format.at(s) == '0') {
1129 // Ignore.
1130 } else if (format.at(s) == '1') {
1131 // Ignore.
1132 } else {
1133 gobj_cat.error() << "Unexpected component character '"
1134 << format.at(s) << "', expected one of RGBA!\n";
1135 return;
1136 }
1137 if (component >= 0) {
1138 memcpy((void*)(newdata + (p * cdata->_num_components + component) * cdata->_component_width),
1139 (void*)(image + (p * format.size() + s) * cdata->_component_width),
1140 cdata->_component_width);
1141 }
1142 }
1143 }
1144 do_set_ram_image(cdata, newdata);
1145 return;
1146}
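// A sketch of supplying pixel data in "RGBA" byte order to a texture set up
// as a 4-component, 8-bit 2-d texture; "width" and "height" are hypothetical
// and must match the texture's dimensions.
//
//   PTA_uchar pixels = PTA_uchar::empty_array(width * height * 4);
//   // ... fill pixels in R, G, B, A order ...
//   tex->set_ram_image_as(pixels, "RGBA");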
1147
1148/**
1149 * Returns the flag that indicates whether this Texture is eligible to have
1150 * its main RAM copy of the texture memory dumped when the texture is prepared
1151 * for rendering. See set_keep_ram_image().
1152 */
1153bool Texture::
1154get_keep_ram_image() const {
1155 CDReader cdata(_cycler);
1156 return cdata->_keep_ram_image;
1157}
1158
1159/**
1160 * Returns true if there is enough information in this Texture object to write
1161 * it to the bam cache successfully, false otherwise. For most textures, this
1162 * is the same as has_ram_image().
1163 */
1164bool Texture::
1165is_cacheable() const {
1166 CDReader cdata(_cycler);
1167 return do_has_bam_rawdata(cdata);
1168}
1169
1170/**
1171 * Returns the number of contiguous mipmap levels that exist in RAM, up until
1172 * the first gap in the sequence. It is guaranteed that at least mipmap
1173 * levels [0, get_num_ram_mipmap_images()) exist.
1174 *
1175 * The number returned will never exceed the number of required mipmap images
1176 * based on the size of the texture and its filter mode.
1177 *
1178 * This method is different from get_num_ram_mipmap_images() in that it
1179 * returns only the number of mipmap levels that can actually be usefully
1180 * loaded, regardless of the actual number that may be stored.
1181 */
1182int Texture::
1183get_num_loadable_ram_mipmap_images() const {
1184 CDReader cdata(_cycler);
1185 if (cdata->_ram_images.empty() || cdata->_ram_images[0]._image.empty()) {
1186 // If we don't even have a base image, the answer is none.
1187 return 0;
1188 }
1189 if (!uses_mipmaps()) {
1190 // If we have a base image and don't require mipmapping, the answer is 1.
1191 return 1;
1192 }
1193
1194 // Check that we have enough mipmap levels to meet the size requirements.
1195 int size = max(cdata->_x_size, max(cdata->_y_size, cdata->_z_size));
1196 int n = 0;
1197 int x = 1;
1198 while (x < size) {
1199 x = (x << 1);
1200 ++n;
1201 if (n >= (int)cdata->_ram_images.size() || cdata->_ram_images[n]._image.empty()) {
1202 return n;
1203 }
1204 }
1205
1206 ++n;
1207 return n;
1208}
1209
1210/**
1211 * Returns the system-RAM image data associated with the nth mipmap level, if
1212 * present. Returns NULL if the nth mipmap level is not present.
1213 */
1214CPTA_uchar Texture::
1215get_ram_mipmap_image(int n) const {
1216 CDReader cdata(_cycler);
1217 if (n < (int)cdata->_ram_images.size() && !cdata->_ram_images[n]._image.empty()) {
1218 return cdata->_ram_images[n]._image;
1219 }
1220 return CPTA_uchar(get_class_type());
1221}
1222
1223/**
1224 * Similar to get_ram_mipmap_image(); in this case, however, the void pointer
1225 * for the given ram image is returned. This will be NULL unless it has been
1226 * explicitly set.
1227 */
1228void *Texture::
1229get_ram_mipmap_pointer(int n) const {
1230 CDReader cdata(_cycler);
1231 if (n < (int)cdata->_ram_images.size()) {
1232 return cdata->_ram_images[n]._pointer_image;
1233 }
1234 return nullptr;
1235}
1236
1237/**
1238 * Sets an explicit void pointer as the texture's mipmap image for the
1239 * indicated level. This is a special call to direct a texture to reference
1240 * some external image location, for instance from a webcam input.
1241 *
1242 * The texture will henceforth reference this pointer directly, instead of its
1243 * own internal storage; the user is responsible for ensuring the data at this
1244 * address remains allocated and valid, and in the correct format, during the
1245 * lifetime of the texture.
1246 */
1247void Texture::
1248set_ram_mipmap_pointer(int n, void *image, size_t page_size) {
1249 CDWriter cdata(_cycler, true);
1250 nassertv(cdata->_ram_image_compression != CM_off || do_get_expected_ram_mipmap_image_size(cdata, n));
1251
1252 while (n >= (int)cdata->_ram_images.size()) {
1253 cdata->_ram_images.push_back(RamImage());
1254 }
1255
1256 cdata->_ram_images[n]._page_size = page_size;
1257 // Note that _ram_images[n]._image is not cleared here; any existing RAM image is left in place.
1258 cdata->_ram_images[n]._pointer_image = image;
1259 cdata->inc_image_modified();
1260}
1261
1262/**
1263 * Accepts a raw pointer cast as an int, which is then passed to
1264 * set_ram_mipmap_pointer(); see the documentation for that method.
1265 *
1266 * This variant is particularly useful to set an external pointer from a
1267 * language like Python, which doesn't support void pointers directly.
1268 */
1269void Texture::
1270set_ram_mipmap_pointer_from_int(long long pointer, int n, int page_size) {
1271 set_ram_mipmap_pointer(n, (void*)pointer, (size_t)page_size);
1272}
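// A sketch of pointing mipmap level 0 at an externally owned buffer, e.g. a
// frame grabbed from a capture device; "frame_data" and "frame_size" are
// hypothetical, and the buffer must stay valid and correctly formatted for as
// long as the texture references it.
//
//   tex->set_ram_mipmap_pointer(0, frame_data, frame_size);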
1273
1274/**
1275 * Discards the current system-RAM image for the nth mipmap level.
1276 */
1277void Texture::
1278clear_ram_mipmap_image(int n) {
1279 CDWriter cdata(_cycler, true);
1280 if (n >= (int)cdata->_ram_images.size()) {
1281 return;
1282 }
1283 cdata->_ram_images[n]._page_size = 0;
1284 cdata->_ram_images[n]._image.clear();
1285 cdata->_ram_images[n]._pointer_image = nullptr;
1286}
1287
1288/**
1289 * Returns a modifiable pointer to the internal "simple" texture image. See
1290 * set_simple_ram_image().
1291 */
1292PTA_uchar Texture::
1293modify_simple_ram_image() {
1294 CDWriter cdata(_cycler, true);
1295 cdata->_simple_image_date_generated = (int32_t)time(nullptr);
1296 return cdata->_simple_ram_image._image;
1297}
1298
1299/**
1300 * Creates an empty array for the simple ram image of the indicated size, and
1301 * returns a modifiable pointer to the new array. See set_simple_ram_image().
1302 */
1303PTA_uchar Texture::
1304new_simple_ram_image(int x_size, int y_size) {
1305 CDWriter cdata(_cycler, true);
1306 nassertr(cdata->_texture_type == TT_2d_texture, PTA_uchar());
1307 size_t expected_page_size = (size_t)(x_size * y_size * 4);
1308
1309 cdata->_simple_x_size = x_size;
1310 cdata->_simple_y_size = y_size;
1311 cdata->_simple_ram_image._image = PTA_uchar::empty_array(expected_page_size);
1312 cdata->_simple_ram_image._page_size = expected_page_size;
1313 cdata->_simple_image_date_generated = (int32_t)time(nullptr);
1314 cdata->inc_simple_image_modified();
1315
1316 return cdata->_simple_ram_image._image;
1317}
1318
1319/**
1320 * Computes the "simple" ram image by loading the main RAM image, if it is not
1321 * already available, and reducing it to 16x16 or smaller. This may be an
1322 * expensive operation.
1323 */
1324void Texture::
1325generate_simple_ram_image() {
1326 CDWriter cdata(_cycler, true);
1327
1328 if (cdata->_texture_type != TT_2d_texture ||
1329 cdata->_ram_image_compression != CM_off) {
1330 return;
1331 }
1332
1333 PNMImage pnmimage;
1334 if (!do_store_one(cdata, pnmimage, 0, 0)) {
1335 return;
1336 }
1337
1338 // Start at the suggested size from the config file.
1339 int x_size = simple_image_size.get_word(0);
1340 int y_size = simple_image_size.get_word(1);
1341
1342 // Limit it to no larger than the source image, and also make it a power of
1343 // two.
1344 x_size = down_to_power_2(min(x_size, cdata->_x_size));
1345 y_size = down_to_power_2(min(y_size, cdata->_y_size));
1346
1347 // Generate a reduced image of that size.
1348 PNMImage scaled(x_size, y_size, pnmimage.get_num_channels());
1349 scaled.quick_filter_from(pnmimage);
1350
1351 // Make sure the reduced image has 4 components, by convention.
1352 if (!scaled.has_alpha()) {
1353 scaled.add_alpha();
1354 scaled.alpha_fill(1.0);
1355 }
1356 scaled.set_num_channels(4);
1357
1358 // Now see if we can go even smaller.
1359 bool did_anything;
1360 do {
1361 did_anything = false;
1362
1363 // Try to reduce X.
1364 if (x_size > 1) {
1365 int new_x_size = (x_size >> 1);
1366 PNMImage smaller(new_x_size, y_size, 4);
1367 smaller.quick_filter_from(scaled);
1368 PNMImage bigger(x_size, y_size, 4);
1369 bigger.quick_filter_from(smaller);
1370
1371 if (compare_images(scaled, bigger)) {
1372 scaled.take_from(smaller);
1373 x_size = new_x_size;
1374 did_anything = true;
1375 }
1376 }
1377
1378 // Try to reduce Y.
1379 if (y_size > 1) {
1380 int new_y_size = (y_size >> 1);
1381 PNMImage smaller(x_size, new_y_size, 4);
1382 smaller.quick_filter_from(scaled);
1383 PNMImage bigger(x_size, y_size, 4);
1384 bigger.quick_filter_from(smaller);
1385
1386 if (compare_images(scaled, bigger)) {
1387 scaled.take_from(smaller);
1388 y_size = new_y_size;
1389 did_anything = true;
1390 }
1391 }
1392 } while (did_anything);
1393
1394 size_t expected_page_size = (size_t)(x_size * y_size * 4);
1395 PTA_uchar image = PTA_uchar::empty_array(expected_page_size, get_class_type());
1396 convert_from_pnmimage(image, expected_page_size, x_size, 0, 0, 0, scaled, 4, 1);
1397
1398 do_set_simple_ram_image(cdata, image, x_size, y_size);
1399 cdata->_simple_image_date_generated = (int32_t)time(nullptr);
1400}
1401
1402/**
1403 * Returns a TexturePeeker object that can be used to examine the individual
1404 * texels stored within this Texture by (u, v) coordinate.
1405 *
1406 * If the texture has a ram image resident, that image is used. If it does
1407 * not have a full ram image but does have a simple_ram_image resident, that
1408 * image is used instead. If neither image is resident the full image is
1409 * reloaded.
1410 *
1411 * Returns NULL if the texture cannot find an image to load, or the texture
1412 * format is incompatible.
1413 */
1414PT(TexturePeeker) Texture::
1415peek() {
1416 CDWriter cdata(_cycler, unlocked_ensure_ram_image(true));
1417
1418 PT(TexturePeeker) peeker = new TexturePeeker(this, cdata);
1419 if (peeker->is_valid()) {
1420 return peeker;
1421 }
1422
1423 return nullptr;
1424}
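// A usage sketch, assuming TexturePeeker::lookup() fills in the color of the
// texel nearest the given (u, v) coordinate:
//
//   PT(TexturePeeker) peeker = tex->peek();
//   if (peeker != nullptr) {
//     LColor color;
//     peeker->lookup(color, 0.5f, 0.5f);
//   }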
1425
1426/**
1427 * Indicates that the texture should be enqueued to be prepared in the
1428 * indicated prepared_objects at the beginning of the next frame. This will
1429 * ensure the texture is already loaded into texture memory if it is expected
1430 * to be rendered soon.
1431 *
1432 * Use this function instead of prepare_now() to preload textures from a user
1433 * interface standpoint.
1434 */
1435PT(AsyncFuture) Texture::
1436prepare(PreparedGraphicsObjects *prepared_objects) {
1437 return prepared_objects->enqueue_texture_future(this);
1438}
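// A sketch of preloading this texture on a window's GSG before it is first
// rendered; "win" stands in for a GraphicsOutput obtained elsewhere.
//
//   tex->prepare(win->get_gsg()->get_prepared_objects());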
1439
1440/**
1441 * Returns true if the texture has already been prepared or enqueued for
1442 * preparation on the indicated GSG, false otherwise.
1443 */
1444bool Texture::
1445is_prepared(PreparedGraphicsObjects *prepared_objects) const {
1446 MutexHolder holder(_lock);
1447 PreparedViews::const_iterator pvi;
1448 pvi = _prepared_views.find(prepared_objects);
1449 if (pvi != _prepared_views.end()) {
1450 return true;
1451 }
1452 return prepared_objects->is_texture_queued(this);
1453}
1454
1455/**
1456 * Returns true if the texture needs to be re-loaded onto the indicated GSG,
1457 * either because its image data is out-of-date, or because it's not fully
1458 * prepared now.
1459 */
1460bool Texture::
1461was_image_modified(PreparedGraphicsObjects *prepared_objects) const {
1462 MutexHolder holder(_lock);
1463 CDReader cdata(_cycler);
1464
1465 PreparedViews::const_iterator pvi;
1466 pvi = _prepared_views.find(prepared_objects);
1467 if (pvi != _prepared_views.end()) {
1468 const Contexts &contexts = (*pvi).second;
1469 for (int view = 0; view < cdata->_num_views; ++view) {
1470 Contexts::const_iterator ci;
1471 ci = contexts.find(view);
1472 if (ci == contexts.end()) {
1473 return true;
1474 }
1475 TextureContext *tc = (*ci).second;
1476 if (tc->was_image_modified()) {
1477 return true;
1478 }
1479 }
1480 return false;
1481 }
1482 return true;
1483}
1484
1485/**
1486 * Returns the number of bytes which the texture is reported to consume within
1487 * graphics memory, for the indicated GSG. This may return a nonzero value
1488 * even if the texture is not currently resident; you should also check
1489 * get_resident() if you want to know how much space the texture is actually
1490 * consuming right now.
1491 */
1492size_t Texture::
1493get_data_size_bytes(PreparedGraphicsObjects *prepared_objects) const {
1494 MutexHolder holder(_lock);
1495 CDReader cdata(_cycler);
1496
1497 PreparedViews::const_iterator pvi;
1498 size_t total_size = 0;
1499 pvi = _prepared_views.find(prepared_objects);
1500 if (pvi != _prepared_views.end()) {
1501 const Contexts &contexts = (*pvi).second;
1502 for (int view = 0; view < cdata->_num_views; ++view) {
1503 Contexts::const_iterator ci;
1504 ci = contexts.find(view);
1505 if (ci != contexts.end()) {
1506 TextureContext *tc = (*ci).second;
1507 total_size += tc->get_data_size_bytes();
1508 }
1509 }
1510 }
1511
1512 return total_size;
1513}
1514
1515/**
1516 * Returns true if this Texture was rendered in the most recent frame within
1517 * the indicated GSG.
1518 */
1519bool Texture::
1520get_active(PreparedGraphicsObjects *prepared_objects) const {
1521 MutexHolder holder(_lock);
1522 CDReader cdata(_cycler);
1523
1524 PreparedViews::const_iterator pvi;
1525 pvi = _prepared_views.find(prepared_objects);
1526 if (pvi != _prepared_views.end()) {
1527 const Contexts &contexts = (*pvi).second;
1528 for (int view = 0; view < cdata->_num_views; ++view) {
1529 Contexts::const_iterator ci;
1530 ci = contexts.find(view);
1531 if (ci != contexts.end()) {
1532 TextureContext *tc = (*ci).second;
1533 if (tc->get_active()) {
1534 return true;
1535 }
1536 }
1537 }
1538 }
1539 return false;
1540}
1541
1542/**
1543 * Returns true if this Texture is reported to be resident within graphics
1544 * memory for the indicated GSG.
1545 */
1546bool Texture::
1547get_resident(PreparedGraphicsObjects *prepared_objects) const {
1548 MutexHolder holder(_lock);
1549 CDReader cdata(_cycler);
1550
1551 PreparedViews::const_iterator pvi;
1552 pvi = _prepared_views.find(prepared_objects);
1553 if (pvi != _prepared_views.end()) {
1554 const Contexts &contexts = (*pvi).second;
1555 for (int view = 0; view < cdata->_num_views; ++view) {
1556 Contexts::const_iterator ci;
1557 ci = contexts.find(view);
1558 if (ci != contexts.end()) {
1559 TextureContext *tc = (*ci).second;
1560 if (tc->get_resident()) {
1561 return true;
1562 }
1563 }
1564 }
1565 }
1566 return false;
1567}
1568
1569/**
1570 * Frees the texture context only on the indicated object, if it exists there.
1571 * Returns true if it was released, false if it had not been prepared.
1572 */
1573bool Texture::
1574release(PreparedGraphicsObjects *prepared_objects) {
1575 MutexHolder holder(_lock);
1576 PreparedViews::iterator pvi;
1577 pvi = _prepared_views.find(prepared_objects);
1578 if (pvi != _prepared_views.end()) {
1579 Contexts temp;
1580 temp.swap((*pvi).second);
1581 Contexts::iterator ci;
1582 for (ci = temp.begin(); ci != temp.end(); ++ci) {
1583 TextureContext *tc = (*ci).second;
1584 if (tc != nullptr) {
1585 prepared_objects->release_texture(tc);
1586 }
1587 }
1588 _prepared_views.erase(pvi);
1589 }
1590
1591 // Maybe it wasn't prepared yet, but it's about to be.
1592 return prepared_objects->dequeue_texture(this);
1593}
1594
1595/**
1596 * Frees the context allocated on all objects for which the texture has been
1597 * declared. Returns the number of contexts which have been freed.
1598 */
1599int Texture::
1600release_all() {
1601 MutexHolder holder(_lock);
1602
1603 // We have to traverse a copy of the _prepared_views list, because the
1604 // PreparedGraphicsObjects object will call clear_prepared() in response to
1605 // each release_texture(), and we don't want to be modifying the
1606 // _prepared_views list while we're traversing it.
1607 PreparedViews temp;
1608 temp.swap(_prepared_views);
1609 int num_freed = (int)temp.size();
1610
1611 PreparedViews::iterator pvi;
1612 for (pvi = temp.begin(); pvi != temp.end(); ++pvi) {
1613 PreparedGraphicsObjects *prepared_objects = (*pvi).first;
1614 Contexts temp;
1615 temp.swap((*pvi).second);
1616 Contexts::iterator ci;
1617 for (ci = temp.begin(); ci != temp.end(); ++ci) {
1618 TextureContext *tc = (*ci).second;
1619 if (tc != nullptr) {
1620 prepared_objects->release_texture(tc);
1621 }
1622 }
1623 }
1624
1625 return num_freed;
1626}
1627
1628/**
1629 * Not to be confused with write(Filename), this method simply describes the
1630 * texture properties.
1631 */
1632void Texture::
1633write(ostream &out, int indent_level) const {
1634 CDReader cdata(_cycler);
1635 indent(out, indent_level)
1636 << cdata->_texture_type << " " << get_name();
1637 if (!cdata->_filename.empty()) {
1638 out << " (from " << cdata->_filename << ")";
1639 }
1640 out << "\n";
1641
1642 indent(out, indent_level + 2);
1643
1644 switch (cdata->_texture_type) {
1645 case TT_1d_texture:
1646 out << "1-d, " << cdata->_x_size;
1647 break;
1648
1649 case TT_2d_texture:
1650 out << "2-d, " << cdata->_x_size << " x " << cdata->_y_size;
1651 break;
1652
1653 case TT_3d_texture:
1654 out << "3-d, " << cdata->_x_size << " x " << cdata->_y_size << " x " << cdata->_z_size;
1655 break;
1656
1657 case TT_2d_texture_array:
1658 out << "2-d array, " << cdata->_x_size << " x " << cdata->_y_size << " x " << cdata->_z_size;
1659 break;
1660
1661 case TT_cube_map:
1662 out << "cube map, " << cdata->_x_size << " x " << cdata->_y_size;
1663 break;
1664
1665 case TT_cube_map_array:
1666 out << "cube map array, " << cdata->_x_size << " x " << cdata->_y_size << " x " << cdata->_z_size;
1667 break;
1668
1669 case TT_buffer_texture:
1670 out << "buffer, " << cdata->_x_size;
1671 break;
1672
1673 case TT_1d_texture_array:
1674 out << "1-d array, " << cdata->_x_size << " x " << cdata->_y_size;
1675 break;
1676 }
1677
1678 if (cdata->_num_views > 1) {
1679 out << " (x " << cdata->_num_views << " views)";
1680 }
1681
1682 out << " pixels, each " << cdata->_num_components;
1683
1684 switch (cdata->_component_type) {
1685 case T_unsigned_byte:
1686 case T_byte:
1687 out << " bytes";
1688 break;
1689
1690 case T_unsigned_short:
1691 case T_short:
1692 out << " shorts";
1693 break;
1694
1695 case T_half_float:
1696 out << " half";  // fall through: prints "half floats"
1697 case T_float:
1698 out << " floats";
1699 break;
1700
1701 case T_unsigned_int_24_8:
1702 case T_int:
1703 case T_unsigned_int:
1704 out << " ints";
1705 break;
1706
1707 default:
1708 break;
1709 }
1710
1711 out << ", ";
1712 switch (cdata->_format) {
1713 case F_color_index:
1714 out << "color_index";
1715 break;
1716 case F_depth_stencil:
1717 out << "depth_stencil";
1718 break;
1719 case F_depth_component:
1720 out << "depth_component";
1721 break;
1722 case F_depth_component16:
1723 out << "depth_component16";
1724 break;
1725 case F_depth_component24:
1726 out << "depth_component24";
1727 break;
1728 case F_depth_component32:
1729 out << "depth_component32";
1730 break;
1731
1732 case F_rgba:
1733 out << "rgba";
1734 break;
1735 case F_rgbm:
1736 out << "rgbm";
1737 break;
1738 case F_rgba32:
1739 out << "rgba32";
1740 break;
1741 case F_rgba16:
1742 out << "rgba16";
1743 break;
1744 case F_rgba12:
1745 out << "rgba12";
1746 break;
1747 case F_rgba8:
1748 out << "rgba8";
1749 break;
1750 case F_rgba4:
1751 out << "rgba4";
1752 break;
1753
1754 case F_rgb:
1755 out << "rgb";
1756 break;
1757 case F_rgb12:
1758 out << "rgb12";
1759 break;
1760 case F_rgb8:
1761 out << "rgb8";
1762 break;
1763 case F_rgb5:
1764 out << "rgb5";
1765 break;
1766 case F_rgba5:
1767 out << "rgba5";
1768 break;
1769 case F_rgb332:
1770 out << "rgb332";
1771 break;
1772
1773 case F_red:
1774 out << "red";
1775 break;
1776 case F_green:
1777 out << "green";
1778 break;
1779 case F_blue:
1780 out << "blue";
1781 break;
1782 case F_alpha:
1783 out << "alpha";
1784 break;
1785 case F_luminance:
1786 out << "luminance";
1787 break;
1788 case F_luminance_alpha:
1789 out << "luminance_alpha";
1790 break;
1791 case F_luminance_alphamask:
1792 out << "luminance_alphamask";
1793 break;
1794
1795 case F_r16:
1796 out << "r16";
1797 break;
1798 case F_rg16:
1799 out << "rg16";
1800 break;
1801 case F_rgb16:
1802 out << "rgb16";
1803 break;
1804
1805 case F_srgb:
1806 out << "srgb";
1807 break;
1808 case F_srgb_alpha:
1809 out << "srgb_alpha";
1810 break;
1811 case F_sluminance:
1812 out << "sluminance";
1813 break;
1814 case F_sluminance_alpha:
1815 out << "sluminance_alpha";
1816 break;
1817
1818 case F_r32i:
1819 out << "r32i";
1820 break;
1821
1822 case F_r32:
1823 out << "r32";
1824 break;
1825 case F_rg32:
1826 out << "rg32";
1827 break;
1828 case F_rgb32:
1829 out << "rgb32";
1830 break;
1831
1832 case F_r8i:
1833 out << "r8i";
1834 break;
1835 case F_rg8i:
1836 out << "rg8i";
1837 break;
1838 case F_rgb8i:
1839 out << "rgb8i";
1840 break;
1841 case F_rgba8i:
1842 out << "rgba8i";
1843 break;
1844 case F_r11_g11_b10:
1845 out << "r11_g11_b10";
1846 break;
1847 case F_rgb9_e5:
1848 out << "rgb9_e5";
1849 break;
1850 case F_rgb10_a2:
1851 out << "rgb10_a2";
1852 break;
1853
1854 case F_rg:
1855 out << "rg";
1856 break;
1857
1858 case F_r16i:
1859 out << "r16i";
1860 break;
1861 case F_rg16i:
1862 out << "rg16i";
1863 break;
1864 case F_rgb16i:
1865 out << "rgb16i";
1866 break;
1867 case F_rgba16i:
1868 out << "rgba16i";
1869 break;
1870
1871 case F_rg32i:
1872 out << "rg32i";
1873 break;
1874 case F_rgb32i:
1875 out << "rgb32i";
1876 break;
1877 case F_rgba32i:
1878 out << "rgba32i";
1879 break;
1880 }
1881
1882 if (cdata->_compression != CM_default) {
1883 out << ", compression " << cdata->_compression;
1884 }
1885 out << "\n";
1886
1887 indent(out, indent_level + 2);
1888
1889 cdata->_default_sampler.output(out);
1890
1891 if (do_has_ram_image(cdata)) {
1892 indent(out, indent_level + 2)
1893 << do_get_ram_image_size(cdata) << " bytes in ram, compression "
1894 << cdata->_ram_image_compression << "\n";
1895
1896 if (cdata->_ram_images.size() > 1) {
1897 int count = 0;
1898 size_t total_size = 0;
1899 for (size_t n = 1; n < cdata->_ram_images.size(); ++n) {
1900 if (!cdata->_ram_images[n]._image.empty()) {
1901 ++count;
1902 total_size += cdata->_ram_images[n]._image.size();
1903 } else {
1904 // Stop at the first gap.
1905 break;
1906 }
1907 }
1908 indent(out, indent_level + 2)
1909 << count
1910 << " mipmap levels also present in ram (" << total_size
1911 << " bytes).\n";
1912 }
1913
1914 } else {
1915 indent(out, indent_level + 2)
1916 << "no ram image\n";
1917 }
1918
1919 if (!cdata->_simple_ram_image._image.empty()) {
1920 indent(out, indent_level + 2)
1921 << "simple image: " << cdata->_simple_x_size << " x "
1922 << cdata->_simple_y_size << ", "
1923 << cdata->_simple_ram_image._image.size() << " bytes\n";
1924 }
1925}
1926
1927
1928/**
1929 * Changes the size of the texture, padding if necessary, and setting the pad
1930 * region as well.
1931 */
 1932void Texture::
1933set_size_padded(int x, int y, int z) {
1934 CDWriter cdata(_cycler, true);
1935 if (do_get_auto_texture_scale(cdata) != ATS_none) {
1936 do_set_x_size(cdata, up_to_power_2(x));
1937 do_set_y_size(cdata, up_to_power_2(y));
1938
1939 if (cdata->_texture_type == TT_3d_texture) {
1940 // Only pad 3D textures. It does not make sense to do so for cube maps
1941 // or 2D texture arrays.
1942 do_set_z_size(cdata, up_to_power_2(z));
1943 } else {
1944 do_set_z_size(cdata, z);
1945 }
1946 } else {
1947 do_set_x_size(cdata, x);
1948 do_set_y_size(cdata, y);
1949 do_set_z_size(cdata, z);
1950 }
1951 do_set_pad_size(cdata,
1952 cdata->_x_size - x,
1953 cdata->_y_size - y,
1954 cdata->_z_size - z);
1955}
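// Editor's example (not part of the original source), assuming the texture's
// auto-texture-scale mode is not ATS_none and a hypothetical texture "tex":
//
//   PT(Texture) tex = new Texture("pad_demo");
//   tex->set_size_padded(640, 480);
//   // The stored size becomes 1024 x 512 (next powers of 2), and the pad
//   // region is recorded as (1024 - 640, 512 - 480, 0) = (384, 32, 0).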
1956
1957/**
1958 * Specifies the size of the texture as it exists in its original disk file,
1959 * before any Panda scaling.
1960 */
 1961void Texture::
1962set_orig_file_size(int x, int y, int z) {
1963 CDWriter cdata(_cycler, true);
1964 cdata->_orig_file_x_size = x;
1965 cdata->_orig_file_y_size = y;
1966
1967 nassertv(z == cdata->_z_size);
1968}
1969
1970/**
1971 * Creates a context for the texture on the particular GSG, if it does not
1972 * already exist. Returns the new (or old) TextureContext. This assumes that
1973 * the GraphicsStateGuardian is the currently active rendering context and
1974 * that it is ready to accept new textures. If this is not necessarily the
1975 * case, you should use prepare() instead.
1976 *
1977 * Normally, this is not called directly except by the GraphicsStateGuardian;
1978 * a texture does not need to be explicitly prepared by the user before it may
1979 * be rendered.
1980 */
 1981TextureContext *Texture::
1982prepare_now(int view,
1983 PreparedGraphicsObjects *prepared_objects,
 1984             GraphicsStateGuardianBase *gsg) {
1985 MutexHolder holder(_lock);
1986 CDReader cdata(_cycler);
1987
1988 // Don't exceed the actual number of views.
1989 view = max(min(view, cdata->_num_views - 1), 0);
1990
 1991  // Get the contexts already prepared on this PreparedGraphicsObjects, one per view.
1992 Contexts &contexts = _prepared_views[prepared_objects];
1993 Contexts::const_iterator pvi;
1994 pvi = contexts.find(view);
1995 if (pvi != contexts.end()) {
1996 return (*pvi).second;
1997 }
1998
1999 TextureContext *tc = prepared_objects->prepare_texture_now(this, view, gsg);
2000 contexts[view] = tc;
2001
2002 return tc;
2003}
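// Editor's note (not part of the original source): application code normally
// uses the deferred prepare() path rather than prepare_now(); a minimal
// sketch, assuming "win" is an open GraphicsWindow and "tex" a loaded texture:
//
//   tex->prepare(win->get_gsg()->get_prepared_objects());
//
// The enqueued texture is then typically prepared by the GSG at the beginning
// of a subsequent frame.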
2004
2005/**
2006 * Returns the smallest power of 2 greater than or equal to value.
2007 */
 2008int Texture::
2009up_to_power_2(int value) {
2010 if (value <= 1) {
2011 return 1;
2012 }
2013 int bit = get_next_higher_bit(((unsigned int)value) - 1);
2014 return (1 << bit);
2015}
2016
2017/**
2018 * Returns the largest power of 2 less than or equal to value.
2019 */
 2020int Texture::
2021down_to_power_2(int value) {
2022 if (value <= 1) {
2023 return 1;
2024 }
2025 int bit = get_next_higher_bit(((unsigned int)value) >> 1);
2026 return (1 << bit);
2027}
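// Editor's example (not part of the original source): worked values implied
// by the contracts documented above:
//
//   up_to_power_2(200) == 256     down_to_power_2(200) == 128
//   up_to_power_2(256) == 256     down_to_power_2(256) == 256
//   up_to_power_2(1)   == 1       down_to_power_2(1)   == 1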
2028
2029/**
2030 * Asks the PNMImage to change its scale when it reads the image, according to
2031 * the whims of the Config.prc file.
2032 *
2033 * For most efficient results, this method should be called after
2034 * pnmimage.read_header() has been called, but before pnmimage.read(). This
2035 * method may also be called after pnmimage.read(), i.e. when the pnmimage is
2036 * already loaded; in this case it will rescale the image on the spot. Also
2037 * see rescale_texture().
2038 */
 2039void Texture::
2040consider_rescale(PNMImage &pnmimage) {
2041 consider_rescale(pnmimage, get_name(), get_auto_texture_scale());
2042}
2043
2044/**
2045 * Asks the PNMImage to change its scale when it reads the image, according to
2046 * the whims of the Config.prc file.
2047 *
2048 * For most efficient results, this method should be called after
2049 * pnmimage.read_header() has been called, but before pnmimage.read(). This
2050 * method may also be called after pnmimage.read(), i.e. when the pnmimage is
2051 * already loaded; in this case it will rescale the image on the spot. Also
2052 * see rescale_texture().
2053 */
 2054void Texture::
2055consider_rescale(PNMImage &pnmimage, const string &name, AutoTextureScale auto_texture_scale) {
2056 int new_x_size = pnmimage.get_x_size();
2057 int new_y_size = pnmimage.get_y_size();
2058 if (adjust_size(new_x_size, new_y_size, name, false, auto_texture_scale)) {
2059 if (pnmimage.is_valid()) {
2060 // The image is already loaded. Rescale on the spot.
2061 PNMImage new_image(new_x_size, new_y_size, pnmimage.get_num_channels(),
2062 pnmimage.get_maxval(), pnmimage.get_type(),
2063 pnmimage.get_color_space());
2064 new_image.quick_filter_from(pnmimage);
2065 pnmimage.take_from(new_image);
2066 } else {
2067 // Rescale while reading. Some image types (e.g. jpeg) can take
2068 // advantage of this.
2069 pnmimage.set_read_size(new_x_size, new_y_size);
2070 }
2071 }
2072}
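// Editor's example (not part of the original source): the calling pattern
// described above, with a hypothetical image path and texture "tex":
//
//   PNMImage img;
//   img.read_header(Filename("maps/grid.png"));  // cheap, header only
//   tex->consider_rescale(img);                  // sets the read size if needed
//   img.read(Filename("maps/grid.png"));         // decodes at the reduced size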
2073
2074/**
2075 * Returns the indicated TextureType converted to a string word.
2076 */
 2077string Texture::
2078format_texture_type(TextureType tt) {
2079 switch (tt) {
2080 case TT_1d_texture:
2081 return "1d_texture";
2082 case TT_2d_texture:
2083 return "2d_texture";
2084 case TT_3d_texture:
2085 return "3d_texture";
2086 case TT_2d_texture_array:
2087 return "2d_texture_array";
2088 case TT_cube_map:
2089 return "cube_map";
2090 case TT_cube_map_array:
2091 return "cube_map_array";
2092 case TT_buffer_texture:
2093 return "buffer_texture";
2094 case TT_1d_texture_array:
2095 return "1d_texture_array";
2096 }
2097 return "**invalid**";
2098}
2099
2100/**
2101 * Returns the TextureType corresponding to the indicated string word.
2102 */
2103Texture::TextureType Texture::
2104string_texture_type(const string &str) {
2105 if (cmp_nocase(str, "1d_texture") == 0) {
2106 return TT_1d_texture;
2107 } else if (cmp_nocase(str, "2d_texture") == 0) {
2108 return TT_2d_texture;
2109 } else if (cmp_nocase(str, "3d_texture") == 0) {
2110 return TT_3d_texture;
2111 } else if (cmp_nocase(str, "2d_texture_array") == 0) {
2112 return TT_2d_texture_array;
2113 } else if (cmp_nocase(str, "cube_map") == 0) {
2114 return TT_cube_map;
2115 } else if (cmp_nocase(str, "cube_map_array") == 0) {
2116 return TT_cube_map_array;
2117 } else if (cmp_nocase(str, "buffer_texture") == 0) {
 2118 return TT_buffer_texture;
  } else if (cmp_nocase(str, "1d_texture_array") == 0) {
    return TT_1d_texture_array;
 2119 }
2120
2121 gobj_cat->error()
2122 << "Invalid Texture::TextureType value: " << str << "\n";
2123 return TT_2d_texture;
2124}
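// Editor's note (not part of the original source): format_texture_type() and
// string_texture_type() are inverses for the recognized names, and the
// comparison is case-insensitive, so e.g. string_texture_type("CUBE_MAP")
// returns TT_cube_map; an unrecognized word logs an error and falls back to
// TT_2d_texture.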
2125
2126/**
2127 * Returns the indicated ComponentType converted to a string word.
2128 */
 2129string Texture::
2130format_component_type(ComponentType ct) {
2131 switch (ct) {
2132 case T_unsigned_byte:
2133 return "unsigned_byte";
2134 case T_unsigned_short:
2135 return "unsigned_short";
2136 case T_float:
2137 return "float";
2138 case T_unsigned_int_24_8:
2139 return "unsigned_int_24_8";
2140 case T_int:
2141 return "int";
2142 case T_byte:
2143 return "unsigned_byte";
2144 case T_short:
2145 return "short";
2146 case T_half_float:
2147 return "half_float";
2148 case T_unsigned_int:
2149 return "unsigned_int";
2150 }
2151
2152 return "**invalid**";
2153}
2154
2155/**
2156 * Returns the ComponentType corresponding to the indicated string word.
2157 */
2158Texture::ComponentType Texture::
2159string_component_type(const string &str) {
2160 if (cmp_nocase(str, "unsigned_byte") == 0) {
2161 return T_unsigned_byte;
2162 } else if (cmp_nocase(str, "unsigned_short") == 0) {
2163 return T_unsigned_short;
2164 } else if (cmp_nocase(str, "float") == 0) {
2165 return T_float;
2166 } else if (cmp_nocase(str, "unsigned_int_24_8") == 0) {
2167 return T_unsigned_int_24_8;
2168 } else if (cmp_nocase(str, "int") == 0) {
2169 return T_int;
2170 } else if (cmp_nocase(str, "byte") == 0) {
2171 return T_byte;
2172 } else if (cmp_nocase(str, "short") == 0) {
2173 return T_short;
2174 } else if (cmp_nocase(str, "half_float") == 0) {
2175 return T_half_float;
2176 } else if (cmp_nocase(str, "unsigned_int") == 0) {
2177 return T_unsigned_int;
2178 }
2179
2180 gobj_cat->error()
2181 << "Invalid Texture::ComponentType value: " << str << "\n";
2182 return T_unsigned_byte;
2183}
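// Editor's note (not part of the original source): the two functions above
// round-trip, e.g. string_component_type(format_component_type(T_half_float))
// == T_half_float; an unrecognized word logs an error and falls back to
// T_unsigned_byte.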
2184
2185/**
2186 * Returns the indicated Format converted to a string word.
2187 */
 2188string Texture::
2189format_format(Format format) {
2190 switch (format) {
2191 case F_depth_stencil:
2192 return "depth_stencil";
2193 case F_depth_component:
2194 return "depth_component";
2195 case F_depth_component16:
2196 return "depth_component16";
2197 case F_depth_component24:
2198 return "depth_component24";
2199 case F_depth_component32:
2200 return "depth_component32";
2201 case F_color_index:
2202 return "color_index";
2203 case F_red:
2204 return "red";
2205 case F_green:
2206 return "green";
2207 case F_blue:
2208 return "blue";
2209 case F_alpha:
2210 return "alpha";
2211 case F_rgb:
2212 return "rgb";
2213 case F_rgb5:
2214 return "rgb5";
2215 case F_rgb8:
2216 return "rgb8";
2217 case F_rgb12:
2218 return "rgb12";
2219 case F_rgb332:
2220 return "rgb332";
2221 case F_rgba:
2222 return "rgba";
2223 case F_rgbm:
2224 return "rgbm";
2225 case F_rgba4:
2226 return "rgba4";
2227 case F_rgba5:
2228 return "rgba5";
2229 case F_rgba8:
2230 return "rgba8";
2231 case F_rgba12:
2232 return "rgba12";
2233 case F_luminance:
2234 return "luminance";
2235 case F_luminance_alpha:
2236 return "luminance_alpha";
2237 case F_luminance_alphamask:
2238 return "luminance_alphamask";
2239 case F_rgba16:
2240 return "rgba16";
2241 case F_rgba32:
2242 return "rgba32";
2243 case F_r16:
2244 return "r16";
2245 case F_rg16:
2246 return "rg16";
2247 case F_rgb16:
2248 return "rgb16";
2249 case F_srgb:
2250 return "srgb";
2251 case F_srgb_alpha:
2252 return "srgb_alpha";
2253 case F_sluminance:
2254 return "sluminance";
2255 case F_sluminance_alpha:
2256 return "sluminance_alpha";
2257 case F_r32i:
2258 return "r32i";
2259 case F_r32:
2260 return "r32";
2261 case F_rg32:
2262 return "rg32";
2263 case F_rgb32:
2264 return "rgb32";
2265 case F_r8i:
2266 return "r8i";
2267 case F_rg8i:
2268 return "rg8i";
2269 case F_rgb8i:
2270 return "rgb8i";
2271 case F_rgba8i:
2272 return "rgba8i";
2273 case F_r11_g11_b10:
2274 return "r11g11b10";
2275 case F_rgb9_e5:
2276 return "rgb9_e5";
2277 case F_rgb10_a2:
2278 return "rgb10_a2";
2279 case F_rg:
2280 return "rg";
2281 case F_r16i:
2282 return "r16i";
2283 case F_rg16i:
2284 return "rg16i";
2285 case F_rgb16i:
2286 return "rgb16i";
2287 case F_rgba16i:
2288 return "rgba16i";
2289 case F_rg32i:
2290 return "rg32i";
2291 case F_rgb32i:
2292 return "rgb32i";
2293 case F_rgba32i:
2294 return "rgba32i";
2295 }
2296 return "**invalid**";
2297}
2298
2299/**
2300 * Returns the Format corresponding to the indicated string word.
2301 */
2302Texture::Format Texture::
2303string_format(const string &str) {
2304 if (cmp_nocase(str, "depth_stencil") == 0) {
2305 return F_depth_stencil;
2306 } else if (cmp_nocase(str, "depth_component") == 0) {
2307 return F_depth_component;
2308 } else if (cmp_nocase(str, "depth_component16") == 0 || cmp_nocase(str, "d16") == 0) {
2309 return F_depth_component16;
2310 } else if (cmp_nocase(str, "depth_component24") == 0 || cmp_nocase(str, "d24") == 0) {
2311 return F_depth_component24;
2312 } else if (cmp_nocase(str, "depth_component32") == 0 || cmp_nocase(str, "d32") == 0) {
2313 return F_depth_component32;
2314 } else if (cmp_nocase(str, "color_index") == 0) {
2315 return F_color_index;
2316 } else if (cmp_nocase(str, "red") == 0) {
2317 return F_red;
2318 } else if (cmp_nocase(str, "green") == 0) {
2319 return F_green;
2320 } else if (cmp_nocase(str, "blue") == 0) {
2321 return F_blue;
2322 } else if (cmp_nocase(str, "alpha") == 0) {
2323 return F_alpha;
2324 } else if (cmp_nocase(str, "rgb") == 0) {
2325 return F_rgb;
2326 } else if (cmp_nocase(str, "rgb5") == 0) {
2327 return F_rgb5;
2328 } else if (cmp_nocase(str, "rgb8") == 0 || cmp_nocase(str, "r8g8b8") == 0) {
2329 return F_rgb8;
2330 } else if (cmp_nocase(str, "rgb12") == 0) {
2331 return F_rgb12;
2332 } else if (cmp_nocase(str, "rgb332") == 0 || cmp_nocase(str, "r3g3b2") == 0) {
2333 return F_rgb332;
2334 } else if (cmp_nocase(str, "rgba") == 0) {
2335 return F_rgba;
2336 } else if (cmp_nocase(str, "rgbm") == 0) {
2337 return F_rgbm;
2338 } else if (cmp_nocase(str, "rgba4") == 0) {
2339 return F_rgba4;
2340 } else if (cmp_nocase(str, "rgba5") == 0) {
2341 return F_rgba5;
2342 } else if (cmp_nocase(str, "rgba8") == 0 || cmp_nocase(str, "r8g8b8a8") == 0) {
2343 return F_rgba8;
2344 } else if (cmp_nocase(str, "rgba12") == 0) {
2345 return F_rgba12;
2346 } else if (cmp_nocase(str, "luminance") == 0) {
2347 return F_luminance;
2348 } else if (cmp_nocase(str, "luminance_alpha") == 0) {
2349 return F_luminance_alpha;
2350 } else if (cmp_nocase(str, "luminance_alphamask") == 0) {
2351 return F_luminance_alphamask;
2352 } else if (cmp_nocase(str, "rgba16") == 0 || cmp_nocase(str, "r16g16b16a16") == 0) {
2353 return F_rgba16;
2354 } else if (cmp_nocase(str, "rgba32") == 0 || cmp_nocase(str, "r32g32b32a32") == 0) {
2355 return F_rgba32;
2356 } else if (cmp_nocase(str, "r16") == 0 || cmp_nocase(str, "red16") == 0) {
2357 return F_r16;
2358 } else if (cmp_nocase(str, "r16i") == 0) {
2359 return F_r16i;
2360 } else if (cmp_nocase(str, "rg16") == 0 || cmp_nocase(str, "r16g16") == 0) {
2361 return F_rg16;
2362 } else if (cmp_nocase(str, "rgb16") == 0 || cmp_nocase(str, "r16g16b16") == 0) {
2363 return F_rgb16;
2364 } else if (cmp_nocase(str, "srgb") == 0) {
2365 return F_srgb;
2366 } else if (cmp_nocase(str, "srgb_alpha") == 0) {
2367 return F_srgb_alpha;
2368 } else if (cmp_nocase(str, "sluminance") == 0) {
2369 return F_sluminance;
2370 } else if (cmp_nocase(str, "sluminance_alpha") == 0) {
2371 return F_sluminance_alpha;
2372 } else if (cmp_nocase(str, "r32i") == 0) {
2373 return F_r32i;
2374 } else if (cmp_nocase(str, "r32") == 0 || cmp_nocase(str, "red32") == 0) {
2375 return F_r32;
2376 } else if (cmp_nocase(str, "rg32") == 0 || cmp_nocase(str, "r32g32") == 0) {
2377 return F_rg32;
2378 } else if (cmp_nocase(str, "rgb32") == 0 || cmp_nocase(str, "r32g32b32") == 0) {
2379 return F_rgb32;
2380 } else if (cmp_nocase_uh(str, "r8i") == 0) {
2381 return F_r8i;
2382 } else if (cmp_nocase_uh(str, "rg8i") == 0 || cmp_nocase_uh(str, "r8g8i") == 0) {
2383 return F_rg8i;
2384 } else if (cmp_nocase_uh(str, "rgb8i") == 0 || cmp_nocase_uh(str, "r8g8b8i") == 0) {
2385 return F_rgb8i;
2386 } else if (cmp_nocase_uh(str, "rgba8i") == 0 || cmp_nocase_uh(str, "r8g8b8a8i") == 0) {
2387 return F_rgba8i;
2388 } else if (cmp_nocase(str, "r11g11b10") == 0) {
2389 return F_r11_g11_b10;
2390 } else if (cmp_nocase(str, "rgb9_e5") == 0) {
2391 return F_rgb9_e5;
2392 } else if (cmp_nocase_uh(str, "rgb10_a2") == 0 || cmp_nocase(str, "r10g10b10a2") == 0) {
2393 return F_rgb10_a2;
2394 } else if (cmp_nocase_uh(str, "rg") == 0) {
2395 return F_rg;
2396 } else if (cmp_nocase_uh(str, "r16i") == 0) {
2397 return F_r16i;
2398 } else if (cmp_nocase_uh(str, "rg16i") == 0 || cmp_nocase_uh(str, "r16g16i") == 0) {
2399 return F_rg16i;
2400 } else if (cmp_nocase_uh(str, "rgb16i") == 0 || cmp_nocase_uh(str, "r16g16b16i") == 0) {
2401 return F_rgb16i;
2402 } else if (cmp_nocase_uh(str, "rgba16i") == 0 || cmp_nocase_uh(str, "r16g16b16a16i") == 0) {
2403 return F_rgba16i;
2404 } else if (cmp_nocase_uh(str, "rg32i") == 0 || cmp_nocase_uh(str, "r32g32i") == 0) {
2405 return F_rg32i;
2406 } else if (cmp_nocase_uh(str, "rgb32i") == 0 || cmp_nocase_uh(str, "r32g32b32i") == 0) {
2407 return F_rgb32i;
2408 } else if (cmp_nocase_uh(str, "rgba32i") == 0 || cmp_nocase_uh(str, "r32g32b32a32i") == 0) {
2409 return F_rgba32i;
2410 }
2411
2412 gobj_cat->error()
2413 << "Invalid Texture::Format value: " << str << "\n";
2414 return F_rgba;
2415}
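// Editor's example (not part of the original source): several spellings map
// to the same Format, e.g. string_format("rgba8") and string_format("r8g8b8a8")
// both return F_rgba8, while format_format(F_rgba8) always returns the
// canonical "rgba8"; an unrecognized word logs an error and falls back to
// F_rgba.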
2416
2417/**
2418 * Returns the indicated CompressionMode converted to a string word.
2419 */
 2420string Texture::
2421format_compression_mode(CompressionMode cm) {
2422 switch (cm) {
2423 case CM_default:
2424 return "default";
2425 case CM_off:
2426 return "off";
2427 case CM_on:
2428 return "on";
2429 case CM_fxt1:
2430 return "fxt1";
2431 case CM_dxt1:
2432 return "dxt1";
2433 case CM_dxt2:
2434 return "dxt2";
2435 case CM_dxt3:
2436 return "dxt3";
2437 case CM_dxt4:
2438 return "dxt4";
2439 case CM_dxt5:
2440 return "dxt5";
2441 case CM_pvr1_2bpp:
2442 return "pvr1_2bpp";
2443 case CM_pvr1_4bpp:
2444 return "pvr1_4bpp";
2445 case CM_rgtc:
2446 return "rgtc";
2447 case CM_etc1:
2448 return "etc1";
2449 case CM_etc2:
2450 return "etc2";
2451 case CM_eac:
2452 return "eac";
2453 }
2454
2455 return "**invalid**";
2456}
2457
2458/**
2459 * Returns the CompressionMode value associated with the given string
2460 * representation.
2461 */
2462Texture::CompressionMode Texture::
2463string_compression_mode(const string &str) {
2464 if (cmp_nocase_uh(str, "default") == 0) {
2465 return CM_default;
2466 } else if (cmp_nocase_uh(str, "off") == 0) {
2467 return CM_off;
2468 } else if (cmp_nocase_uh(str, "on") == 0) {
2469 return CM_on;
2470 } else if (cmp_nocase_uh(str, "fxt1") == 0) {
2471 return CM_fxt1;
2472 } else if (cmp_nocase_uh(str, "dxt1") == 0) {
2473 return CM_dxt1;
2474 } else if (cmp_nocase_uh(str, "dxt2") == 0) {
2475 return CM_dxt2;
2476 } else if (cmp_nocase_uh(str, "dxt3") == 0) {
2477 return CM_dxt3;
2478 } else if (cmp_nocase_uh(str, "dxt4") == 0) {
2479 return CM_dxt4;
2480 } else if (cmp_nocase_uh(str, "dxt5") == 0) {
2481 return CM_dxt5;
2482 } else if (cmp_nocase_uh(str, "pvr1_2bpp") == 0) {
2483 return CM_pvr1_2bpp;
2484 } else if (cmp_nocase_uh(str, "pvr1_4bpp") == 0) {
2485 return CM_pvr1_4bpp;
2486 } else if (cmp_nocase_uh(str, "rgtc") == 0) {
2487 return CM_rgtc;
2488 } else if (cmp_nocase_uh(str, "etc1") == 0) {
2489 return CM_etc1;
2490 } else if (cmp_nocase_uh(str, "etc2") == 0) {
2491 return CM_etc2;
2492 } else if (cmp_nocase_uh(str, "eac") == 0) {
2493 return CM_eac;
2494 }
2495
2496 gobj_cat->error()
2497 << "Invalid Texture::CompressionMode value: " << str << "\n";
2498 return CM_default;
2499}
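// Editor's note (not part of the original source): cmp_nocase_uh() ignores
// case and treats hyphens and underscores alike, so e.g. "DXT5" maps to
// CM_dxt5 and "pvr1-2bpp" maps to CM_pvr1_2bpp; anything unrecognized logs an
// error and falls back to CM_default.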
2500
2501
2502/**
2503 * Returns the indicated QualityLevel converted to a string word.
2504 */
 2505string Texture::
2506format_quality_level(QualityLevel ql) {
2507 switch (ql) {
2508 case QL_default:
2509 return "default";
2510 case QL_fastest:
2511 return "fastest";
2512 case QL_normal:
2513 return "normal";
2514 case QL_best:
2515 return "best";
2516 }
2517
2518 return "**invalid**";
2519}
2520
2521/**
2522 * Returns the QualityLevel value associated with the given string
2523 * representation.
2524 */
2525Texture::QualityLevel Texture::
2526string_quality_level(const string &str) {
2527 if (cmp_nocase(str, "default") == 0) {
2528 return QL_default;
2529 } else if (cmp_nocase(str, "fastest") == 0) {
2530 return QL_fastest;
2531 } else if (cmp_nocase(str, "normal") == 0) {
2532 return QL_normal;
2533 } else if (cmp_nocase(str, "best") == 0) {
2534 return QL_best;
2535 }
2536
2537 gobj_cat->error()
2538 << "Invalid Texture::QualityLevel value: " << str << "\n";
2539 return QL_default;
2540}
2541
2542/**
2543 * This method is called by the GraphicsEngine at the beginning of the frame
2544 * *after* a texture has been successfully uploaded to graphics memory. It is
2545 * intended as a callback so the texture can release its RAM image, if
2546 * _keep_ram_image is false.
2547 *
2548 * This is called indirectly when the GSG calls
2549 * GraphicsEngine::texture_uploaded().
2550 */
 2551void Texture::
 2552texture_uploaded() {
2553 CDLockedReader cdata(_cycler);
2554
2555 if (!keep_texture_ram && !cdata->_keep_ram_image) {
2556 // Once we have prepared the texture, we can generally safely remove the
2557 // pixels from main RAM. The GSG is now responsible for remembering what
2558 // it looks like.
2559
2560 CDWriter cdataw(_cycler, cdata, false);
2561 if (gobj_cat.is_debug()) {
2562 gobj_cat.debug()
2563 << "Dumping RAM for texture " << get_name() << "\n";
2564 }
2565 do_clear_ram_image(cdataw);
2566 }
2567}
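// Editor's note (not part of the original source): to keep the pixels in RAM
// after upload, either set the keep-texture-ram config variable or ask the
// texture itself, e.g.
//
//   tex->set_keep_ram_image(true);   // hypothetical texture pointer "tex"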
2568
2569/**
2570 * Should be overridden by derived classes to return true if cull_callback()
2571 * has been defined. Otherwise, returns false to indicate cull_callback()
2572 * does not need to be called for this node during the cull traversal.
2573 */
 2574bool Texture::
2575has_cull_callback() const {
2576 return false;
2577}
2578
2579/**
2580 * If has_cull_callback() returns true, this function will be called during
2581 * the cull traversal to perform any additional operations that should be
2582 * performed at cull time.
2583 *
 2584 * This is called each time the Texture is discovered to be applied to a Geom in the
2585 * traversal. It should return true if the Geom is visible, false if it
2586 * should be omitted.
2587 */
 2588bool Texture::
 2589cull_callback(CullTraverser *, const CullTraverserData &) const {
2590 return true;
2591}
2592
2593/**
2594 * A factory function to make a new Texture, used to pass to the TexturePool.
2595 */
2596PT(Texture) Texture::
2597make_texture() {
2598 return new Texture;
2599}
2600
2601/**
2602 * Returns true if the indicated component type is unsigned, false otherwise.
2603 */
 2604bool Texture::
2605is_unsigned(Texture::ComponentType ctype) {
2606 return (ctype == T_unsigned_byte ||
2607 ctype == T_unsigned_short ||
2608 ctype == T_unsigned_int_24_8 ||
2609 ctype == T_unsigned_int);
2610}
2611
2612/**
2613 * Returns true if the indicated compression mode is one of the specific
2614 * compression types, false otherwise.
2615 */
 2616bool Texture::
2617is_specific(Texture::CompressionMode compression) {
2618 switch (compression) {
2619 case CM_default:
2620 case CM_off:
2621 case CM_on:
2622 return false;
2623
2624 default:
2625 return true;
2626 }
2627}
2628
2629/**
2630 * Returns true if the indicated format includes alpha, false otherwise.
2631 */
 2632bool Texture::
2633has_alpha(Format format) {
2634 switch (format) {
2635 case F_alpha:
2636 case F_rgba:
2637 case F_rgbm:
2638 case F_rgba4:
2639 case F_rgba5:
2640 case F_rgba8:
2641 case F_rgba12:
2642 case F_rgba16:
2643 case F_rgba32:
2644 case F_luminance_alpha:
2645 case F_luminance_alphamask:
2646 case F_srgb_alpha:
2647 case F_sluminance_alpha:
2648 case F_rgba8i:
2649 case F_rgb10_a2:
2650 case F_rgba16i:
2651 case F_rgba32i:
2652 return true;
2653
2654 default:
2655 return false;
2656 }
2657}
2658
2659/**
2660 * Returns true if the indicated format includes a binary alpha only, false
2661 * otherwise.
2662 */
 2663bool Texture::
2664has_binary_alpha(Format format) {
2665 switch (format) {
2666 case F_rgbm:
2667 return true;
2668
2669 default:
2670 return false;
2671 }
2672}
2673
2674/**
2675 * Returns true if the indicated format is in the sRGB color space, false
2676 * otherwise.
2677 */
 2678bool Texture::
2679is_srgb(Format format) {
2680 switch (format) {
2681 case F_srgb:
2682 case F_srgb_alpha:
2683 case F_sluminance:
2684 case F_sluminance_alpha:
2685 return true;
2686
2687 default:
2688 return false;
2689 }
2690}
2691
2692/**
2693 * Returns true if the indicated format is an integer format, false otherwise.
2694 */
 2695bool Texture::
2696is_integer(Format format) {
2697 switch (format) {
2698 case F_r32i:
2699 case F_r8i:
2700 case F_rg8i:
2701 case F_rgb8i:
2702 case F_rgba8i:
2703 case F_r16i:
2704 case F_rg16i:
2705 case F_rgb16i:
2706 case F_rgba16i:
2707 case F_rg32i:
2708 case F_rgb32i:
2709 case F_rgba32i:
2710 return true;
2711
2712 default:
2713 return false;
2714 }
2715}
2716
2717/**
2718 * Computes the proper size of the texture, based on the original size, the
2719 * filename, and the resizing whims of the config file.
2720 *
2721 * x_size and y_size should be loaded with the texture image's original size
2722 * on disk. On return, they will be loaded with the texture's in-memory
2723 * target size. The return value is true if the size has been adjusted, or
2724 * false if it is the same.
2725 */
 2726bool Texture::
2727adjust_size(int &x_size, int &y_size, const string &name,
2728 bool for_padding, AutoTextureScale auto_texture_scale) {
2729 bool exclude = false;
2730 int num_excludes = exclude_texture_scale.get_num_unique_values();
2731 for (int i = 0; i < num_excludes && !exclude; ++i) {
2732 GlobPattern pat(exclude_texture_scale.get_unique_value(i));
2733 if (pat.matches(name)) {
2734 exclude = true;
2735 }
2736 }
2737
2738 int new_x_size = x_size;
2739 int new_y_size = y_size;
2740
2741 if (!exclude) {
2742 new_x_size = (int)cfloor(new_x_size * texture_scale + 0.5);
2743 new_y_size = (int)cfloor(new_y_size * texture_scale + 0.5);
2744
 2745     // Don't auto-scale below the texture-scale-limit config value (normally 4)
 2746     // in either dimension; very small textures cause problems for DirectX and texture compression.
2747 new_x_size = min(max(new_x_size, (int)texture_scale_limit), x_size);
2748 new_y_size = min(max(new_y_size, (int)texture_scale_limit), y_size);
2749 }
2750
2751 AutoTextureScale ats = auto_texture_scale;
2752 if (ats == ATS_unspecified) {
2753 ats = get_textures_power_2();
2754 }
2755 if (!for_padding && ats == ATS_pad) {
2756 // If we're not calculating the padding size--that is, we're calculating
2757 // the initial scaling size instead--then ignore ATS_pad, and treat it the
2758 // same as ATS_none.
2759 ats = ATS_none;
2760 }
2761
2762 switch (ats) {
2763 case ATS_down:
2764 new_x_size = down_to_power_2(new_x_size);
2765 new_y_size = down_to_power_2(new_y_size);
2766 break;
2767
2768 case ATS_up:
2769 case ATS_pad:
2770 new_x_size = up_to_power_2(new_x_size);
2771 new_y_size = up_to_power_2(new_y_size);
2772 break;
2773
2774 case ATS_none:
2775 case ATS_unspecified:
2776 break;
2777 }
2778
2779 ats = textures_square.get_value();
2780 if (!for_padding && ats == ATS_pad) {
2781 ats = ATS_none;
2782 }
2783 switch (ats) {
2784 case ATS_down:
2785 new_x_size = new_y_size = min(new_x_size, new_y_size);
2786 break;
2787
2788 case ATS_up:
2789 case ATS_pad:
2790 new_x_size = new_y_size = max(new_x_size, new_y_size);
2791 break;
2792
2793 case ATS_none:
2794 case ATS_unspecified:
2795 break;
2796 }
2797
2798 if (!exclude) {
2799 int max_dimension = max_texture_dimension;
2800
2801 if (max_dimension < 0) {
 2802       GraphicsStateGuardianBase *gsg = GraphicsStateGuardianBase::get_default_gsg();
2803 if (gsg != nullptr) {
2804 max_dimension = gsg->get_max_texture_dimension();
2805 }
2806 }
2807
2808 if (max_dimension > 0) {
2809 new_x_size = min(new_x_size, (int)max_dimension);
2810 new_y_size = min(new_y_size, (int)max_dimension);
2811 }
2812 }
2813
2814 if (x_size != new_x_size || y_size != new_y_size) {
2815 x_size = new_x_size;
2816 y_size = new_y_size;
2817 return true;
2818 }
2819
2820 return false;
2821}
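// Editor's example (not part of the original source), assuming the default
// config (texture-scale 1, no exclusions, textures-square none) and calling
// adjust_size() the way the code above does:
//
//   int x = 640, y = 480;
//   bool changed = adjust_size(x, y, "grid.png", false, ATS_down);
//   // changed == true, x == 512, y == 256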
2822
2823/**
2824 * May be called prior to calling read_txo() or any bam-related Texture-
2825 * creating callback, to ensure that the proper dynamic libraries for a
 2826 * Texture of the current class type, and the indicated filename, have
 2827 * already been loaded.
2828 *
2829 * This is a low-level function that should not normally need to be called
2830 * directly by the user.
2831 *
2832 * Note that for best results you must first create a Texture object of the
2833 * appropriate class type for your filename, for instance with
2834 * TexturePool::make_texture().
2835 */
 2836void Texture::
2837ensure_loader_type(const Filename &filename) {
2838 // For a plain Texture type, this doesn't need to do anything.
2839}
2840
2841/**
2842 * Called by TextureContext to give the Texture a chance to mark itself dirty
2843 * before rendering, if necessary.
2844 */
2845void Texture::
2846reconsider_dirty() {
2847}
2848
2849/**
2850 * Works like adjust_size, but also considers the texture class. Movie
2851 * textures, for instance, always pad outwards, regardless of textures-
2852 * power-2.
2853 */
2854bool Texture::
2855do_adjust_this_size(const CData *cdata, int &x_size, int &y_size, const string &name,
2856 bool for_padding) const {
2857 return adjust_size(x_size, y_size, name, for_padding, cdata->_auto_texture_scale);
2858}
2859
2860/**
2861 * The internal implementation of the various read() methods.
2862 */
2863bool Texture::
2864do_read(CData *cdata, const Filename &fullpath, const Filename &alpha_fullpath,
2865 int primary_file_num_channels, int alpha_file_channel,
2866 int z, int n, bool read_pages, bool read_mipmaps,
2867 const LoaderOptions &options, BamCacheRecord *record) {
2868 PStatTimer timer(_texture_read_pcollector);
2869
2870 if (options.get_auto_texture_scale() != ATS_unspecified) {
2871 cdata->_auto_texture_scale = options.get_auto_texture_scale();
2872 }
2873
2874 bool header_only = ((options.get_texture_flags() & (LoaderOptions::TF_preload | LoaderOptions::TF_preload_simple)) == 0);
2875 if (record != nullptr) {
2876 header_only = false;
2877 }
2878
2879 if ((z == 0 || read_pages) && (n == 0 || read_mipmaps)) {
 2881    // When we re-read page 0 of the base image, we clear everything and
2881 // start over.
2882 do_clear_ram_image(cdata);
2883 }
2884
2885 if (is_txo_filename(fullpath)) {
2886 if (record != nullptr) {
2887 record->add_dependent_file(fullpath);
2888 }
2889 return do_read_txo_file(cdata, fullpath);
2890 }
2891
2892 if (is_dds_filename(fullpath)) {
2893 if (record != nullptr) {
2894 record->add_dependent_file(fullpath);
2895 }
2896 return do_read_dds_file(cdata, fullpath, header_only);
2897 }
2898
2899 if (is_ktx_filename(fullpath)) {
2900 if (record != nullptr) {
2901 record->add_dependent_file(fullpath);
2902 }
2903 return do_read_ktx_file(cdata, fullpath, header_only);
2904 }
2905
2906 // If read_pages or read_mipmaps is specified, then z and n actually
2907 // indicate z_size and n_size, respectively--the numerical limits on which
2908 // to search for filenames.
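  // (Editor's example, not part of the original source: a cube map is
  // typically read through the public API as
  //     tex->read(Filename("cube_#.png"), 0, 0, true, false);
  // which reaches this point with read_pages = true and z = 0, so the six
  // faces cube_0.png .. cube_5.png are loaded as pages 0 through 5.)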
2909 int z_size = z;
2910 int n_size = n;
2911
2912 // Certain texture types have an implicit z_size. If z_size is omitted,
2913 // choose an appropriate default based on the texture type.
2914 if (z_size == 0) {
2915 switch (cdata->_texture_type) {
2916 case TT_1d_texture:
2917 case TT_2d_texture:
2918 case TT_buffer_texture:
2919 z_size = 1;
2920 break;
2921
2922 case TT_cube_map:
2923 z_size = 6;
2924 break;
2925
2926 default:
2927 break;
2928 }
2929 }
2930
2931 int num_views = 0;
2932 if (options.get_texture_flags() & LoaderOptions::TF_multiview) {
2933 // We'll be loading a multiview texture.
2934 read_pages = true;
2935 if (options.get_texture_num_views() != 0) {
2936 num_views = options.get_texture_num_views();
2937 do_set_num_views(cdata, num_views);
2938 }
2939 }
2940
2942
2943 if (read_pages && read_mipmaps) {
2944 // Read a sequence of pages * mipmap levels.
2945 Filename fullpath_pattern = Filename::pattern_filename(fullpath);
2946 Filename alpha_fullpath_pattern = Filename::pattern_filename(alpha_fullpath);
2947 do_set_z_size(cdata, z_size);
2948
2949 n = 0;
2950 while (true) {
2951 // For mipmap level 0, the total number of pages might be determined by
2952 // the number of files we find. After mipmap level 0, though, the
2953 // number of pages is predetermined.
2954 if (n != 0) {
2955 z_size = do_get_expected_mipmap_z_size(cdata, n);
2956 }
2957
2958 z = 0;
2959
2960 Filename n_pattern = Filename::pattern_filename(fullpath_pattern.get_filename_index(z));
2961 Filename alpha_n_pattern = Filename::pattern_filename(alpha_fullpath_pattern.get_filename_index(z));
2962
2963 if (!n_pattern.has_hash()) {
2964 gobj_cat.error()
2965 << "Filename requires two different hash sequences: " << fullpath
2966 << "\n";
2967 return false;
2968 }
2969
2970 Filename file = n_pattern.get_filename_index(n);
2971 Filename alpha_file = alpha_n_pattern.get_filename_index(n);
2972
2973 if ((n_size == 0 && (vfs->exists(file) || n == 0)) ||
2974 (n_size != 0 && n < n_size)) {
2975 // Continue through the loop.
2976 } else {
2977 // We've reached the end of the mipmap sequence.
2978 break;
2979 }
2980
2981 int num_pages = z_size * num_views;
2982 while ((num_pages == 0 && (vfs->exists(file) || z == 0)) ||
2983 (num_pages != 0 && z < num_pages)) {
2984 if (!do_read_one(cdata, file, alpha_file, z, n, primary_file_num_channels,
2985 alpha_file_channel, options, header_only, record)) {
2986 return false;
2987 }
2988 ++z;
2989
2990 n_pattern = Filename::pattern_filename(fullpath_pattern.get_filename_index(z));
2991 file = n_pattern.get_filename_index(n);
2992 alpha_file = alpha_n_pattern.get_filename_index(n);
2993 }
2994
2995 if (n == 0 && n_size == 0) {
2996 // If n_size is not specified, it gets implicitly set after we read
2997 // the base texture image (which determines the size of the texture).
2998 n_size = do_get_expected_num_mipmap_levels(cdata);
2999 }
3000 ++n;
3001 }
3002 cdata->_fullpath = fullpath_pattern;
3003 cdata->_alpha_fullpath = alpha_fullpath_pattern;
3004
3005 } else if (read_pages) {
3006 // Read a sequence of cube map or 3-D texture pages.
3007 Filename fullpath_pattern = Filename::pattern_filename(fullpath);
3008 Filename alpha_fullpath_pattern = Filename::pattern_filename(alpha_fullpath);
3009 if (!fullpath_pattern.has_hash()) {
3010 gobj_cat.error()
3011 << "Filename requires a hash mark: " << fullpath
3012 << "\n";
3013 return false;
3014 }
3015
3016 do_set_z_size(cdata, z_size);
3017 z = 0;
3018 Filename file = fullpath_pattern.get_filename_index(z);
3019 Filename alpha_file = alpha_fullpath_pattern.get_filename_index(z);
3020
3021 int num_pages = z_size * num_views;
3022 while ((num_pages == 0 && (vfs->exists(file) || z == 0)) ||
3023 (num_pages != 0 && z < num_pages)) {
3024 if (!do_read_one(cdata, file, alpha_file, z, 0, primary_file_num_channels,
3025 alpha_file_channel, options, header_only, record)) {
3026 return false;
3027 }
3028 ++z;
3029
3030 file = fullpath_pattern.get_filename_index(z);
3031 alpha_file = alpha_fullpath_pattern.get_filename_index(z);
3032 }
3033 cdata->_fullpath = fullpath_pattern;
3034 cdata->_alpha_fullpath = alpha_fullpath_pattern;
3035
3036 } else if (read_mipmaps) {
3037 // Read a sequence of mipmap levels.
3038 Filename fullpath_pattern = Filename::pattern_filename(fullpath);
3039 Filename alpha_fullpath_pattern = Filename::pattern_filename(alpha_fullpath);
3040 if (!fullpath_pattern.has_hash()) {
3041 gobj_cat.error()
3042 << "Filename requires a hash mark: " << fullpath
3043 << "\n";
3044 return false;
3045 }
3046
3047 n = 0;
3048 Filename file = fullpath_pattern.get_filename_index(n);
3049 Filename alpha_file = alpha_fullpath_pattern.get_filename_index(n);
3050
3051 while ((n_size == 0 && (vfs->exists(file) || n == 0)) ||
3052 (n_size != 0 && n < n_size)) {
3053 if (!do_read_one(cdata, file, alpha_file, z, n,
3054 primary_file_num_channels, alpha_file_channel,
3055 options, header_only, record)) {
3056 return false;
3057 }
3058 ++n;
3059
3060 if (n_size == 0 && n >= do_get_expected_num_mipmap_levels(cdata)) {
3061 // Don't try to read more than the requisite number of mipmap levels
3062 // (unless the user insisted on it for some reason).
3063 break;
3064 }
3065
3066 file = fullpath_pattern.get_filename_index(n);
3067 alpha_file = alpha_fullpath_pattern.get_filename_index(n);
3068 }
3069 cdata->_fullpath = fullpath_pattern;
3070 cdata->_alpha_fullpath = alpha_fullpath_pattern;
3071
3072 } else {
3073 // Just an ordinary read of one file.
3074 if (!do_read_one(cdata, fullpath, alpha_fullpath, z, n,
3075 primary_file_num_channels, alpha_file_channel,
3076 options, header_only, record)) {
3077 return false;
3078 }
3079 }
3080
3081 cdata->_has_read_pages = read_pages;
3082 cdata->_has_read_mipmaps = read_mipmaps;
3083 cdata->_num_mipmap_levels_read = cdata->_ram_images.size();
3084
3085 if (header_only) {
3086 // If we were only supposed to be checking the image header information,
3087 // don't let the Texture think that it's got the image now.
3088 do_clear_ram_image(cdata);
3089 } else {
3090 if ((options.get_texture_flags() & LoaderOptions::TF_preload) != 0) {
3091 // If we intend to keep the ram image around, consider compressing it
3092 // etc.
3093 bool generate_mipmaps = ((options.get_texture_flags() & LoaderOptions::TF_generate_mipmaps) != 0);
3094 bool allow_compression = ((options.get_texture_flags() & LoaderOptions::TF_allow_compression) != 0);
3095 do_consider_auto_process_ram_image(cdata, generate_mipmaps || uses_mipmaps(), allow_compression);
3096 }
3097 }
3098
3099 return true;
3100}
3101
3102/**
3103 * Called only from do_read(), this method reads a single image file, either
3104 * one page or one mipmap level.
3105 */
3106bool Texture::
3107do_read_one(CData *cdata, const Filename &fullpath, const Filename &alpha_fullpath,
3108 int z, int n, int primary_file_num_channels, int alpha_file_channel,
3109 const LoaderOptions &options, bool header_only, BamCacheRecord *record) {
3110 if (record != nullptr) {
3111 nassertr(!header_only, false);
3112 record->add_dependent_file(fullpath);
3113 }
3114
3115 PNMImage image;
3116 PfmFile pfm;
3117 PNMReader *image_reader = image.make_reader(fullpath, nullptr, false);
3118 if (image_reader == nullptr) {
3119 gobj_cat.error()
3120 << "Texture::read() - couldn't read: " << fullpath << endl;
3121 return false;
3122 }
3123 image.copy_header_from(*image_reader);
3124
3125 AutoTextureScale auto_texture_scale = do_get_auto_texture_scale(cdata);
3126
3127 // If it's a floating-point image file, read it by default into a floating-
3128 // point texture.
3129 bool read_floating_point;
3130 int texture_load_type = (options.get_texture_flags() & (LoaderOptions::TF_integer | LoaderOptions::TF_float));
3131 switch (texture_load_type) {
3132 case LoaderOptions::TF_integer:
3133 read_floating_point = false;
3134 break;
3135
3136 case LoaderOptions::TF_float:
3137 read_floating_point = true;
3138 break;
3139
3140 default:
3141 // Neither TF_integer nor TF_float was specified; determine which way the
3142 // texture wants to be loaded.
3143 read_floating_point = (image_reader->is_floating_point());
3144 if (!alpha_fullpath.empty()) {
3145 read_floating_point = false;
3146 }
3147 }
3148
3149 if (header_only || textures_header_only) {
3150 int x_size = image.get_x_size();
3151 int y_size = image.get_y_size();
3152 if (z == 0 && n == 0) {
3153 cdata->_orig_file_x_size = x_size;
3154 cdata->_orig_file_y_size = y_size;
3155 }
3156
3157 if (textures_header_only) {
3158 // In this mode, we never intend to load the actual texture image
3159 // anyway, so we don't even need to make the size right.
3160 x_size = 1;
3161 y_size = 1;
3162
3163 } else {
3164 adjust_size(x_size, y_size, fullpath.get_basename(), false, auto_texture_scale);
3165 }
3166
3167 if (read_floating_point) {
3168 pfm.clear(x_size, y_size, image.get_num_channels());
3169 } else {
3170 image = PNMImage(x_size, y_size, image.get_num_channels(),
3171 image.get_maxval(), image.get_type(),
3172 image.get_color_space());
3173 image.fill(0.2, 0.3, 1.0);
3174 if (image.has_alpha()) {
3175 image.alpha_fill(1.0);
3176 }
3177 }
3178 delete image_reader;
3179
3180 } else {
3181 if (z == 0 && n == 0) {
3182 int x_size = image.get_x_size();
3183 int y_size = image.get_y_size();
3184
3185 cdata->_orig_file_x_size = x_size;
3186 cdata->_orig_file_y_size = y_size;
3187
3188 if (adjust_size(x_size, y_size, fullpath.get_basename(), false, auto_texture_scale)) {
3189 image.set_read_size(x_size, y_size);
3190 }
3191 } else {
3192 image.set_read_size(do_get_expected_mipmap_x_size(cdata, n),
3193 do_get_expected_mipmap_y_size(cdata, n));
3194 }
3195
3196 if (image.get_x_size() != image.get_read_x_size() ||
3197 image.get_y_size() != image.get_read_y_size()) {
3198 gobj_cat.info()
3199 << "Implicitly rescaling " << fullpath.get_basename() << " from "
3200 << image.get_x_size() << " by " << image.get_y_size() << " to "
3201 << image.get_read_x_size() << " by " << image.get_read_y_size()
3202 << "\n";
3203 }
3204
3205 bool success;
3206 if (read_floating_point) {
3207 success = pfm.read(image_reader);
3208 } else {
3209 success = image.read(image_reader);
3210 }
3211
3212 if (!success) {
3213 gobj_cat.error()
3214 << "Texture::read() - couldn't read: " << fullpath << endl;
3215 return false;
3216 }
 3217    Thread::consider_yield();
3218 }
3219
3220 PNMImage alpha_image;
3221 if (!alpha_fullpath.empty()) {
3222 PNMReader *alpha_image_reader = alpha_image.make_reader(alpha_fullpath, nullptr, false);
3223 if (alpha_image_reader == nullptr) {
3224 gobj_cat.error()
3225 << "Texture::read() - couldn't read: " << alpha_fullpath << endl;
3226 return false;
3227 }
3228 alpha_image.copy_header_from(*alpha_image_reader);
3229
3230 if (record != nullptr) {
3231 record->add_dependent_file(alpha_fullpath);
3232 }
3233
3234 if (header_only || textures_header_only) {
3235 int x_size = image.get_x_size();
3236 int y_size = image.get_y_size();
3237 alpha_image = PNMImage(x_size, y_size, alpha_image.get_num_channels(),
3238 alpha_image.get_maxval(), alpha_image.get_type(),
3239 alpha_image.get_color_space());
3240 alpha_image.fill(1.0);
3241 if (alpha_image.has_alpha()) {
3242 alpha_image.alpha_fill(1.0);
3243 }
3244 delete alpha_image_reader;
3245
3246 } else {
3247 if (image.get_x_size() != alpha_image.get_x_size() ||
3248 image.get_y_size() != alpha_image.get_y_size()) {
3249 gobj_cat.info()
3250 << "Implicitly rescaling " << alpha_fullpath.get_basename()
3251 << " from " << alpha_image.get_x_size() << " by "
3252 << alpha_image.get_y_size() << " to " << image.get_x_size()
3253 << " by " << image.get_y_size() << "\n";
3254 alpha_image.set_read_size(image.get_x_size(), image.get_y_size());
3255 }
3256
3257 if (!alpha_image.read(alpha_image_reader)) {
3258 gobj_cat.error()
3259 << "Texture::read() - couldn't read (alpha): " << alpha_fullpath << endl;
3260 return false;
3261 }
 3262      Thread::consider_yield();
3263 }
3264 }
3265
3266 if (z == 0 && n == 0) {
3267 if (!has_name()) {
3268 set_name(fullpath.get_basename_wo_extension());
3269 }
3270 if (cdata->_filename.empty()) {
3271 cdata->_filename = fullpath;
3272 cdata->_alpha_filename = alpha_fullpath;
3273
3274 // The first time we set the filename via a read() operation, we clear
3275 // keep_ram_image. The user can always set it again later if he needs
3276 // to.
3277 cdata->_keep_ram_image = false;
3278 }
3279
3280 cdata->_fullpath = fullpath;
3281 cdata->_alpha_fullpath = alpha_fullpath;
3282 }
3283
3284 if (!alpha_fullpath.empty()) {
3285 // The grayscale (alpha channel) image must be the same size as the main
3286 // image. This should really have been already guaranteed by the above.
3287 if (image.get_x_size() != alpha_image.get_x_size() ||
3288 image.get_y_size() != alpha_image.get_y_size()) {
3289 gobj_cat.info()
3290 << "Automatically rescaling " << alpha_fullpath.get_basename()
3291 << " from " << alpha_image.get_x_size() << " by "
3292 << alpha_image.get_y_size() << " to " << image.get_x_size()
3293 << " by " << image.get_y_size() << "\n";
3294
3295 PNMImage scaled(image.get_x_size(), image.get_y_size(),
3296 alpha_image.get_num_channels(),
3297 alpha_image.get_maxval(), alpha_image.get_type(),
3298 alpha_image.get_color_space());
3299 scaled.quick_filter_from(alpha_image);
 3300      Thread::consider_yield();
3301 alpha_image = scaled;
3302 }
3303 }
3304
3305 if (n == 0) {
3306 consider_downgrade(image, primary_file_num_channels, get_name());
3307 cdata->_primary_file_num_channels = image.get_num_channels();
3308 cdata->_alpha_file_channel = 0;
3309 }
3310
3311 if (!alpha_fullpath.empty()) {
3312 // Make the original image a 4-component image by taking the grayscale
3313 // value from the second image.
3314 image.add_alpha();
3315
3316 if (alpha_file_channel == 4 ||
3317 (alpha_file_channel == 2 && alpha_image.get_num_channels() == 2)) {
3318
3319 if (!alpha_image.has_alpha()) {
3320 gobj_cat.error()
3321 << alpha_fullpath.get_basename() << " has no channel " << alpha_file_channel << ".\n";
3322 } else {
3323 // Use the alpha channel.
3324 for (int x = 0; x < image.get_x_size(); x++) {
3325 for (int y = 0; y < image.get_y_size(); y++) {
3326 image.set_alpha(x, y, alpha_image.get_alpha(x, y));
3327 }
3328 }
3329 }
3330 cdata->_alpha_file_channel = alpha_image.get_num_channels();
3331
3332 } else if (alpha_file_channel >= 1 && alpha_file_channel <= 3 &&
3333 alpha_image.get_num_channels() >= 3) {
3334 // Use the appropriate red, green, or blue channel.
3335 for (int x = 0; x < image.get_x_size(); x++) {
3336 for (int y = 0; y < image.get_y_size(); y++) {
3337 image.set_alpha(x, y, alpha_image.get_channel_val(x, y, alpha_file_channel - 1));
3338 }
3339 }
3340 cdata->_alpha_file_channel = alpha_file_channel;
3341
3342 } else {
3343 // Use the grayscale channel.
3344 for (int x = 0; x < image.get_x_size(); x++) {
3345 for (int y = 0; y < image.get_y_size(); y++) {
3346 image.set_alpha(x, y, alpha_image.get_gray(x, y));
3347 }
3348 }
3349 cdata->_alpha_file_channel = 0;
3350 }
3351 }
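  // (Editor's example, not part of the original source: this path is reached
  // by the two-filename read() overload, e.g.
  //     tex->read(Filename("color.jpg"), Filename("alpha.png"), 0, 0);
  // which loads color.jpg for the RGB channels and, following the rules
  // above, takes the alpha channel from alpha.png.)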
3352
3353 if (read_floating_point) {
3354 if (!do_load_one(cdata, pfm, fullpath.get_basename(), z, n, options)) {
3355 return false;
3356 }
3357 } else {
3358 // Now see if we want to pad the image within a larger power-of-2 image.
3359 int pad_x_size = 0;
3360 int pad_y_size = 0;
3361 if (do_get_auto_texture_scale(cdata) == ATS_pad) {
3362 int new_x_size = image.get_x_size();
3363 int new_y_size = image.get_y_size();
3364 if (do_adjust_this_size(cdata, new_x_size, new_y_size, fullpath.get_basename(), true)) {
3365 pad_x_size = new_x_size - image.get_x_size();
3366 pad_y_size = new_y_size - image.get_y_size();
3367 PNMImage new_image(new_x_size, new_y_size, image.get_num_channels(),
3368 image.get_maxval(), image.get_type(),
3369 image.get_color_space());
3370 new_image.copy_sub_image(image, 0, new_y_size - image.get_y_size());
3371 image.take_from(new_image);
3372 }
3373 }
3374
3375 if (!do_load_one(cdata, image, fullpath.get_basename(), z, n, options)) {
3376 return false;
3377 }
3378
3379 do_set_pad_size(cdata, pad_x_size, pad_y_size, 0);
3380 }
3381 return true;
3382}
3383
3384/**
3385 * Internal method to load a single page or mipmap level.
3386 */
3387bool Texture::
3388do_load_one(CData *cdata, const PNMImage &pnmimage, const string &name, int z, int n,
3389 const LoaderOptions &options) {
3390 if (cdata->_ram_images.size() <= 1 && n == 0) {
3391 // A special case for mipmap level 0. When we load mipmap level 0, unless
3392 // we already have mipmap levels, it determines the image properties like
3393 // size and number of components.
3394 if (!do_reconsider_z_size(cdata, z, options)) {
3395 return false;
3396 }
3397 nassertr(z >= 0 && z < cdata->_z_size * cdata->_num_views, false);
3398
3399 if (z == 0) {
3400 ComponentType component_type = T_unsigned_byte;
3401 xelval maxval = pnmimage.get_maxval();
3402 if (maxval > 255) {
3403 component_type = T_unsigned_short;
3404 }
3405
3406 if (!do_reconsider_image_properties(cdata, pnmimage.get_x_size(), pnmimage.get_y_size(),
3407 pnmimage.get_num_channels(), component_type,
3408 z, options)) {
3409 return false;
3410 }
3411 }
3412
3413 do_modify_ram_image(cdata);
3414 cdata->_loaded_from_image = true;
3415 }
3416
3417 do_modify_ram_mipmap_image(cdata, n);
3418
3419 // Ensure the PNMImage is an appropriate size.
3420 int x_size = do_get_expected_mipmap_x_size(cdata, n);
3421 int y_size = do_get_expected_mipmap_y_size(cdata, n);
3422 if (pnmimage.get_x_size() != x_size ||
3423 pnmimage.get_y_size() != y_size) {
3424 gobj_cat.info()
3425 << "Automatically rescaling " << name;
3426 if (n != 0) {
3427 gobj_cat.info(false)
3428 << " mipmap level " << n;
3429 }
3430 gobj_cat.info(false)
3431 << " from " << pnmimage.get_x_size() << " by "
3432 << pnmimage.get_y_size() << " to " << x_size << " by "
3433 << y_size << "\n";
3434
3435 PNMImage scaled(x_size, y_size, pnmimage.get_num_channels(),
3436 pnmimage.get_maxval(), pnmimage.get_type(),
3437 pnmimage.get_color_space());
3438 scaled.quick_filter_from(pnmimage);
 3439    Thread::consider_yield();
3440
3441 convert_from_pnmimage(cdata->_ram_images[n]._image,
3442 do_get_expected_ram_mipmap_page_size(cdata, n),
3443 x_size, 0, 0, z, scaled,
3444 cdata->_num_components, cdata->_component_width);
3445 } else {
3446 // Now copy the pixel data from the PNMImage into our internal
3447 // cdata->_image component.
3448 convert_from_pnmimage(cdata->_ram_images[n]._image,
3449 do_get_expected_ram_mipmap_page_size(cdata, n),
3450 x_size, 0, 0, z, pnmimage,
3451 cdata->_num_components, cdata->_component_width);
3452 }
 3453  Thread::consider_yield();
3454
3455 return true;
3456}
3457
3458/**
3459 * Internal method to load a single page or mipmap level.
3460 */
3461bool Texture::
3462do_load_one(CData *cdata, const PfmFile &pfm, const string &name, int z, int n,
3463 const LoaderOptions &options) {
3464 if (cdata->_ram_images.size() <= 1 && n == 0) {
3465 // A special case for mipmap level 0. When we load mipmap level 0, unless
3466 // we already have mipmap levels, it determines the image properties like
3467 // size and number of components.
3468 if (!do_reconsider_z_size(cdata, z, options)) {
3469 return false;
3470 }
3471 nassertr(z >= 0 && z < cdata->_z_size * cdata->_num_views, false);
3472
3473 if (z == 0) {
3474 ComponentType component_type = T_float;
3475 if (!do_reconsider_image_properties(cdata, pfm.get_x_size(), pfm.get_y_size(),
3476 pfm.get_num_channels(), component_type,
3477 z, options)) {
3478 return false;
3479 }
3480 }
3481
3482 do_modify_ram_image(cdata);
3483 cdata->_loaded_from_image = true;
3484 }
3485
3486 do_modify_ram_mipmap_image(cdata, n);
3487
3488 // Ensure the PfmFile is an appropriate size.
3489 int x_size = do_get_expected_mipmap_x_size(cdata, n);
3490 int y_size = do_get_expected_mipmap_y_size(cdata, n);
3491 if (pfm.get_x_size() != x_size ||
3492 pfm.get_y_size() != y_size) {
3493 gobj_cat.info()
3494 << "Automatically rescaling " << name;
3495 if (n != 0) {
3496 gobj_cat.info(false)
3497 << " mipmap level " << n;
3498 }
3499 gobj_cat.info(false)
3500 << " from " << pfm.get_x_size() << " by "
3501 << pfm.get_y_size() << " to " << x_size << " by "
3502 << y_size << "\n";
3503
3504 PfmFile scaled(pfm);
3505 scaled.resize(x_size, y_size);
 3506    Thread::consider_yield();
3507
3508 convert_from_pfm(cdata->_ram_images[n]._image,
3509 do_get_expected_ram_mipmap_page_size(cdata, n), z,
3510 scaled, cdata->_num_components, cdata->_component_width);
3511 } else {
3512 // Now copy the pixel data from the PfmFile into our internal
3513 // cdata->_image component.
3514 convert_from_pfm(cdata->_ram_images[n]._image,
3515 do_get_expected_ram_mipmap_page_size(cdata, n), z,
3516 pfm, cdata->_num_components, cdata->_component_width);
3517 }
 3518  Thread::consider_yield();
3519
3520 return true;
3521}
3522
3523/**
3524 * Internal method to load an image into a section of a texture page or mipmap
3525 * level.
3526 */
3527bool Texture::
3528do_load_sub_image(CData *cdata, const PNMImage &image, int x, int y, int z, int n) {
3529 nassertr(n >= 0 && (size_t)n < cdata->_ram_images.size(), false);
3530
3531 int tex_x_size = do_get_expected_mipmap_x_size(cdata, n);
3532 int tex_y_size = do_get_expected_mipmap_y_size(cdata, n);
3533 int tex_z_size = do_get_expected_mipmap_z_size(cdata, n);
3534
3535 nassertr(x >= 0 && x < tex_x_size, false);
3536 nassertr(y >= 0 && y < tex_y_size, false);
3537 nassertr(z >= 0 && z < tex_z_size, false);
3538
3539 nassertr(image.get_x_size() + x <= tex_x_size, false);
3540 nassertr(image.get_y_size() + y <= tex_y_size, false);
3541
3542 // Flip y
3543 y = cdata->_y_size - (image.get_y_size() + y);
3544
3545 cdata->inc_image_modified();
3546 do_modify_ram_mipmap_image(cdata, n);
3547 convert_from_pnmimage(cdata->_ram_images[n]._image,
3548 do_get_expected_ram_mipmap_page_size(cdata, n),
3549 tex_x_size, x, y, z, image,
3550 cdata->_num_components, cdata->_component_width);
3551
3552 return true;
3553}
3554
3555/**
3556 * Called internally when read() detects a txo file. Assumes the lock is
3557 * already held.
3558 */
3559bool Texture::
3560do_read_txo_file(CData *cdata, const Filename &fullpath) {
 3561  VirtualFileSystem *vfs = VirtualFileSystem::get_global_ptr();
3562
3563 Filename filename = Filename::binary_filename(fullpath);
3564 PT(VirtualFile) file = vfs->get_file(filename);
3565 if (file == nullptr) {
3566 // No such file.
3567 gobj_cat.error()
3568 << "Could not find " << fullpath << "\n";
3569 return false;
3570 }
3571
3572 if (gobj_cat.is_debug()) {
3573 gobj_cat.debug()
3574 << "Reading texture object " << filename << "\n";
3575 }
3576
3577 istream *in = file->open_read_file(true);
3578 if (in == nullptr) {
3579 gobj_cat.error()
3580 << "Failed to open " << filename << " for reading.\n";
3581 return false;
3582 }
3583
3584 bool success = do_read_txo(cdata, *in, fullpath);
3585 vfs->close_read_file(in);
3586
3587 cdata->_fullpath = fullpath;
3588 cdata->_alpha_fullpath = Filename();
3589 cdata->_keep_ram_image = false;
3590
3591 return success;
3592}
3593
3594/**
3595 *
3596 */
3597bool Texture::
3598do_read_txo(CData *cdata, istream &in, const string &filename) {
3599 PT(Texture) other = make_from_txo(in, filename);
3600 if (other == nullptr) {
3601 return false;
3602 }
3603
3604 CDReader cdata_other(other->_cycler);
3605 Namable::operator = (*other);
3606 do_assign(cdata, other, cdata_other);
3607
3608 cdata->_loaded_from_image = true;
3609 cdata->_loaded_from_txo = true;
3610 cdata->_has_read_pages = false;
3611 cdata->_has_read_mipmaps = false;
3612 cdata->_num_mipmap_levels_read = 0;
3613 return true;
3614}
3615
3616/**
3617 * Called internally when read() detects a DDS file. Assumes the lock is
3618 * already held.
3619 */
3620bool Texture::
3621do_read_dds_file(CData *cdata, const Filename &fullpath, bool header_only) {
 3622  VirtualFileSystem *vfs = VirtualFileSystem::get_global_ptr();
3623
3624 Filename filename = Filename::binary_filename(fullpath);
3625 PT(VirtualFile) file = vfs->get_file(filename);
3626 if (file == nullptr) {
3627 // No such file.
3628 gobj_cat.error()
3629 << "Could not find " << fullpath << "\n";
3630 return false;
3631 }
3632
3633 if (gobj_cat.is_debug()) {
3634 gobj_cat.debug()
3635 << "Reading DDS file " << filename << "\n";
3636 }
3637
3638 istream *in = file->open_read_file(true);
3639 if (in == nullptr) {
3640 gobj_cat.error()
3641 << "Failed to open " << filename << " for reading.\n";
3642 return false;
3643 }
3644
3645 bool success = do_read_dds(cdata, *in, fullpath, header_only);
3646 vfs->close_read_file(in);
3647
3648 if (!has_name()) {
3649 set_name(fullpath.get_basename_wo_extension());
3650 }
3651
3652 cdata->_fullpath = fullpath;
3653 cdata->_alpha_fullpath = Filename();
3654 cdata->_keep_ram_image = false;
3655
3656 return success;
3657}
3658
3659/**
3660 *
3661 */
3662bool Texture::
3663do_read_dds(CData *cdata, istream &in, const string &filename, bool header_only) {
3664 StreamReader dds(in);
3665
3666 // DDS header (19 words)
3667 DDSHeader header;
3668 header.dds_magic = dds.get_uint32();
3669 header.dds_size = dds.get_uint32();
3670 header.dds_flags = dds.get_uint32();
3671 header.height = dds.get_uint32();
3672 header.width = dds.get_uint32();
3673 header.pitch = dds.get_uint32();
3674 header.depth = dds.get_uint32();
3675 header.num_levels = dds.get_uint32();
3676 dds.skip_bytes(44);
3677
3678 // Pixelformat (8 words)
3679 header.pf.pf_size = dds.get_uint32();
3680 header.pf.pf_flags = dds.get_uint32();
3681 header.pf.four_cc = dds.get_uint32();
3682 header.pf.rgb_bitcount = dds.get_uint32();
3683 header.pf.r_mask = dds.get_uint32();
3684 header.pf.g_mask = dds.get_uint32();
3685 header.pf.b_mask = dds.get_uint32();
3686 header.pf.a_mask = dds.get_uint32();
3687
3688 // Caps (4 words)
3689 header.caps.caps1 = dds.get_uint32();
3690 header.caps.caps2 = dds.get_uint32();
3691 header.caps.ddsx = dds.get_uint32();
3692 dds.skip_bytes(4);
3693
3694 // Pad out to 32 words
3695 dds.skip_bytes(4);
3696
3697 if (header.dds_magic != DDS_MAGIC || (in.fail() || in.eof())) {
3698 gobj_cat.error()
3699 << filename << " is not a DDS file.\n";
3700 return false;
3701 }
3702
3703 if ((header.dds_flags & DDSD_MIPMAPCOUNT) == 0) {
3704 // No bit set means only the base mipmap level.
3705 header.num_levels = 1;
3706
3707 } else if (header.num_levels == 0) {
3708 // Some files seem to have this set to 0 for some reason--existing readers
3709 // assume 0 means 1.
3710 header.num_levels = 1;
3711 }
3712
3713 TextureType texture_type;
3714 if (header.caps.caps2 & DDSCAPS2_CUBEMAP) {
3715 static const unsigned int all_faces =
3716 (DDSCAPS2_CUBEMAP_POSITIVEX |
3717 DDSCAPS2_CUBEMAP_POSITIVEY |
3718 DDSCAPS2_CUBEMAP_POSITIVEZ |
3719 DDSCAPS2_CUBEMAP_NEGATIVEX |
3720 DDSCAPS2_CUBEMAP_NEGATIVEY |
3721 DDSCAPS2_CUBEMAP_NEGATIVEZ);
3722 if ((header.caps.caps2 & all_faces) != all_faces) {
3723 gobj_cat.error()
3724 << filename << " is missing some cube map faces; cannot load.\n";
3725 return false;
3726 }
3727 header.depth = 6;
3728 texture_type = TT_cube_map;
3729
3730 } else if (header.caps.caps2 & DDSCAPS2_VOLUME) {
3731 texture_type = TT_3d_texture;
3732
3733 } else {
3734 texture_type = TT_2d_texture;
3735 header.depth = 1;
3736 }
3737
3738 // Determine the function to use to read the DDS image.
3739 typedef PTA_uchar (*ReadDDSLevelFunc)(Texture *tex, Texture::CData *cdata,
3740 const DDSHeader &header, int n, istream &in);
3741 ReadDDSLevelFunc func = nullptr;
3742
3743 Format format = F_rgb;
3744 ComponentType component_type = T_unsigned_byte;
3745
3746 do_clear_ram_image(cdata);
3747 CompressionMode compression = CM_off;
3748
3749 if ((header.pf.pf_flags & DDPF_FOURCC) != 0 &&
3750 header.pf.four_cc == 0x30315844) { // 'DX10'
3751 // A DirectX 10 style texture, which has an additional header.
3752 func = read_dds_level_generic_uncompressed;
3753 unsigned int dxgi_format = dds.get_uint32();
3754 unsigned int dimension = dds.get_uint32();
3755 unsigned int misc_flag = dds.get_uint32();
3756 unsigned int array_size = dds.get_uint32();
3757 /*unsigned int alpha_mode = */dds.get_uint32();
3758
3759 switch (dxgi_format) {
3760 case 2: // DXGI_FORMAT_R32G32B32A32_FLOAT
3761 format = F_rgba32;
3762 component_type = T_float;
3763 func = read_dds_level_abgr32;
3764 break;
3765 case 10: // DXGI_FORMAT_R16G16B16A16_FLOAT
3766 format = F_rgba16;
3767 component_type = T_half_float;
3768 func = read_dds_level_abgr16;
3769 break;
3770 case 11: // DXGI_FORMAT_R16G16B16A16_UNORM
3771 format = F_rgba16;
3772 component_type = T_unsigned_short;
3773 func = read_dds_level_abgr16;
3774 break;
3775 case 12: // DXGI_FORMAT_R16G16B16A16_UINT
3776 format = F_rgba16i;
3777 component_type = T_unsigned_short;
3778 func = read_dds_level_abgr16;
3779 break;
3780 case 14: // DXGI_FORMAT_R16G16B16A16_SINT
3781 format = F_rgba16i;
3782 component_type = T_short;
3783 func = read_dds_level_abgr16;
3784 break;
3785 case 16: // DXGI_FORMAT_R32G32_FLOAT
3786 format = F_rg32;
3787 component_type = T_float;
3788 func = read_dds_level_raw;
3789 break;
3790 case 17: // DXGI_FORMAT_R32G32_UINT
3791 format = F_rg32i;
3792 component_type = T_unsigned_int;
3793 func = read_dds_level_raw;
3794 break;
3795 case 18: // DXGI_FORMAT_R32G32_SINT
3796 format = F_rg32i;
3797 component_type = T_int;
3798 func = read_dds_level_raw;
3799 break;
3800 case 27: // DXGI_FORMAT_R8G8B8A8_TYPELESS
3801 case 28: // DXGI_FORMAT_R8G8B8A8_UNORM
3802 format = F_rgba8;
3803 func = read_dds_level_abgr8;
3804 break;
3805 case 29: // DXGI_FORMAT_R8G8B8A8_UNORM_SRGB
3806 format = F_srgb_alpha;
3807 func = read_dds_level_abgr8;
3808 break;
3809 case 30: // DXGI_FORMAT_R8G8B8A8_UINT
3810 format = F_rgba8i;
3811 func = read_dds_level_abgr8;
3812 break;
3813 case 31: // DXGI_FORMAT_R8G8B8A8_SNORM
3814 format = F_rgba8;
3815 component_type = T_byte;
3816 func = read_dds_level_abgr8;
3817 break;
3818 case 32: // DXGI_FORMAT_R8G8B8A8_SINT
3819 format = F_rgba8i;
3820 component_type = T_byte;
3821 func = read_dds_level_abgr8;
3822 break;
3823 case 34: // DXGI_FORMAT_R16G16_FLOAT:
3824 format = F_rg16;
3825 component_type = T_half_float;
3826 func = read_dds_level_raw;
3827 break;
3828 case 35: // DXGI_FORMAT_R16G16_UNORM:
3829 format = F_rg16;
3830 component_type = T_unsigned_short;
3831 func = read_dds_level_raw;
3832 break;
3833 case 36: // DXGI_FORMAT_R16G16_UINT:
3834 format = F_rg16i;
3835 component_type = T_unsigned_short;
3836 func = read_dds_level_raw;
3837 break;
3838 case 37: // DXGI_FORMAT_R16G16_SNORM:
3839 format = F_rg16;
3840 component_type = T_short;
3841 func = read_dds_level_raw;
3842 break;
3843 case 38: // DXGI_FORMAT_R16G16_SINT:
3844 format = F_rg16i;
3845 component_type = T_short;
3846 func = read_dds_level_raw;
3847 break;
3848 case 40: // DXGI_FORMAT_D32_FLOAT
3849 format = F_depth_component32;
3850 component_type = T_float;
3851 func = read_dds_level_raw;
3852 break;
3853 case 41: // DXGI_FORMAT_R32_FLOAT
3854 format = F_r32;
3855 component_type = T_float;
3856 func = read_dds_level_raw;
3857 break;
3858 case 42: // DXGI_FORMAT_R32_UINT
3859 format = F_r32i;
3860 component_type = T_unsigned_int;
3861 func = read_dds_level_raw;
3862 break;
3863 case 43: // DXGI_FORMAT_R32_SINT
3864 format = F_r32i;
3865 component_type = T_int;
3866 func = read_dds_level_raw;
3867 break;
3868 case 48: // DXGI_FORMAT_R8G8_TYPELESS
3869 case 49: // DXGI_FORMAT_R8G8_UNORM
3870 format = F_rg;
3871 break;
3872 case 50: // DXGI_FORMAT_R8G8_UINT
3873 format = F_rg8i;
3874 break;
3875 case 51: // DXGI_FORMAT_R8G8_SNORM
3876 format = F_rg;
3877 component_type = T_byte;
3878 break;
3879 case 52: // DXGI_FORMAT_R8G8_SINT
3880 format = F_rg8i;
3881 component_type = T_byte;
3882 break;
3883 case 54: // DXGI_FORMAT_R16_FLOAT:
3884 format = F_r16;
3885 component_type = T_half_float;
3886 func = read_dds_level_raw;
3887 break;
3888 case 55: // DXGI_FORMAT_D16_UNORM:
3889 format = F_depth_component16;
3890 component_type = T_unsigned_short;
3891 func = read_dds_level_raw;
3892 break;
3893 case 56: // DXGI_FORMAT_R16_UNORM:
3894 format = F_r16;
3895 component_type = T_unsigned_short;
3896 func = read_dds_level_raw;
3897 break;
3898 case 57: // DXGI_FORMAT_R16_UINT:
3899 format = F_r16i;
3900 component_type = T_unsigned_short;
3901 func = read_dds_level_raw;
3902 break;
3903 case 58: // DXGI_FORMAT_R16_SNORM:
3904 format = F_r16;
3905 component_type = T_short;
3906 func = read_dds_level_raw;
3907 break;
3908 case 59: // DXGI_FORMAT_R16_SINT:
3909 format = F_r16i;
3910 component_type = T_short;
3911 func = read_dds_level_raw;
3912 break;
3913 case 60: // DXGI_FORMAT_R8_TYPELESS
3914 case 61: // DXGI_FORMAT_R8_UNORM
3915 format = F_red;
3916 break;
3917 case 62: // DXGI_FORMAT_R8_UINT
3918 format = F_r8i;
3919 break;
3920 case 63: // DXGI_FORMAT_R8_SNORM
3921 format = F_red;
3922 component_type = T_byte;
3923 break;
3924 case 64: // DXGI_FORMAT_R8_SINT
3925 format = F_r8i;
3926 component_type = T_byte;
3927 break;
3928 case 65: // DXGI_FORMAT_A8_UNORM
3929 format = F_alpha;
3930 break;
3931 case 70: // DXGI_FORMAT_BC1_TYPELESS
3932 case 71: // DXGI_FORMAT_BC1_UNORM
3933 format = F_rgb;
3934 compression = CM_dxt1;
3935 func = read_dds_level_bc1;
3936 break;
3937 case 72: // DXGI_FORMAT_BC1_UNORM_SRGB
3938 format = F_srgb;
3939 compression = CM_dxt1;
3940 func = read_dds_level_bc1;
3941 break;
3942 case 73: // DXGI_FORMAT_BC2_TYPELESS
3943 case 74: // DXGI_FORMAT_BC2_UNORM
3944 format = F_rgba;
3945 compression = CM_dxt3;
3946 func = read_dds_level_bc2;
3947 break;
3948 case 75: // DXGI_FORMAT_BC2_UNORM_SRGB
3949 format = F_srgb_alpha;
3950 compression = CM_dxt3;
3951 func = read_dds_level_bc2;
3952 break;
3953 case 76: // DXGI_FORMAT_BC3_TYPELESS
3954 case 77: // DXGI_FORMAT_BC3_UNORM
3955 format = F_rgba;
3956 compression = CM_dxt5;
3957 func = read_dds_level_bc3;
3958 break;
3959 case 78: // DXGI_FORMAT_BC3_UNORM_SRGB
3960 format = F_srgb_alpha;
3961 compression = CM_dxt5;
3962 func = read_dds_level_bc3;
3963 break;
3964 case 79: // DXGI_FORMAT_BC4_TYPELESS
3965 case 80: // DXGI_FORMAT_BC4_UNORM
3966 format = F_red;
3967 compression = CM_rgtc;
3968 func = read_dds_level_bc4;
3969 break;
3970 case 82: // DXGI_FORMAT_BC5_TYPELESS
3971 case 83: // DXGI_FORMAT_BC5_UNORM
3972 format = F_rg;
3973 compression = CM_rgtc;
3974 func = read_dds_level_bc5;
3975 break;
3976 case 87: // DXGI_FORMAT_B8G8R8A8_UNORM
3977 case 90: // DXGI_FORMAT_B8G8R8A8_TYPELESS
3978 format = F_rgba8;
3979 break;
3980 case 88: // DXGI_FORMAT_B8G8R8X8_UNORM
3981 case 92: // DXGI_FORMAT_B8G8R8X8_TYPELESS
3982 format = F_rgb8;
3983 break;
3984 case 91: // DXGI_FORMAT_B8G8R8A8_UNORM_SRGB
3985 format = F_srgb_alpha;
3986 break;
3987 case 93: // DXGI_FORMAT_B8G8R8X8_UNORM_SRGB
3988 format = F_srgb;
3989 break;
3990 case 115: // DXGI_FORMAT_B4G4R4A4_UNORM
3991 format = F_rgba4;
3992 break;
3993 default:
3994 gobj_cat.error()
3995 << filename << ": unsupported DXGI format " << dxgi_format << ".\n";
3996 return false;
3997 }
3998
3999 switch (dimension) {
4000 case 2: // DDS_DIMENSION_TEXTURE1D
4001 texture_type = TT_1d_texture;
4002 header.depth = 1;
4003 break;
4004 case 3: // DDS_DIMENSION_TEXTURE2D
4005 if (misc_flag & 0x4) { // DDS_RESOURCE_MISC_TEXTURECUBE
4006 if (array_size > 1) {
4007 texture_type = TT_cube_map_array;
4008 header.depth = array_size * 6;
4009 } else {
4010 texture_type = TT_cube_map;
4011 header.depth = 6;
4012 }
4013 } else {
4014 if (array_size > 1) {
4015 texture_type = TT_2d_texture_array;
4016 header.depth = array_size;
4017 } else {
4018 texture_type = TT_2d_texture;
4019 header.depth = 1;
4020 }
4021 }
4022 break;
4023 case 4: // DDS_DIMENSION_TEXTURE3D
4024 texture_type = TT_3d_texture;
4025 break;
4026 default:
4027 gobj_cat.error()
4028 << filename << ": unsupported dimension.\n";
4029 return false;
4030 }
4031
4032 } else if (header.pf.pf_flags & DDPF_FOURCC) {
4033 // Some compressed texture format.
4034 if (texture_type == TT_3d_texture) {
4035 gobj_cat.error()
4036 << filename << ": unsupported compression on 3-d texture.\n";
4037 return false;
4038 }
4039
4040 // Most of the compressed formats support alpha.
4041 format = F_rgba;
4042 switch (header.pf.four_cc) {
4043 case 0x31545844: // 'DXT1', little-endian.
4044 compression = CM_dxt1;
4045 func = read_dds_level_bc1;
4046 format = F_rgbm;
4047 break;
4048 case 0x32545844: // 'DXT2'
4049 compression = CM_dxt2;
4050 func = read_dds_level_bc2;
4051 break;
4052 case 0x33545844: // 'DXT3'
4053 compression = CM_dxt3;
4054 func = read_dds_level_bc2;
4055 break;
4056 case 0x34545844: // 'DXT4'
4057 compression = CM_dxt4;
4058 func = read_dds_level_bc3;
4059 break;
4060 case 0x35545844: // 'DXT5'
4061 compression = CM_dxt5;
4062 func = read_dds_level_bc3;
4063 break;
4064 case 0x31495441: // 'ATI1'
4065 case 0x55344342: // 'BC4U'
4066 compression = CM_rgtc;
4067 func = read_dds_level_bc4;
4068 format = F_red;
4069 break;
4070 case 0x32495441: // 'ATI2'
4071 case 0x55354342: // 'BC5U'
4072 compression = CM_rgtc;
4073 func = read_dds_level_bc5;
4074 format = F_rg;
4075 break;
4076 case 36: // D3DFMT_A16B16G16R16
4077 func = read_dds_level_abgr16;
4078 format = F_rgba16;
4079 component_type = T_unsigned_short;
4080 break;
4081 case 110: // D3DFMT_Q16W16V16U16
4082 func = read_dds_level_abgr16;
4083 format = F_rgba16;
4084 component_type = T_short;
4085 break;
4086 case 113: // D3DFMT_A16B16G16R16F
4087 func = read_dds_level_abgr16;
4088 format = F_rgba16;
4089 component_type = T_half_float;
4090 break;
4091 case 116: // D3DFMT_A32B32G32R32F
4092 func = read_dds_level_abgr32;
4093 format = F_rgba32;
4094 component_type = T_float;
4095 break;
4096 default:
4097 gobj_cat.error()
4098 << filename << ": unsupported texture compression (FourCC: 0x"
4099 << std::hex << header.pf.four_cc << std::dec << ").\n";
4100 return false;
4101 }
4102
4103 } else {
4104 // An uncompressed texture format.
4105 func = read_dds_level_generic_uncompressed;
4106
4107 if (header.pf.pf_flags & DDPF_ALPHAPIXELS) {
4108 // An uncompressed format that involves alpha.
4109 format = F_rgba;
4110 if (header.pf.rgb_bitcount == 32 &&
4111 header.pf.r_mask == 0x000000ff &&
4112 header.pf.g_mask == 0x0000ff00 &&
4113 header.pf.b_mask == 0x00ff0000 &&
4114 header.pf.a_mask == 0xff000000U) {
4115 func = read_dds_level_abgr8;
4116 } else if (header.pf.rgb_bitcount == 32 &&
4117 header.pf.r_mask == 0x00ff0000 &&
4118 header.pf.g_mask == 0x0000ff00 &&
4119 header.pf.b_mask == 0x000000ff &&
4120 header.pf.a_mask == 0xff000000U) {
4121 func = read_dds_level_rgba8;
4122
4123 } else if (header.pf.r_mask != 0 &&
4124 header.pf.g_mask == 0 &&
4125 header.pf.b_mask == 0) {
4126 func = read_dds_level_luminance_uncompressed;
4127 format = F_luminance_alpha;
4128 }
4129 } else {
4130 // An uncompressed format that doesn't involve alpha.
4131 if (header.pf.rgb_bitcount == 24 &&
4132 header.pf.r_mask == 0x00ff0000 &&
4133 header.pf.g_mask == 0x0000ff00 &&
4134 header.pf.b_mask == 0x000000ff) {
4135 func = read_dds_level_bgr8;
4136 } else if (header.pf.rgb_bitcount == 24 &&
4137 header.pf.r_mask == 0x000000ff &&
4138 header.pf.g_mask == 0x0000ff00 &&
4139 header.pf.b_mask == 0x00ff0000) {
4140 func = read_dds_level_rgb8;
4141
4142 } else if (header.pf.r_mask != 0 &&
4143 header.pf.g_mask == 0 &&
4144 header.pf.b_mask == 0) {
4145 func = read_dds_level_luminance_uncompressed;
4146 format = F_luminance;
4147 }
4148 }
4149 }
4150
4151 do_setup_texture(cdata, texture_type, header.width, header.height, header.depth,
4152 component_type, format);
4153
4154 cdata->_orig_file_x_size = cdata->_x_size;
4155 cdata->_orig_file_y_size = cdata->_y_size;
4156 cdata->_compression = compression;
4157 cdata->_ram_image_compression = compression;
4158
4159 if (!header_only) {
4160 switch (texture_type) {
4161 case TT_3d_texture:
4162 {
4163 // 3-d textures store all the depth slices for mipmap level 0, then
4164 // all the depth slices for mipmap level 1, and so on.
4165 for (int n = 0; n < (int)header.num_levels; ++n) {
4166 int z_size = do_get_expected_mipmap_z_size(cdata, n);
4167 pvector<PTA_uchar> pages;
4168 size_t page_size = 0;
4169 int z;
4170 for (z = 0; z < z_size; ++z) {
4171 PTA_uchar page = func(this, cdata, header, n, in);
4172 if (page.is_null()) {
4173 return false;
4174 }
4175 nassertr(page_size == 0 || page_size == page.size(), false);
4176 page_size = page.size();
4177 pages.push_back(page);
4178 }
4179 // Now reassemble the pages into one big image. Because this is a
4180 // Microsoft format, the images are stacked in reverse order; re-
4181 // reverse them.
4182 PTA_uchar image = PTA_uchar::empty_array(page_size * z_size);
4183 unsigned char *imagep = (unsigned char *)image.p();
4184 for (z = 0; z < z_size; ++z) {
4185 int fz = z_size - 1 - z;
4186 memcpy(imagep + z * page_size, pages[fz].p(), page_size);
4187 }
4188
4189 do_set_ram_mipmap_image(cdata, n, image, page_size);
4190 }
4191 }
4192 break;
4193
4194 case TT_cube_map:
4195 {
4196 // Cube maps store all the mipmap levels for face 0, then all the
4197 // mipmap levels for face 1, and so on.
4198 pvector<pvector<PTA_uchar> > pages;
4199 pages.reserve(6);
4200 int z, n;
4201 for (z = 0; z < 6; ++z) {
4202 pages.push_back(pvector<PTA_uchar>());
4203 pvector<PTA_uchar> &levels = pages.back();
4204 levels.reserve(header.num_levels);
4205
4206 for (n = 0; n < (int)header.num_levels; ++n) {
4207 PTA_uchar image = func(this, cdata, header, n, in);
4208 if (image.is_null()) {
4209 return false;
4210 }
4211 levels.push_back(image);
4212 }
4213 }
4214
4215 // Now, for each mipmap level, reassemble the six faces into one big
4216 // image.  Because this is a Microsoft format, the faces are stored in a
4217 // rotated order.
4218 static const int level_remap[6] = {
4219 0, 1, 5, 4, 2, 3
4220 };
4221 for (n = 0; n < (int)header.num_levels; ++n) {
4222 size_t page_size = pages[0][n].size();
4223 PTA_uchar image = PTA_uchar::empty_array(page_size * 6);
4224 unsigned char *imagep = (unsigned char *)image.p();
4225 for (z = 0; z < 6; ++z) {
4226 int fz = level_remap[z];
4227 nassertr(pages[fz][n].size() == page_size, false);
4228 memcpy(imagep + z * page_size, pages[fz][n].p(), page_size);
4229 }
4230
4231 do_set_ram_mipmap_image(cdata, n, image, page_size);
4232 }
4233 }
4234 break;
4235
4236 case TT_2d_texture_array:
4237 case TT_cube_map_array: //TODO: rearrange cube map array faces?
4238 {
4239 // Texture arrays store all the mipmap levels for layer 0, then all
4240 // the mipmap levels for layer 1, and so on.
4241 pvector<pvector<PTA_uchar> > pages;
4242 pages.reserve(header.depth);
4243 int z, n;
4244 for (z = 0; z < (int)header.depth; ++z) {
4245 pages.push_back(pvector<PTA_uchar>());
4246 pvector<PTA_uchar> &levels = pages.back();
4247 levels.reserve(header.num_levels);
4248
4249 for (n = 0; n < (int)header.num_levels; ++n) {
4250 PTA_uchar image = func(this, cdata, header, n, in);
4251 if (image.is_null()) {
4252 return false;
4253 }
4254 levels.push_back(image);
4255 }
4256 }
4257
4258 // Now, for each level, reassemble the pages into one big image.
4259 for (n = 0; n < (int)header.num_levels; ++n) {
4260 size_t page_size = pages[0][n].size();
4261 PTA_uchar image = PTA_uchar::empty_array(page_size * header.depth);
4262 unsigned char *imagep = (unsigned char *)image.p();
4263 for (z = 0; z < (int)header.depth; ++z) {
4264 nassertr(pages[z][n].size() == page_size, false);
4265 memcpy(imagep + z * page_size, pages[z][n].p(), page_size);
4266 }
4267
4268 do_set_ram_mipmap_image(cdata, n, image, page_size);
4269 }
4270 }
4271 break;
4272
4273 default:
4274 // Normal 2-d textures simply store the mipmap levels.
4275 {
4276 for (int n = 0; n < (int)header.num_levels; ++n) {
4277 PTA_uchar image = func(this, cdata, header, n, in);
4278 if (image.is_null()) {
4279 return false;
4280 }
4281 do_set_ram_mipmap_image(cdata, n, image, 0);
4282 }
4283 }
4284 }
4285 cdata->_has_read_pages = true;
4286 cdata->_has_read_mipmaps = true;
4287 cdata->_num_mipmap_levels_read = cdata->_ram_images.size();
4288 }
4289
4290 if (in.fail()) {
4291 gobj_cat.error()
4292 << filename << ": truncated DDS file.\n";
4293 return false;
4294 }
4295
4296 cdata->_loaded_from_image = true;
4297 cdata->_loaded_from_txo = true;
4298
4299 return true;
4300}
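// Usage sketch (illustrative only, not part of the original texture.cxx; the
// filename is hypothetical).  The DDS reader above is normally reached through
// the public Texture::read() interface, which dispatches on the ".dds"
// extension.
static PT(Texture) example_load_dds_texture() {
  PT(Texture) tex = new Texture("example");
  if (!tex->read(Filename("example.dds"))) {
    gobj_cat.error() << "Failed to read example.dds\n";
  }
  return tex;
}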
4301
4302/**
4303 * Called internally when read() detects a KTX file. Assumes the lock is
4304 * already held.
4305 */
4306bool Texture::
4307do_read_ktx_file(CData *cdata, const Filename &fullpath, bool header_only) {
4308 VirtualFileSystem *vfs = VirtualFileSystem::get_global_ptr();
4309
4310 Filename filename = Filename::binary_filename(fullpath);
4311 PT(VirtualFile) file = vfs->get_file(filename);
4312 if (file == nullptr) {
4313 // No such file.
4314 gobj_cat.error()
4315 << "Could not find " << fullpath << "\n";
4316 return false;
4317 }
4318
4319 if (gobj_cat.is_debug()) {
4320 gobj_cat.debug()
4321 << "Reading KTX file " << filename << "\n";
4322 }
4323
4324 istream *in = file->open_read_file(true);
4325 if (in == nullptr) {
4326 gobj_cat.error()
4327 << "Failed to open " << filename << " for reading.\n";
4328 return false;
4329 }
4330
4331 bool success = do_read_ktx(cdata, *in, fullpath, header_only);
4332 vfs->close_read_file(in);
4333
4334 if (!has_name()) {
4335 set_name(fullpath.get_basename_wo_extension());
4336 }
4337
4338 cdata->_fullpath = fullpath;
4339 cdata->_alpha_fullpath = Filename();
4340 cdata->_keep_ram_image = false;
4341
4342 return success;
4343}
4344
4345/**
4346 * Reads texture data from an already-opened KTX stream.  Assumes the lock is held.
4347 */
4348bool Texture::
4349do_read_ktx(CData *cdata, istream &in, const string &filename, bool header_only) {
4350 StreamReader ktx(in);
4351
4352 unsigned char magic[12];
4353 if (ktx.extract_bytes(magic, 12) != 12 ||
4354 memcmp(magic, "\xABKTX 11\xBB\r\n\x1A\n", 12) != 0) {
4355 gobj_cat.error()
4356 << filename << " is not a KTX file.\n";
4357 return false;
4358 }
4359
4360 // See: https://www.khronos.org/opengles/sdk/tools/KTX/file_format_spec/
4361 uint32_t gl_type, /*type_size,*/ gl_format, internal_format, gl_base_format,
4362 width, height, depth, num_array_elements, num_faces, num_mipmap_levels,
4363 kvdata_size;
4364
4365 bool big_endian;
4366 if (ktx.get_uint32() == 0x04030201) {
4367 big_endian = false;
4368 gl_type = ktx.get_uint32();
4369 /*type_size = */ktx.get_uint32();
4370 gl_format = ktx.get_uint32();
4371 internal_format = ktx.get_uint32();
4372 gl_base_format = ktx.get_uint32();
4373 width = ktx.get_uint32();
4374 height = ktx.get_uint32();
4375 depth = ktx.get_uint32();
4376 num_array_elements = ktx.get_uint32();
4377 num_faces = ktx.get_uint32();
4378 num_mipmap_levels = ktx.get_uint32();
4379 kvdata_size = ktx.get_uint32();
4380 } else {
4381 big_endian = true;
4382 gl_type = ktx.get_be_uint32();
4383 /*type_size = */ktx.get_be_uint32();
4384 gl_format = ktx.get_be_uint32();
4385 internal_format = ktx.get_be_uint32();
4386 gl_base_format = ktx.get_be_uint32();
4387 width = ktx.get_be_uint32();
4388 height = ktx.get_be_uint32();
4389 depth = ktx.get_be_uint32();
4390 num_array_elements = ktx.get_be_uint32();
4391 num_faces = ktx.get_be_uint32();
4392 num_mipmap_levels = ktx.get_be_uint32();
4393 kvdata_size = ktx.get_be_uint32();
4394 }
4395
4396 // Skip metadata section.
4397 ktx.skip_bytes(kvdata_size);
4398
4399 ComponentType type;
4400 CompressionMode compression;
4401 Format format;
4402 bool swap_bgr = false;
4403
4404 if (gl_type == 0 || gl_format == 0) {
4405 // Compressed texture.
4406 if (gl_type > 0 || gl_format > 0) {
4407 gobj_cat.error()
4408 << "Compressed textures must have both type and format set to 0.\n";
4409 return false;
4410 }
4411 type = T_unsigned_byte;
4412 compression = CM_on;
4413
4414 KTXFormat base_format;
4415 switch ((KTXCompressedFormat)internal_format) {
4416 case KTX_COMPRESSED_RED:
4417 format = F_red;
4418 base_format = KTX_RED;
4419 break;
4420 case KTX_COMPRESSED_RG:
4421 format = F_rg;
4422 base_format = KTX_RG;
4423 break;
4424 case KTX_COMPRESSED_RGB:
4425 format = F_rgb;
4426 base_format = KTX_RGB;
4427 break;
4428 case KTX_COMPRESSED_RGBA:
4429 format = F_rgba;
4430 base_format = KTX_RGBA;
4431 break;
4432 case KTX_COMPRESSED_SRGB:
4433 format = F_srgb;
4434 base_format = KTX_SRGB;
4435 break;
4436 case KTX_COMPRESSED_SRGB_ALPHA:
4437 format = F_srgb_alpha;
4438 base_format = KTX_SRGB_ALPHA;
4439 break;
4440 case KTX_COMPRESSED_RGB_FXT1_3DFX:
4441 format = F_rgb;
4442 base_format = KTX_RGB;
4443 compression = CM_fxt1;
4444 break;
4445 case KTX_COMPRESSED_RGBA_FXT1_3DFX:
4446 format = F_rgba;
4447 base_format = KTX_RGBA;
4448 compression = CM_fxt1;
4449 break;
4450 case KTX_COMPRESSED_RGB_S3TC_DXT1:
4451 format = F_rgb;
4452 base_format = KTX_RGB;
4453 compression = CM_dxt1;
4454 break;
4455 case KTX_COMPRESSED_RGBA_S3TC_DXT1:
4456 format = F_rgbm;
4457 base_format = KTX_RGB;
4458 compression = CM_dxt1;
4459 break;
4460 case KTX_COMPRESSED_RGBA_S3TC_DXT3:
4461 format = F_rgba;
4462 base_format = KTX_RGBA;
4463 compression = CM_dxt3;
4464 break;
4465 case KTX_COMPRESSED_RGBA_S3TC_DXT5:
4466 format = F_rgba;
4467 base_format = KTX_RGBA;
4468 compression = CM_dxt5;
4469 break;
4470 case KTX_COMPRESSED_SRGB_ALPHA_S3TC_DXT1:
4471 format = F_srgb_alpha;
4472 base_format = KTX_SRGB_ALPHA;
4473 compression = CM_dxt1;
4474 break;
4475 case KTX_COMPRESSED_SRGB_ALPHA_S3TC_DXT3:
4476 format = F_srgb_alpha;
4477 base_format = KTX_SRGB_ALPHA;
4478 compression = CM_dxt3;
4479 break;
4480 case KTX_COMPRESSED_SRGB_ALPHA_S3TC_DXT5:
4481 format = F_srgb_alpha;
4482 base_format = KTX_SRGB_ALPHA;
4483 compression = CM_dxt5;
4484 break;
4485 case KTX_COMPRESSED_SRGB_S3TC_DXT1:
4486 format = F_srgb;
4487 base_format = KTX_SRGB;
4488 compression = CM_dxt1;
4489 break;
4490 case KTX_COMPRESSED_RED_RGTC1:
4491 case KTX_COMPRESSED_SIGNED_RED_RGTC1:
4492 format = F_red;
4493 base_format = KTX_RED;
4494 compression = CM_rgtc;
4495 break;
4496 case KTX_COMPRESSED_RG_RGTC2:
4497 case KTX_COMPRESSED_SIGNED_RG_RGTC2:
4498 format = F_rg;
4499 base_format = KTX_RG;
4500 compression = CM_rgtc;
4501 break;
4502 case KTX_ETC1_RGB8:
4503 format = F_rgb;
4504 base_format = KTX_RGB;
4505 compression = CM_etc1;
4506 break;
4507 case KTX_ETC1_SRGB8:
4508 format = F_srgb;
4509 base_format = KTX_SRGB;
4510 compression = CM_etc1;
4511 break;
4512 case KTX_COMPRESSED_RGB8_ETC2:
4513 format = F_rgb;
4514 base_format = KTX_RGB;
4515 compression = CM_etc2;
4516 break;
4517 case KTX_COMPRESSED_SRGB8_ETC2:
4518 format = F_srgb;
4519 base_format = KTX_SRGB;
4520 compression = CM_etc2;
4521 break;
4522 case KTX_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2:
4523 format = F_rgbm;
4524 base_format = KTX_RGBA;
4525 compression = CM_etc2;
4526 break;
4527 case KTX_COMPRESSED_SRGB8_PUNCHTHROUGH_ALPHA1_ETC2:
4528 format = F_rgbm;
4529 base_format = KTX_SRGB8_ALPHA8;
4530 compression = CM_etc2;
4531 break;
4532 case KTX_COMPRESSED_RGBA8_ETC2_EAC:
4533 format = F_rgba;
4534 base_format = KTX_RGBA;
4535 compression = CM_etc2;
4536 break;
4537 case KTX_COMPRESSED_SRGB8_ALPHA8_ETC2_EAC:
4538 format = F_srgb_alpha;
4539 base_format = KTX_SRGB8_ALPHA8;
4540 compression = CM_etc2;
4541 break;
4542 case KTX_COMPRESSED_R11_EAC:
4543 case KTX_COMPRESSED_SIGNED_R11_EAC:
4544 format = F_red;
4545 base_format = KTX_RED;
4546 compression = CM_eac;
4547 break;
4548 case KTX_COMPRESSED_RG11_EAC:
4549 case KTX_COMPRESSED_SIGNED_RG11_EAC:
4550 format = F_rg;
4551 base_format = KTX_RG;
4552 compression = CM_eac;
4553 break;
4554 case KTX_COMPRESSED_SRGB_ALPHA_PVRTC_2BPPV1:
4555 format = F_srgb_alpha;
4556 base_format = KTX_SRGB_ALPHA;
4557 compression = CM_pvr1_2bpp;
4558 break;
4559 case KTX_COMPRESSED_SRGB_ALPHA_PVRTC_4BPPV1:
4560 format = F_srgb_alpha;
4561 base_format = KTX_SRGB_ALPHA;
4562 compression = CM_pvr1_4bpp;
4563 break;
4564 case KTX_COMPRESSED_RGBA_BPTC_UNORM:
4565 case KTX_COMPRESSED_SRGB_ALPHA_BPTC_UNORM:
4566 case KTX_COMPRESSED_RGB_BPTC_SIGNED_FLOAT:
4567 case KTX_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT:
4568 default:
4569 gobj_cat.error()
4570 << filename << " has unsupported compressed internal format " << internal_format << "\n";
4571 return false;
4572 }
4573
4574 if (base_format != gl_base_format) {
4575 gobj_cat.error()
4576 << filename << " has internal format that is incompatible with base "
4577 "format (0x" << std::hex << gl_base_format << ", expected 0x"
4578 << base_format << std::dec << ")\n";
4579 return false;
4580 }
4581
4582 } else {
4583 // Uncompressed texture.
4584 compression = CM_off;
4585 switch ((KTXType)gl_type) {
4586 case KTX_BYTE:
4587 type = T_byte;
4588 break;
4589 case KTX_UNSIGNED_BYTE:
4590 type = T_unsigned_byte;
4591 break;
4592 case KTX_SHORT:
4593 type = T_short;
4594 break;
4595 case KTX_UNSIGNED_SHORT:
4596 type = T_unsigned_short;
4597 break;
4598 case KTX_INT:
4599 type = T_int;
4600 break;
4601 case KTX_UNSIGNED_INT:
4602 type = T_unsigned_int;
4603 break;
4604 case KTX_FLOAT:
4605 type = T_float;
4606 break;
4607 case KTX_HALF_FLOAT:
4608 type = T_half_float;
4609 break;
4610 case KTX_UNSIGNED_INT_24_8:
4611 type = T_unsigned_int_24_8;
4612 break;
4613 default:
4614 gobj_cat.error()
4615 << filename << " has unsupported component type " << gl_type << "\n";
4616 return false;
4617 }
4618
4619 if (gl_format != gl_base_format) {
4620 gobj_cat.error()
4621 << filename << " has mismatched formats: " << gl_format << " != "
4622 << gl_base_format << "\n";
4623 }
4624
4625 switch (gl_format) {
4626 case KTX_DEPTH_COMPONENT:
4627 switch (internal_format) {
4628 case KTX_DEPTH_COMPONENT:
4629 format = F_depth_component;
4630 break;
4631 case KTX_DEPTH_COMPONENT16:
4632 format = F_depth_component16;
4633 break;
4634 case KTX_DEPTH_COMPONENT24:
4635 format = F_depth_component24;
4636 break;
4637 case KTX_DEPTH_COMPONENT32:
4638 case KTX_DEPTH_COMPONENT32F:
4639 format = F_depth_component32;
4640 break;
4641 default:
4642 format = F_depth_component;
4643 gobj_cat.warning()
4644 << filename << " has unsupported depth component format " << internal_format << "\n";
4645 }
4646 break;
4647
4648 case KTX_DEPTH_STENCIL:
4649 format = F_depth_stencil;
4650 if (internal_format != KTX_DEPTH_STENCIL &&
4651 internal_format != KTX_DEPTH24_STENCIL8) {
4652 gobj_cat.warning()
4653 << filename << " has unsupported depth stencil format " << internal_format << "\n";
4654 }
4655 break;
4656
4657 case KTX_RED:
4658 switch (internal_format) {
4659 case KTX_RED:
4660 case KTX_RED_SNORM:
4661 case KTX_R8:
4662 case KTX_R8_SNORM:
4663 format = F_red;
4664 break;
4665 case KTX_R16:
4666 case KTX_R16_SNORM:
4667 case KTX_R16F:
4668 format = F_r16;
4669 break;
4670 case KTX_R32F:
4671 format = F_r32;
4672 break;
4673 default:
4674 format = F_red;
4675 gobj_cat.warning()
4676 << filename << " has unsupported red format " << internal_format << "\n";
4677 }
4678 break;
4679
4680 case KTX_RED_INTEGER:
4681 switch (internal_format) {
4682 case KTX_R8I:
4683 case KTX_R8UI:
4684 format = F_r8i;
4685 break;
4686 case KTX_R16I:
4687 case KTX_R16UI:
4688 format = F_r16i;
4689 break;
4690 case KTX_R32I:
4691 case KTX_R32UI:
4692 format = F_r32i;
4693 break;
4694 default:
4695 gobj_cat.error()
4696 << filename << " has unsupported red integer format " << internal_format << "\n";
4697 return false;
4698 }
4699 break;
4700
4701 case KTX_GREEN:
4702 format = F_green;
4703 if (internal_format != KTX_GREEN) {
4704 gobj_cat.warning()
4705 << filename << " has unsupported green format " << internal_format << "\n";
4706 }
4707 break;
4708
4709 case KTX_BLUE:
4710 format = F_blue;
4711 if (internal_format != KTX_BLUE) {
4712 gobj_cat.warning()
4713 << filename << " has unsupported blue format " << internal_format << "\n";
4714 }
4715 break;
4716
4717 case KTX_RG:
4718 switch (internal_format) {
4719 case KTX_RG:
4720 case KTX_RG_SNORM:
4721 case KTX_RG8:
4722 case KTX_RG8_SNORM:
4723 format = F_rg;
4724 break;
4725 case KTX_RG16:
4726 case KTX_RG16_SNORM:
4727 case KTX_RG16F:
4728 format = F_rg16;
4729 break;
4730 case KTX_RG32F:
4731 format = F_rg32;
4732 break;
4733 default:
4734 format = F_rg;
4735 gobj_cat.warning()
4736 << filename << " has unsupported RG format " << internal_format << "\n";
4737 }
4738 break;
4739
4740 case KTX_RG_INTEGER:
4741 switch (internal_format) {
4742 case KTX_RG8I:
4743 case KTX_RG8UI:
4744 format = F_rg8i;
4745 break;
4746 case KTX_RG16I:
4747 case KTX_RG16UI:
4748 format = F_rg16i;
4749 break;
4750 case KTX_RG32I:
4751 case KTX_RG32UI:
4752 format = F_rg32i;
4753 break;
4754 default:
4755 gobj_cat.error()
4756 << filename << " has unsupported RG integer format " << internal_format << "\n";
4757 return false;
4758 }
4759 break;
4760
4761 case KTX_RGB:
4762 swap_bgr = true;
4763 case KTX_BGR:
4764 switch (internal_format) {
4765 case KTX_RGB:
4766 case KTX_RGB_SNORM:
4767 format = F_rgb;
4768 break;
4769 case KTX_RGB5:
4770 format = F_rgb5;
4771 break;
4772 case KTX_RGB12:
4773 format = F_rgb12;
4774 break;
4775 case KTX_R3_G3_B2:
4776 format = F_rgb332;
4777 break;
4778 case KTX_RGB9_E5:
4779 format = F_rgb9_e5;
4780 break;
4781 case KTX_R11F_G11F_B10F:
4782 format = F_r11_g11_b10;
4783 break;
4784 case KTX_RGB8:
4785 case KTX_RGB8_SNORM:
4786 format = F_rgb8;
4787 break;
4788 case KTX_RGB16:
4789 case KTX_RGB16_SNORM:
4790 case KTX_RGB16F:
4791 format = F_rgb16;
4792 break;
4793 case KTX_RGB32F:
4794 format = F_rgb32;
4795 break;
4796 case KTX_SRGB:
4797 case KTX_SRGB8:
4798 format = F_srgb;
4799 break;
4800 default:
4801 format = F_rgb;
4802 gobj_cat.warning()
4803 << filename << " has unsupported RGB format " << internal_format << "\n";
4804 }
4805 break;
4806
4807 case KTX_RGB_INTEGER:
4808 swap_bgr = true;
4809 case KTX_BGR_INTEGER:
4810 switch (internal_format) {
4811 case KTX_RGB8I:
4812 case KTX_RGB8UI:
4813 format = F_rgb8i;
4814 break;
4815 case KTX_RGB16I:
4816 case KTX_RGB16UI:
4817 format = F_rgb16i;
4818 break;
4819 case KTX_RGB32I:
4820 case KTX_RGB32UI:
4821 format = F_rgb32i;
4822 break;
4823 default:
4824 gobj_cat.error()
4825 << filename << " has unsupported RGB integer format " << internal_format << "\n";
4826 return false;
4827 }
4828 break;
4829
4830 case KTX_RGBA:
4831 swap_bgr = true;
4832 case KTX_BGRA:
4833 switch (internal_format) {
4834 case KTX_RGBA:
4835 case KTX_RGBA_SNORM:
4836 format = F_rgba;
4837 break;
4838 case KTX_RGBA4:
4839 format = F_rgba4;
4840 break;
4841 case KTX_RGB5_A1:
4842 format = F_rgba5;
4843 break;
4844 case KTX_RGBA12:
4845 format = F_rgba12;
4846 break;
4847 case KTX_RGB10_A2:
4848 format = F_rgb10_a2;
4849 break;
4850 case KTX_RGBA8:
4851 case KTX_RGBA8_SNORM:
4852 format = F_rgba8;
4853 break;
4854 case KTX_RGBA16:
4855 case KTX_RGBA16_SNORM:
4856 case KTX_RGBA16F:
4857 format = F_rgba16;
4858 break;
4859 case KTX_RGBA32F:
4860 format = F_rgba32;
4861 break;
4862 case KTX_SRGB_ALPHA:
4863 case KTX_SRGB8_ALPHA8:
4864 format = F_srgb_alpha;
4865 break;
4866 default:
4867 format = F_rgba;
4868 gobj_cat.warning()
4869 << filename << " has unsupported RGBA format " << internal_format << "\n";
4870 }
4871 break;
4872 break;
4873
4874 case KTX_RGBA_INTEGER:
4875 swap_bgr = true;
4876 case KTX_BGRA_INTEGER:
4877 switch (internal_format) {
4878 case KTX_RGBA8I:
4879 case KTX_RGBA8UI:
4880 format = F_rgba8i;
4881 break;
4882 case KTX_RGBA16I:
4883 case KTX_RGBA16UI:
4884 format = F_rgba16i;
4885 break;
4886 case KTX_RGBA32I:
4887 case KTX_RGBA32UI:
4888 format = F_rgba32i;
4889 break;
4890 default:
4891 gobj_cat.error()
4892 << filename << " has unsupported RGBA integer format " << internal_format << "\n";
4893 return false;
4894 }
4895 break;
4896
4897 case KTX_LUMINANCE:
4898 format = F_luminance;
4899 break;
4900
4901 case KTX_LUMINANCE_ALPHA:
4902 format = F_luminance_alpha;
4903 break;
4904
4905 case KTX_ALPHA:
4906 format = F_alpha;
4907 break;
4908
4909 case KTX_STENCIL_INDEX:
4910 default:
4911 gobj_cat.error()
4912 << filename << " has unsupported format " << gl_format << "\n";
4913 return false;
4914 }
4915 }
4916
4917 TextureType texture_type;
4918 if (depth > 0) {
4919 texture_type = TT_3d_texture;
4920
4921 } else if (num_faces > 1) {
4922 if (num_faces != 6) {
4923 gobj_cat.error()
4924 << filename << " has " << num_faces << " cube map faces, expected 6\n";
4925 return false;
4926 }
4927 if (width != height) {
4928 gobj_cat.error()
4929 << filename << " is a cube map, but does not have square dimensions\n";
4930 return false;
4931 }
4932 if (num_array_elements > 0) {
4933 depth = num_array_elements * 6;
4934 texture_type = TT_cube_map_array;
4935 } else {
4936 depth = 6;
4937 texture_type = TT_cube_map;
4938 }
4939
4940 } else if (height > 0) {
4941 if (num_array_elements > 0) {
4942 depth = num_array_elements;
4943 texture_type = TT_2d_texture_array;
4944 } else {
4945 depth = 1;
4946 texture_type = TT_2d_texture;
4947 }
4948
4949 } else if (width > 0) {
4950 depth = 1;
4951 if (num_array_elements > 0) {
4952 height = num_array_elements;
4953 texture_type = TT_1d_texture_array;
4954 } else {
4955 height = 1;
4956 texture_type = TT_1d_texture;
4957 }
4958
4959 } else {
4960 gobj_cat.error()
4961 << filename << " has zero size\n";
4962 return false;
4963 }
4964
4965 do_setup_texture(cdata, texture_type, width, height, depth, type, format);
4966
4967 cdata->_orig_file_x_size = cdata->_x_size;
4968 cdata->_orig_file_y_size = cdata->_y_size;
4969 cdata->_compression = compression;
4970 cdata->_ram_image_compression = compression;
4971
4972 if (!header_only) {
4973 bool generate_mipmaps = false;
4974 if (num_mipmap_levels == 0) {
4975 generate_mipmaps = true;
4976 num_mipmap_levels = 1;
4977 }
4978
4979 for (uint32_t n = 0; n < num_mipmap_levels; ++n) {
4980 uint32_t image_size;
4981 if (big_endian) {
4982 image_size = ktx.get_be_uint32();
4983 } else {
4984 image_size = ktx.get_uint32();
4985 }
4986 PTA_uchar image;
4987
4988 if (compression == CM_off) {
4989 uint32_t row_size = do_get_expected_mipmap_x_size(cdata, (int)n) * cdata->_num_components * cdata->_component_width;
4990 uint32_t num_rows = do_get_expected_mipmap_y_size(cdata, (int)n) * do_get_expected_mipmap_z_size(cdata, (int)n);
4991 uint32_t row_padded = (row_size + 3) & ~3;
4992
4993 if (image_size == row_size * num_rows) {
4994 if (row_padded != row_size) {
4995 // Someone tightly packed the image. This is invalid, but because
4996 // we like it tightly packed too, we'll read it anyway.
4997 gobj_cat.warning()
4998 << filename << " does not have proper row padding for mipmap "
4999 "level " << n << "\n";
5000 }
5001 image = PTA_uchar::empty_array(image_size);
5002 ktx.extract_bytes(image.p(), image_size);
5003
5004 } else if (image_size != row_padded * num_rows) {
5005 gobj_cat.error()
5006 << filename << " has invalid image size " << image_size
5007 << " for mipmap level " << n << " (expected "
5008 << row_padded * num_rows << ")\n";
5009 return false;
5010
5011 } else {
5012 // Read it row by row.
5013 image = PTA_uchar::empty_array(row_size * num_rows);
5014 uint32_t skip = row_padded - row_size;
5015 unsigned char *p = image.p();
5016 for (uint32_t row = 0; row < num_rows; ++row) {
5017 ktx.extract_bytes(p, row_size);
5018 ktx.skip_bytes(skip);
5019 p += row_size;
5020 }
5021 }
5022
5023 // Swap red and blue channels if necessary to match Panda conventions.
5024 if (swap_bgr) {
5025 unsigned char *begin = image.p();
5026 const unsigned char *end = image.p() + image.size();
5027 size_t skip = cdata->_num_components;
5028 nassertr(skip == 3 || skip == 4, false);
5029
5030 switch (cdata->_component_width) {
5031 case 1:
5032 for (unsigned char *p = begin; p < end; p += skip) {
5033 swap(p[0], p[2]);
5034 }
5035 break;
5036 case 2:
5037 for (short *p = (short *)begin; p < (short *)end; p += skip) {
5038 swap(p[0], p[2]);
5039 }
5040 break;
5041 case 4:
5042 for (int *p = (int *)begin; p < (int *)end; p += skip) {
5043 swap(p[0], p[2]);
5044 }
5045 break;
5046 default:
5047 nassert_raise("unexpected channel count");
5048 return false;
5049 }
5050 }
5051
5052 do_set_ram_mipmap_image(cdata, (int)n, std::move(image),
5053 row_size * do_get_expected_mipmap_y_size(cdata, (int)n));
5054
5055 } else {
5056 // Compressed image. We'll trust that the file has the right size.
5057 image = PTA_uchar::empty_array(image_size);
5058 ktx.extract_bytes(image.p(), image_size);
5059 do_set_ram_mipmap_image(cdata, (int)n, std::move(image), image_size / depth);
5060 }
5061
5062 ktx.skip_bytes(3 - ((image_size + 3) & 3));
5063 }
5064
5065 cdata->_has_read_pages = true;
5066 cdata->_has_read_mipmaps = true;
5067 cdata->_num_mipmap_levels_read = cdata->_ram_images.size();
5068
5069 if (generate_mipmaps) {
5070 do_generate_ram_mipmap_images(cdata, false);
5071 }
5072 }
5073
5074 if (in.fail()) {
5075 gobj_cat.error()
5076 << filename << ": truncated KTX file.\n";
5077 return false;
5078 }
5079
5080 cdata->_loaded_from_image = true;
5081 cdata->_loaded_from_txo = true;
5082
5083 return true;
5084}
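// Worked example of the KTX row alignment used above (a sketch, not library
// code): each row of an uncompressed mipmap level is padded to a 4-byte
// boundary.  For a 5-pixel-wide RGB8 row, row_size = 15 and the padded size is
// (15 + 3) & ~3 = 16, so one padding byte is skipped per row.
static inline uint32_t example_ktx_padded_row_size(uint32_t row_size) {
  return (row_size + 3) & ~3u;  // round up to the next multiple of 4
}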
5085
5086/**
5087 * Internal method to write a series of pages and/or mipmap levels to disk
5088 * files.
5089 */
5090bool Texture::
5091do_write(CData *cdata,
5092 const Filename &fullpath, int z, int n, bool write_pages, bool write_mipmaps) {
5093 if (is_txo_filename(fullpath)) {
5094 if (!do_has_bam_rawdata(cdata)) {
5095 do_get_bam_rawdata(cdata);
5096 }
5097 nassertr(do_has_bam_rawdata(cdata), false);
5098 return do_write_txo_file(cdata, fullpath);
5099 }
5100
5101 if (!do_has_uncompressed_ram_image(cdata)) {
5102 do_get_uncompressed_ram_image(cdata);
5103 }
5104
5105 nassertr(do_has_ram_mipmap_image(cdata, n), false);
5106 nassertr(cdata->_ram_image_compression == CM_off, false);
5107
5108 if (write_pages && write_mipmaps) {
5109 // Write a sequence of pages * mipmap levels.
5110 Filename fullpath_pattern = Filename::pattern_filename(fullpath);
5111 int num_levels = cdata->_ram_images.size();
5112
5113 for (int n = 0; n < num_levels; ++n) {
5114 int num_pages = do_get_expected_mipmap_num_pages(cdata, n);
5115
5116 for (z = 0; z < num_pages; ++z) {
5117 Filename n_pattern = Filename::pattern_filename(fullpath_pattern.get_filename_index(z));
5118
5119 if (!n_pattern.has_hash()) {
5120 gobj_cat.error()
5121 << "Filename requires two different hash sequences: " << fullpath
5122 << "\n";
5123 return false;
5124 }
5125
5126 if (!do_write_one(cdata, n_pattern.get_filename_index(n), z, n)) {
5127 return false;
5128 }
5129 }
5130 }
5131
5132 } else if (write_pages) {
5133 // Write a sequence of pages.
5134 Filename fullpath_pattern = Filename::pattern_filename(fullpath);
5135 if (!fullpath_pattern.has_hash()) {
5136 gobj_cat.error()
5137 << "Filename requires a hash mark: " << fullpath
5138 << "\n";
5139 return false;
5140 }
5141
5142 int num_pages = cdata->_z_size * cdata->_num_views;
5143 for (z = 0; z < num_pages; ++z) {
5144 if (!do_write_one(cdata, fullpath_pattern.get_filename_index(z), z, n)) {
5145 return false;
5146 }
5147 }
5148
5149 } else if (write_mipmaps) {
5150 // Write a sequence of mipmap images.
5151 Filename fullpath_pattern = Filename::pattern_filename(fullpath);
5152 if (!fullpath_pattern.has_hash()) {
5153 gobj_cat.error()
5154 << "Filename requires a hash mark: " << fullpath
5155 << "\n";
5156 return false;
5157 }
5158
5159 int num_levels = cdata->_ram_images.size();
5160 for (int n = 0; n < num_levels; ++n) {
5161 if (!do_write_one(cdata, fullpath_pattern.get_filename_index(n), z, n)) {
5162 return false;
5163 }
5164 }
5165
5166 } else {
5167 // Write a single file.
5168 if (!do_write_one(cdata, fullpath, z, n)) {
5169 return false;
5170 }
5171 }
5172
5173 return true;
5174}
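// Usage sketch (illustrative only; the filename pattern is hypothetical).  The
// hash-mark handling above is driven by the public Texture::write() interface:
// each '#' sequence in the filename is replaced with a page or mipmap index,
// and two separate hash sequences are required when writing both at once.
static bool example_write_all_mipmaps(Texture *tex) {
  // z = 0, n = 0, write_pages = false, write_mipmaps = true.
  return tex->write(Filename("level_#.png"), 0, 0, false, true);
}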
5175
5176/**
5177 * Internal method to write the indicated page and mipmap level to a disk
5178 * image file.
5179 */
5180bool Texture::
5181do_write_one(CData *cdata, const Filename &fullpath, int z, int n) {
5182 if (!do_has_ram_mipmap_image(cdata, n)) {
5183 return false;
5184 }
5185
5186 nassertr(cdata->_ram_image_compression == CM_off, false);
5187
5188 bool success;
5189 if (cdata->_component_type == T_float) {
5190 // Writing a floating-point texture.
5191 PfmFile pfm;
5192 if (!do_store_one(cdata, pfm, z, n)) {
5193 return false;
5194 }
5195 success = pfm.write(fullpath);
5196 } else {
5197 // Writing a normal, integer texture.
5198 PNMFileType *type =
5199 PNMFileTypeRegistry::get_global_ptr()->get_type_from_extension(fullpath);
5200 if (type == nullptr) {
5201 gobj_cat.error()
5202 << "Texture::write() - couldn't determine type from extension: " << fullpath << endl;
5203 return false;
5204 }
5205
5206 PNMImage pnmimage;
5207 if (!do_store_one(cdata, pnmimage, z, n)) {
5208 return false;
5209 }
5210 success = pnmimage.write(fullpath, type);
5211 }
5212
5213 if (!success) {
5214 gobj_cat.error()
5215 << "Texture::write() - couldn't write: " << fullpath << endl;
5216 return false;
5217 }
5218
5219 return true;
5220}
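// Note: textures with T_float components take the PfmFile path above, which
// preserves the floating-point data (for instance when writing to a .pfm
// filename), while integer-component textures go through PNMImage as usual.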
5221
5222/**
5223 * Internal method to copy a page and/or mipmap level to a PNMImage.
5224 */
5225bool Texture::
5226do_store_one(CData *cdata, PNMImage &pnmimage, int z, int n) {
5227 // First, reload the ram image if necessary.
5228 do_get_uncompressed_ram_image(cdata);
5229
5230 if (!do_has_ram_mipmap_image(cdata, n)) {
5231 return false;
5232 }
5233
5234 nassertr(z >= 0 && z < do_get_expected_mipmap_num_pages(cdata, n), false);
5235 nassertr(cdata->_ram_image_compression == CM_off, false);
5236
5237 if (cdata->_component_type == T_float) {
5238 // PNMImage by way of PfmFile.
5239 PfmFile pfm;
5240 bool success = convert_to_pfm(pfm,
5241 do_get_expected_mipmap_x_size(cdata, n),
5242 do_get_expected_mipmap_y_size(cdata, n),
5243 cdata->_num_components, cdata->_component_width,
5244 cdata->_ram_images[n]._image,
5245 do_get_ram_mipmap_page_size(cdata, n), z);
5246 if (!success) {
5247 return false;
5248 }
5249 return pfm.store(pnmimage);
5250 }
5251
5252 return convert_to_pnmimage(pnmimage,
5253 do_get_expected_mipmap_x_size(cdata, n),
5254 do_get_expected_mipmap_y_size(cdata, n),
5255 cdata->_num_components, cdata->_component_type,
5256 is_srgb(cdata->_format),
5257 cdata->_ram_images[n]._image,
5258 do_get_ram_mipmap_page_size(cdata, n), z);
5259}
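// Usage sketch (illustrative only; the output filename is hypothetical).  The
// conversion above backs the public Texture::store() interface, which copies
// the texture image into a PNMImage.
static bool example_store_texture(Texture *tex) {
  PNMImage img;
  if (!tex->store(img)) {
    return false;
  }
  return img.write(Filename("texture_copy.png"));
}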
5260
5261/**
5262 * Internal method to copy a page and/or mipmap level to a PfmFile.
5263 */
5264bool Texture::
5265do_store_one(CData *cdata, PfmFile &pfm, int z, int n) {
5266 // First, reload the ram image if necessary.
5267 do_get_uncompressed_ram_image(cdata);
5268
5269 if (!do_has_ram_mipmap_image(cdata, n)) {
5270 return false;
5271 }
5272
5273 nassertr(z >= 0 && z < do_get_expected_mipmap_num_pages(cdata, n), false);
5274 nassertr(cdata->_ram_image_compression == CM_off, false);
5275
5276 if (cdata->_component_type != T_float) {
5277 // PfmFile by way of PNMImage.
5278 PNMImage pnmimage;
5279 bool success =
5280 convert_to_pnmimage(pnmimage,
5281 do_get_expected_mipmap_x_size(cdata, n),
5282 do_get_expected_mipmap_y_size(cdata, n),
5283 cdata->_num_components, cdata->_component_type,
5284 is_srgb(cdata->_format),
5285 cdata->_ram_images[n]._image,
5286 do_get_ram_mipmap_page_size(cdata, n), z);
5287 if (!success) {
5288 return false;
5289 }
5290 return pfm.load(pnmimage);
5291 }
5292
5293 return convert_to_pfm(pfm,
5294 do_get_expected_mipmap_x_size(cdata, n),
5295 do_get_expected_mipmap_y_size(cdata, n),
5296 cdata->_num_components, cdata->_component_width,
5297 cdata->_ram_images[n]._image,
5298 do_get_ram_mipmap_page_size(cdata, n), z);
5299}
5300
5301/**
5302 * Called internally when write() detects a txo filename.
5303 */
5304bool Texture::
5305do_write_txo_file(const CData *cdata, const Filename &fullpath) const {
5306 VirtualFileSystem *vfs = VirtualFileSystem::get_global_ptr();
5307 Filename filename = Filename::binary_filename(fullpath);
5308 ostream *out = vfs->open_write_file(filename, true, true);
5309 if (out == nullptr) {
5310 gobj_cat.error()
5311 << "Unable to open " << filename << "\n";
5312 return false;
5313 }
5314
5315 bool success = do_write_txo(cdata, *out, fullpath);
5316 vfs->close_write_file(out);
5317 return success;
5318}
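// Usage sketch (illustrative only; the filename is hypothetical).  Writing to
// a filename with the .txo extension from the public Texture::write()
// interface ends up in do_write_txo_file() above, which embeds the raw image
// data in a Bam-format texture object.
static bool example_write_txo(Texture *tex) {
  return tex->write(Filename("cached_texture.txo"));
}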
5319
5320/**
5321 * Writes the texture data to the given stream as a Bam-format texture object.
5322 */
5323bool Texture::
5324do_write_txo(const CData *cdata, ostream &out, const string &filename) const {
5325 DatagramOutputFile dout;
5326
5327 if (!dout.open(out, filename)) {
5328 gobj_cat.error()
5329 << "Could not write texture object: " << filename << "\n";
5330 return false;
5331 }
5332
5333 if (!dout.write_header(_bam_header)) {
5334 gobj_cat.error()
5335 << "Unable to write to " << filename << "\n";
5336 return false;
5337 }
5338
5339 BamWriter writer(&dout);
5340 if (!writer.init()) {
5341 return false;
5342 }
5343
5344 writer.set_file_texture_mode(BamWriter::BTM_rawdata);
5345
5346 if (!writer.write_object(this)) {
5347 return false;
5348 }
5349
5350 if (!do_has_bam_rawdata(cdata)) {
5351 gobj_cat.error()
5352 << get_name() << " does not have ram image\n";
5353 return false;
5354 }
5355
5356 return true;
5357}
5358
5359/**
5360 * If the texture has a ram image already, this acquires the CData write lock
5361 * and returns it.
5362 *
5363 * If the texture lacks a ram image, this performs do_reload_ram_image(), but
5364 * without holding the lock on this particular Texture object, to avoid
5365 * holding the lock across what might be a slow operation. Instead, the
5366 * reload is performed in a copy of the texture object, and then the lock is
5367 * acquired and the data is copied in.
5368 *
5369 * In any case, the return value is a locked CData object, which must be
5370 * released with an explicit call to release_write(). The CData object will
5371 * have a ram image unless for some reason do_reload_ram_image() fails.
5372 */
5373Texture::CData *Texture::
5374unlocked_ensure_ram_image(bool allow_compression) {
5375 Thread *current_thread = Thread::get_current_thread();
5376
5377 // First, wait for any other threads that might be simultaneously performing
5378 // the same operation.
5379 MutexHolder holder(_lock);
5380 while (_reloading) {
5381 _cvar.wait();
5382 }
5383
5384 // Then make sure we still need to reload before continuing.
5385 const CData *cdata = _cycler.read(current_thread);
5386 bool has_ram_image = do_has_ram_image(cdata);
5387 if (has_ram_image && !allow_compression && cdata->_ram_image_compression != Texture::CM_off) {
5388 // If we don't want compression, but the ram image we have is pre-
5389 // compressed, we don't consider it.
5390 has_ram_image = false;
5391 }
5392 if (has_ram_image || !do_can_reload(cdata)) {
5393 // We don't need to reload after all, or maybe we can't reload anyway.
5394 // Return, but elevate the lock first, as we promised.
5395 return _cycler.elevate_read_upstream(cdata, false, current_thread);
5396 }
5397
5398 // We need to reload.
5399 nassertr(!_reloading, nullptr);
5400 _reloading = true;
5401
5402 PT(Texture) tex = do_make_copy(cdata);
5403 _cycler.release_read(cdata);
5404 _lock.unlock();
5405
5406 // Perform the actual reload in a copy of the texture, while our own mutex
5407 // is left unlocked.
5408 CDWriter cdata_tex(tex->_cycler, true);
5409 tex->do_reload_ram_image(cdata_tex, allow_compression);
5410
5411 _lock.lock();
5412
5413 CData *cdataw = _cycler.write_upstream(false, current_thread);
5414
5415 // Rather than calling do_assign(), which would copy *all* of the reloaded
5416 // texture's properties over, we only copy in the ones which are relevant to
5417 // the ram image. This way, if the properties have changed during the
5418 // reload (for instance, because we reloaded a txo), it won't contaminate
5419 // the original texture.
5420 cdataw->_orig_file_x_size = cdata_tex->_orig_file_x_size;
5421 cdataw->_orig_file_y_size = cdata_tex->_orig_file_y_size;
5422
5423 // If any of *these* properties have changed, the texture has changed in
5424 // some fundamental way. Update it appropriately.
5425 if (cdata_tex->_x_size != cdataw->_x_size ||
5426 cdata_tex->_y_size != cdataw->_y_size ||
5427 cdata_tex->_z_size != cdataw->_z_size ||
5428 cdata_tex->_num_views != cdataw->_num_views ||
5429 cdata_tex->_num_components != cdataw->_num_components ||
5430 cdata_tex->_component_width != cdataw->_component_width ||
5431 cdata_tex->_texture_type != cdataw->_texture_type ||
5432 cdata_tex->_component_type != cdataw->_component_type) {
5433
5434 cdataw->_x_size = cdata_tex->_x_size;
5435 cdataw->_y_size = cdata_tex->_y_size;
5436 cdataw->_z_size = cdata_tex->_z_size;
5437 cdataw->_num_views = cdata_tex->_num_views;
5438
5439 cdataw->_num_components = cdata_tex->_num_components;
5440 cdataw->_component_width = cdata_tex->_component_width;
5441 cdataw->_texture_type = cdata_tex->_texture_type;
5442 cdataw->_format = cdata_tex->_format;
5443 cdataw->_component_type = cdata_tex->_component_type;
5444
5445 cdataw->inc_properties_modified();
5446 cdataw->inc_image_modified();
5447 }
5448
5449 cdataw->_keep_ram_image = cdata_tex->_keep_ram_image;
5450 cdataw->_ram_image_compression = cdata_tex->_ram_image_compression;
5451 cdataw->_ram_images = cdata_tex->_ram_images;
5452
5453 nassertr(_reloading, nullptr);
5454 _reloading = false;
5455
5456 // We don't generally increment the cdata->_image_modified semaphore,
5457 // because this is just a reload, and presumably the image hasn't changed
5458 // (unless we hit the if condition above).
5459
5460 _cvar.notify_all();
5461
5462 // Return the still-locked cdata.
5463 return cdataw;
5464}
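// Caller-pattern sketch (illustrative comments only, not part of the original
// source).  The CData returned above is locked for writing and must be
// released explicitly, along the lines of:
//
//   Texture::CData *cdataw = unlocked_ensure_ram_image(true);
//   // ... use cdataw->_ram_images ...
//   _cycler.release_write(cdataw);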
5465
5466/**
5467 * Called when the Texture image is required but the ram image is not
5468 * available, this will reload it from disk or otherwise do whatever is
5469 * required to make it available, if possible.
5470 *
5471 * Assumes the lock is already held. The lock will be held during the
5472 * duration of this operation.
5473 */
5474void Texture::
5475do_reload_ram_image(CData *cdata, bool allow_compression) {
5476 BamCache *cache = BamCache::get_global_ptr();
5477 PT(BamCacheRecord) record;
5478
5479 if (!do_has_compression(cdata)) {
5480 allow_compression = false;
5481 }
5482
5483 if ((cache->get_cache_textures() || (allow_compression && cache->get_cache_compressed_textures())) && !textures_header_only) {
5484 // See if the texture can be found in the on-disk cache, if it is active.
5485
5486 record = cache->lookup(cdata->_fullpath, "txo");
5487 if (record != nullptr &&
5488 record->has_data()) {
5489 PT(Texture) tex = DCAST(Texture, record->get_data());
5490
5491 // But don't use the cache record if the config parameters have changed,
5492 // and we want a different-sized texture now.
5493 int x_size = cdata->_orig_file_x_size;
5494 int y_size = cdata->_orig_file_y_size;
5495 do_adjust_this_size(cdata, x_size, y_size, cdata->_filename.get_basename(), true);
5496 if (x_size != tex->get_x_size() || y_size != tex->get_y_size()) {
5497 if (gobj_cat.is_debug()) {
5498 gobj_cat.debug()
5499 << "Cached texture " << *this << " has size "
5500 << tex->get_x_size() << " x " << tex->get_y_size()
5501 << " instead of " << x_size << " x " << y_size
5502 << "; ignoring cache.\n";
5503 }
5504 } else {
5505 // Also don't keep the cached version if it's compressed but we want
5506 // uncompressed.
5507 if (!allow_compression && tex->get_ram_image_compression() != Texture::CM_off) {
5508 if (gobj_cat.is_debug()) {
5509 gobj_cat.debug()
5510 << "Cached texture " << *this
5511 << " is compressed in cache; ignoring cache.\n";
5512 }
5513 } else {
5514 gobj_cat.info()
5515 << "Texture " << get_name() << " reloaded from disk cache\n";
5516 // We don't want to replace all the texture parameters--for
5517 // instance, we don't want to change the filter type or the border
5518 // color or anything--we just want to get the image and necessary
5519 // associated parameters.
5520 CDReader cdata_tex(tex->_cycler);
5521 cdata->_x_size = cdata_tex->_x_size;
5522 cdata->_y_size = cdata_tex->_y_size;
5523 if (cdata->_num_components != cdata_tex->_num_components) {
5524 cdata->_num_components = cdata_tex->_num_components;
5525 cdata->_format = cdata_tex->_format;
5526 }
5527 cdata->_component_type = cdata_tex->_component_type;
5528 cdata->_compression = cdata_tex->_compression;
5529 cdata->_ram_image_compression = cdata_tex->_ram_image_compression;
5530 cdata->_ram_images = cdata_tex->_ram_images;
5531 cdata->_loaded_from_image = true;
5532
5533 bool was_compressed = (cdata->_ram_image_compression != CM_off);
5534 if (do_consider_auto_process_ram_image(cdata, uses_mipmaps(), allow_compression)) {
5535 bool is_compressed = (cdata->_ram_image_compression != CM_off);
5536 if (!was_compressed && is_compressed &&
5537 cache->get_cache_compressed_textures()) {
5538 // We've re-compressed the image after loading it from the
5539 // cache. To keep the cache current, rewrite it to the cache
5540 // now, in its newly compressed form.
5541 record->set_data(this, this);
5542 cache->store(record);
5543 }
5544 }
5545
5546 return;
5547 }
5548 }
5549 }
5550 }
5551
5552 gobj_cat.info()
5553 << "Reloading texture " << get_name() << "\n";
5554
5555 int z = 0;
5556 int n = 0;
5557
5558 if (cdata->_has_read_pages) {
5559 z = cdata->_z_size;
5560 }
5561 if (cdata->_has_read_mipmaps) {
5562 n = cdata->_num_mipmap_levels_read;
5563 }
5564
5565 cdata->_loaded_from_image = false;
5566 Format orig_format = cdata->_format;
5567 int orig_num_components = cdata->_num_components;
5568
5569 LoaderOptions options;
5570 if (allow_compression) {
5571 options.set_texture_flags(LoaderOptions::TF_preload |
5572 LoaderOptions::TF_allow_compression);
5573 } else {
5574 options.set_texture_flags(LoaderOptions::TF_preload);
5575 }
5576 do_read(cdata, cdata->_fullpath, cdata->_alpha_fullpath,
5577 cdata->_primary_file_num_channels, cdata->_alpha_file_channel,
5578 z, n, cdata->_has_read_pages, cdata->_has_read_mipmaps, options, nullptr);
5579
5580 if (orig_num_components == cdata->_num_components) {
5581 // Restore the original format, in case it was needlessly changed during
5582 // the reload operation.
5583 cdata->_format = orig_format;
5584 }
5585
5586 if (do_has_ram_image(cdata) && record != nullptr) {
5587 if (cache->get_cache_textures() || (cdata->_ram_image_compression != CM_off && cache->get_cache_compressed_textures())) {
5588 // Update the cache.
5589 if (record != nullptr) {
5590 record->add_dependent_file(cdata->_fullpath);
5591 }
5592 record->set_data(this, this);
5593 cache->store(record);
5594 }
5595 }
5596}
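// Note: whether the reload above may consult the on-disk cache is governed by
// BamCache::get_cache_textures() and get_cache_compressed_textures(), which in
// a default configuration correspond to the model-cache-textures and
// model-cache-compressed-textures config variables.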
5597
5598/**
5599 * This is called internally to uniquify the ram image pointer without
5600 * updating cdata->_image_modified.
5601 */
5602PTA_uchar Texture::
5603do_modify_ram_image(CData *cdata) {
5604 if (cdata->_ram_images.empty() || cdata->_ram_images[0]._image.empty() ||
5605 cdata->_ram_image_compression != CM_off) {
5606 do_make_ram_image(cdata);
5607 } else {
5608 do_clear_ram_mipmap_images(cdata);
5609 }
5610 return cdata->_ram_images[0]._image;
5611}
5612
5613/**
5614 * This is called internally to make a new ram image without updating
5615 * cdata->_image_modified.
5616 */
5617PTA_uchar Texture::
5618do_make_ram_image(CData *cdata) {
5619 int image_size = do_get_expected_ram_image_size(cdata);
5620 cdata->_ram_images.clear();
5621 cdata->_ram_images.push_back(RamImage());
5622 cdata->_ram_images[0]._page_size = do_get_expected_ram_page_size(cdata);
5623 cdata->_ram_images[0]._image = PTA_uchar::empty_array(image_size, get_class_type());
5624 cdata->_ram_images[0]._pointer_image = nullptr;
5625 cdata->_ram_image_compression = CM_off;
5626
5627 if (cdata->_has_clear_color) {
5628 // Fill the image with the clear color.
5629 unsigned char pixel[16];
5630 const int pixel_size = do_get_clear_data(cdata, pixel);
5631 nassertr(pixel_size > 0, cdata->_ram_images[0]._image);
5632
5633 unsigned char *image_data = cdata->_ram_images[0]._image;
5634 for (int i = 0; i < image_size; i += pixel_size) {
5635 memcpy(image_data + i, pixel, pixel_size);
5636 }
5637 }
5638
5639 return cdata->_ram_images[0]._image;
5640}
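// Usage sketch (illustrative only): setting a clear color before forcing a ram
// image causes the allocation above to be filled with that color.
static void example_solid_color_ram_image(Texture *tex) {
  tex->set_clear_color(LColor(0, 0, 1, 1));  // opaque blue
  tex->make_ram_image();                     // allocated and filled with blue
}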
5641
5642/**
5643 * Replaces the current system-RAM image with the new data. If compression is
5644 * not CM_off, it indicates that the new data is already pre-compressed in the
5645 * indicated format.
5646 *
5647 * This does *not* affect keep_ram_image.
5648 */
5649void Texture::
5650do_set_ram_image(CData *cdata, CPTA_uchar image, Texture::CompressionMode compression,
5651 size_t page_size) {
5652 nassertv(compression != CM_default);
5653 nassertv(compression != CM_off || image.size() == do_get_expected_ram_image_size(cdata));
5654 if (cdata->_ram_images.empty()) {
5655 cdata->_ram_images.push_back(RamImage());
5656 } else {
5657 do_clear_ram_mipmap_images(cdata);
5658 }
5659 if (page_size == 0) {
5660 page_size = image.size();
5661 }
5662 if (cdata->_ram_images[0]._image != image ||
5663 cdata->_ram_images[0]._page_size != page_size ||
5664 cdata->_ram_image_compression != compression) {
5665 cdata->_ram_images[0]._image = image.cast_non_const();
5666 cdata->_ram_images[0]._page_size = page_size;
5667 cdata->_ram_images[0]._pointer_image = nullptr;
5668 cdata->_ram_image_compression = compression;
5669 cdata->inc_image_modified();
5670 }
5671}
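// Usage sketch (illustrative only; the data buffer is hypothetical).  Handing
// pre-compressed DXT1 data to a texture through the public set_ram_image()
// interface funnels into do_set_ram_image() above.
static void example_set_dxt1_image(Texture *tex, CPTA_uchar dxt1_blocks) {
  // The data is stored as-is; CM_dxt1 records that it is already compressed.
  tex->set_ram_image(dxt1_blocks, Texture::CM_dxt1);
}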
5672
5673/**
5674 * This is called internally to uniquify the nth mipmap image pointer without
5675 * updating cdata->_image_modified.
5676 */
5677PTA_uchar Texture::
5678do_modify_ram_mipmap_image(CData *cdata, int n) {
5679 nassertr(cdata->_ram_image_compression == CM_off, PTA_uchar());
5680
5681 if (n >= (int)cdata->_ram_images.size() ||
5682 cdata->_ram_images[n]._image.empty()) {
5683 do_make_ram_mipmap_image(cdata, n);
5684 }
5685 return cdata->_ram_images[n]._image;
5686}
5687
5688/**
5689 *
5690 */
5691PTA_uchar Texture::
5692do_make_ram_mipmap_image(CData *cdata, int n) {
5693 nassertr(cdata->_ram_image_compression == CM_off, PTA_uchar(get_class_type()));
5694
5695 while (n >= (int)cdata->_ram_images.size()) {
5696 cdata->_ram_images.push_back(RamImage());
5697 }
5698
5699 size_t image_size = do_get_expected_ram_mipmap_image_size(cdata, n);
5700 cdata->_ram_images[n]._image = PTA_uchar::empty_array(image_size, get_class_type());
5701 cdata->_ram_images[n]._pointer_image = nullptr;
5702 cdata->_ram_images[n]._page_size = do_get_expected_ram_mipmap_page_size(cdata, n);
5703
5704 if (cdata->_has_clear_color) {
5705 // Fill the image with the clear color.
5706 unsigned char pixel[16];
5707 const size_t pixel_size = (size_t)do_get_clear_data(cdata, pixel);
5708 nassertr(pixel_size > 0, cdata->_ram_images[n]._image);
5709
5710 unsigned char *image_data = cdata->_ram_images[n]._image;
5711 for (size_t i = 0; i < image_size; i += pixel_size) {
5712 memcpy(image_data + i, pixel, pixel_size);
5713 }
5714 }
5715
5716 return cdata->_ram_images[n]._image;
5717}
5718
5719/**
5720 *
5721 */
5722void Texture::
5723do_set_ram_mipmap_image(CData *cdata, int n, CPTA_uchar image, size_t page_size) {
5724 nassertv(cdata->_ram_image_compression != CM_off || image.size() == do_get_expected_ram_mipmap_image_size(cdata, n));
5725
5726 while (n >= (int)cdata->_ram_images.size()) {
5727 cdata->_ram_images.push_back(RamImage());
5728 }
5729 if (page_size == 0) {
5730 page_size = image.size();
5731 }
5732
5733 if (cdata->_ram_images[n]._image != image ||
5734 cdata->_ram_images[n]._page_size != page_size) {
5735 cdata->_ram_images[n]._image = image.cast_non_const();
5736 cdata->_ram_images[n]._pointer_image = nullptr;
5737 cdata->_ram_images[n]._page_size = page_size;
5738 cdata->inc_image_modified();
5739 }
5740}
5741
 5742/**
 5743 * Fills 'into' with a single pixel representing the clear color of the
 5744 * texture, encoded in this texture's format, and returns the pixel size in
 5745 * bytes.  In other words, an uncompressed RAM image filled with the clear
 5746 * color consists of this pixel value repeated for every pixel of the
 5747 * image.
 5748 */
5749size_t Texture::
5750do_get_clear_data(const CData *cdata, unsigned char *into) const {
5751 nassertr(cdata->_has_clear_color, 0);
5752
5753 int num_components = cdata->_num_components;
5754 nassertr(num_components > 0, 0);
5755 nassertr(num_components <= 4, 0);
5756
5757 LVecBase4 clear_value = cdata->_clear_color;
5758
5759 // Swap red and blue components.
5760 if (num_components >= 3) {
5761 std::swap(clear_value[0], clear_value[2]);
5762 }
5763
5764 switch (cdata->_component_type) {
5765 case T_unsigned_byte:
5766 if (is_srgb(cdata->_format)) {
5767 xel color;
5768 xelval alpha;
5769 encode_sRGB_uchar(clear_value, color, alpha);
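      // Each case below intentionally falls through to the next, so that
      // exactly num_components bytes are written.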
5770 switch (num_components) {
5771 case 4: into[3] = (unsigned char)alpha;
5772 case 3: into[2] = (unsigned char)color.b;
5773 case 2: into[1] = (unsigned char)color.g;
5774 case 1: into[0] = (unsigned char)color.r;
5775 }
5776 } else {
5777 LColor scaled = clear_value.fmin(LColor(1)).fmax(LColor::zero());
5778 scaled *= 255;
5779 for (int i = 0; i < num_components; ++i) {
5780 into[i] = (unsigned char)scaled[i];
5781 }
5782 }
5783 break;
5784
5785 case T_unsigned_short:
5786 {
5787 LColor scaled = clear_value.fmin(LColor(1)).fmax(LColor::zero());
5788 scaled *= 65535;
5789 for (int i = 0; i < num_components; ++i) {
5790 ((unsigned short *)into)[i] = (unsigned short)scaled[i];
5791 }
5792 break;
5793 }
5794
5795 case T_float:
5796 for (int i = 0; i < num_components; ++i) {
5797 ((float *)into)[i] = clear_value[i];
5798 }
5799 break;
5800
5801 case T_unsigned_int_24_8:
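    // Pack the first component (depth, scaled to 24 bits) into the high 24
    // bits and the second component (stencil, clamped to 0-255) into the low
    // 8 bits.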
5802 nassertr(num_components == 1, 0);
5803 *((unsigned int *)into) =
5804 ((unsigned int)(clear_value[0] * 16777215) << 8) +
5805 (unsigned int)max(min(clear_value[1], (PN_stdfloat)255), (PN_stdfloat)0);
5806 break;
5807
5808 case T_int:
5809 // Note: there are no 32-bit UNORM textures. Therefore, we don't do any
5810 // normalization here, either.
5811 for (int i = 0; i < num_components; ++i) {
5812 ((int *)into)[i] = (int)clear_value[i];
5813 }
5814 break;
5815
5816 case T_byte:
5817 {
5818 LColor scaled = clear_value.fmin(LColor(1)).fmax(LColor(-1));
5819 scaled *= 127;
5820 for (int i = 0; i < num_components; ++i) {
5821 ((signed char *)into)[i] = (signed char)scaled[i];
5822 }
5823 break;
5824 }
5825
5826 case T_short:
5827 {
5828 LColor scaled = clear_value.fmin(LColor(1)).fmax(LColor(-1));
5829 scaled *= 32767;
5830 for (int i = 0; i < num_components; ++i) {
5831 ((short *)into)[i] = (short)scaled[i];
5832 }
5833 break;
5834 }
5835
5836 case T_half_float:
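    // Repack each 32-bit float as an IEEE 754 half: move the sign to bit 15,
    // rebias the exponent from 127 to 15 (clamping it to [0, 31]), and keep
    // the top 10 mantissa bits with a simple round-to-nearest adjustment.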
5837 for (int i = 0; i < num_components; ++i) {
5838 union {
5839 uint32_t ui;
5840 float uf;
5841 } v;
5842 v.uf = clear_value[i];
5843 uint16_t sign = ((v.ui & 0x80000000u) >> 16u);
5844 uint32_t mantissa = (v.ui & 0x007fffffu);
5845 uint16_t exponent = (uint16_t)std::min(std::max((int)((v.ui & 0x7f800000u) >> 23u) - 112, 0), 31);
5846 mantissa += (mantissa & 0x00001000u) << 1u;
5847 ((uint16_t *)into)[i] = (uint16_t)(sign | ((exponent << 10u) | (mantissa >> 13u)));
5848 }
5849 break;
5850
5851 case T_unsigned_int:
5852 // Note: there are no 32-bit UNORM textures. Therefore, we don't do any
5853 // normalization here, either.
5854 for (int i = 0; i < num_components; ++i) {
5855 ((unsigned int *)into)[i] = (unsigned int)clear_value[i];
5856 }
5857 }
5858
5859 return num_components * cdata->_component_width;
5860}
5861
5862/**
 5863 * Should be called after a texture has been loaded into RAM; this considers
 5864 * generating mipmaps and/or compressing the RAM image.
5865 *
5866 * Returns true if the image was modified by this operation, false if it
5867 * wasn't.
5868 */
5869bool Texture::
5870consider_auto_process_ram_image(bool generate_mipmaps, bool allow_compression) {
5871 CDWriter cdata(_cycler, false);
5872 return do_consider_auto_process_ram_image(cdata, generate_mipmaps, allow_compression);
5873}
5874
5875/**
 5876 * Should be called after a texture has been loaded into RAM; this considers
 5877 * generating mipmaps and/or compressing the RAM image.
5878 *
5879 * Returns true if the image was modified by this operation, false if it
5880 * wasn't.
5881 */
5882bool Texture::
5883do_consider_auto_process_ram_image(CData *cdata, bool generate_mipmaps,
5884 bool allow_compression) {
5885 bool modified = false;
5886
5887 if (generate_mipmaps && !driver_generate_mipmaps &&
5888 cdata->_ram_images.size() == 1) {
5889 do_generate_ram_mipmap_images(cdata, false);
5890 modified = true;
5891 }
5892
5893 if (allow_compression && !driver_compress_textures) {
5894 CompressionMode compression = cdata->_compression;
5895 if (compression == CM_default && compressed_textures) {
5896 if (cdata->_texture_type == Texture::TT_buffer_texture) {
5897 compression = CM_off;
5898 }
5899 else {
5900 compression = CM_on;
5901 }
5902 }
 5903    if (compression != CM_off && cdata->_ram_image_compression == CM_off) {
 5904      GraphicsStateGuardianBase *gsg = GraphicsStateGuardianBase::get_default_gsg();
 5905      if (do_compress_ram_image(cdata, compression, QL_default, gsg)) {
5906 if (gobj_cat.is_debug()) {
5907 gobj_cat.debug()
5908 << "Compressed " << get_name() << " with "
5909 << cdata->_ram_image_compression << "\n";
5910 }
5911 modified = true;
5912 }
5913 }
5914 }
5915
5916 return modified;
5917}
5918
 5919/**
 5920 * Compresses the RAM image in-place; returns true if it was compressed.
 5921 */
5922bool Texture::
5923do_compress_ram_image(CData *cdata, Texture::CompressionMode compression,
 5924                       Texture::QualityLevel quality_level,
 5925                       GraphicsStateGuardianBase *gsg) {
 5926  nassertr(compression != CM_off, false);
5927
5928 if (cdata->_ram_images.empty() || cdata->_ram_image_compression != CM_off) {
5929 return false;
5930 }
5931
5932 if (compression == CM_on) {
5933 // Select an appropriate compression mode automatically.
5934 switch (cdata->_format) {
5935 case Texture::F_rgbm:
5936 case Texture::F_rgb:
5937 case Texture::F_rgb5:
5938 case Texture::F_rgba5:
5939 case Texture::F_rgb8:
5940 case Texture::F_rgb12:
5941 case Texture::F_rgb332:
5942 case Texture::F_rgb16:
5943 case Texture::F_rgb32:
5944 case Texture::F_rgb10_a2:
5945 if (gsg == nullptr || gsg->get_supports_compressed_texture_format(CM_dxt1)) {
5946 compression = CM_dxt1;
5947 } else if (gsg->get_supports_compressed_texture_format(CM_dxt3)) {
5948 compression = CM_dxt3;
5949 } else if (gsg->get_supports_compressed_texture_format(CM_dxt5)) {
5950 compression = CM_dxt5;
5951 } else if (gsg->get_supports_compressed_texture_format(CM_etc2)) {
5952 compression = CM_etc2;
5953 } else if (gsg->get_supports_compressed_texture_format(CM_etc1)) {
5954 compression = CM_etc1;
5955 }
5956 break;
5957
5958 case Texture::F_rgba4:
5959 if (gsg == nullptr || gsg->get_supports_compressed_texture_format(CM_dxt3)) {
5960 compression = CM_dxt3;
5961 } else if (gsg->get_supports_compressed_texture_format(CM_dxt5)) {
5962 compression = CM_dxt5;
5963 } else if (gsg->get_supports_compressed_texture_format(CM_etc2)) {
5964 compression = CM_etc2;
5965 }
5966 break;
5967
5968 case Texture::F_rgba:
5969 case Texture::F_rgba8:
5970 case Texture::F_rgba12:
5971 case Texture::F_rgba16:
5972 case Texture::F_rgba32:
5973 if (gsg == nullptr || gsg->get_supports_compressed_texture_format(CM_dxt5)) {
5974 compression = CM_dxt5;
5975 } else if (gsg->get_supports_compressed_texture_format(CM_etc2)) {
5976 compression = CM_etc2;
5977 }
5978 break;
5979
5980 case Texture::F_red:
5981 case Texture::F_rg:
5982 if (gsg == nullptr || gsg->get_supports_compressed_texture_format(CM_rgtc)) {
5983 compression = CM_rgtc;
5984 } else if (gsg->get_supports_compressed_texture_format(CM_eac)) {
5985 compression = CM_eac;
5986 }
5987 break;
5988
5989 default:
5990 break;
5991 }
5992 }
5993
5994 // Choose an appropriate quality level.
5995 if (quality_level == Texture::QL_default) {
5996 quality_level = cdata->_quality_level;
5997 }
5998 if (quality_level == Texture::QL_default) {
5999 quality_level = texture_quality_level;
6000 }
6001
6002 if (compression == CM_rgtc) {
6003 // We should compress RGTC ourselves, as squish does not support it.
6004 if (cdata->_component_type != T_unsigned_byte) {
6005 return false;
6006 }
6007
6008 if (!do_has_all_ram_mipmap_images(cdata)) {
6009 // If we're about to compress the RAM image, we should ensure that we
6010 // have all of the mipmap levels first.
6011 do_generate_ram_mipmap_images(cdata, false);
6012 }
6013
6014 RamImages compressed_ram_images;
6015 compressed_ram_images.resize(cdata->_ram_images.size());
6016
6017 for (size_t n = 0; n < cdata->_ram_images.size(); ++n) {
6018 const RamImage *uncompressed_image = &cdata->_ram_images[n];
6019
6020 int x_size = do_get_expected_mipmap_x_size(cdata, n);
6021 int y_size = do_get_expected_mipmap_y_size(cdata, n);
6022 int num_pages = do_get_expected_mipmap_num_pages(cdata, n);
6023
6024 // It is important that we handle image sizes that aren't a multiple of
6025 // the block size, since this method may be used to compress mipmaps,
6026 // which go all the way to 1x1. Pad the image if necessary.
6027 RamImage temp_image;
6028 if ((x_size | y_size) & 0x3) {
6029 int virtual_x_size = x_size;
6030 int virtual_y_size = y_size;
6031 x_size = (x_size + 3) & ~0x3;
6032 y_size = (y_size + 3) & ~0x3;
6033
6034 temp_image._page_size = x_size * y_size * cdata->_num_components;
6035 temp_image._image = PTA_uchar::empty_array(temp_image._page_size * num_pages);
6036
6037 for (int z = 0; z < num_pages; ++z) {
6038 unsigned char *dest = temp_image._image.p() + z * temp_image._page_size;
6039 unsigned const char *src = uncompressed_image->_image.p() + z * uncompressed_image->_page_size;
6040
6041 for (int y = 0; y < virtual_y_size; ++y) {
 6042            memcpy(dest, src, virtual_x_size * cdata->_num_components);
 6043            src += virtual_x_size * cdata->_num_components;
 6044            dest += x_size * cdata->_num_components;
6045 }
6046 }
6047
6048 uncompressed_image = &temp_image;
6049 }
6050
6051 // Create a new image to hold the compressed texture pages.
6052 RamImage &compressed_image = compressed_ram_images[n];
6053 compressed_image._page_size = (x_size * y_size * cdata->_num_components) >> 1;
6054 compressed_image._image = PTA_uchar::empty_array(compressed_image._page_size * num_pages);
6055
6056 if (cdata->_num_components == 1) {
6057 do_compress_ram_image_bc4(*uncompressed_image, compressed_image,
6058 x_size, y_size, num_pages);
6059 } else if (cdata->_num_components == 2) {
6060 do_compress_ram_image_bc5(*uncompressed_image, compressed_image,
6061 x_size, y_size, num_pages);
6062 } else {
6063 // Invalid.
6064 return false;
6065 }
6066 }
6067
6068 cdata->_ram_images.swap(compressed_ram_images);
6069 cdata->_ram_image_compression = CM_rgtc;
6070 return true;
6071 }
6072
6073#ifdef HAVE_SQUISH
6074 if (cdata->_texture_type != TT_3d_texture &&
6075 cdata->_texture_type != TT_2d_texture_array &&
6076 cdata->_component_type == T_unsigned_byte) {
6077 int squish_flags = 0;
6078 switch (compression) {
6079 case CM_dxt1:
6080 squish_flags |= squish::kDxt1;
6081 break;
6082
6083 case CM_dxt3:
6084 squish_flags |= squish::kDxt3;
6085 break;
6086
6087 case CM_dxt5:
6088 squish_flags |= squish::kDxt5;
6089 break;
6090
6091 default:
6092 break;
6093 }
6094
6095 if (squish_flags != 0) {
6096 // This compression mode is supported by squish; use it.
6097 switch (quality_level) {
6098 case QL_fastest:
6099 squish_flags |= squish::kColourRangeFit;
6100 break;
6101
6102 case QL_normal:
6103 // ColourClusterFit is just too slow for everyday use.
6104 squish_flags |= squish::kColourRangeFit;
6105 // squish_flags |= squish::kColourClusterFit;
6106 break;
6107
6108 case QL_best:
6109 squish_flags |= squish::kColourIterativeClusterFit;
6110 break;
6111
6112 default:
6113 break;
6114 }
6115
6116 if (do_squish(cdata, compression, squish_flags)) {
6117 return true;
6118 }
6119 }
6120 }
6121#endif // HAVE_SQUISH
6122
6123 return false;
6124}
6125
 6126/**
 6127 * Decompresses the RAM image in-place; returns true if it was decompressed.
 6128 */
6129bool Texture::
6130do_uncompress_ram_image(CData *cdata) {
6131 nassertr(!cdata->_ram_images.empty(), false);
6132
6133 if (cdata->_ram_image_compression == CM_rgtc) {
6134 // We should decompress RGTC ourselves, as squish doesn't support it.
6135 RamImages uncompressed_ram_images;
6136 uncompressed_ram_images.resize(cdata->_ram_images.size());
6137
6138 for (size_t n = 0; n < cdata->_ram_images.size(); ++n) {
6139 const RamImage &compressed_image = cdata->_ram_images[n];
6140
6141 int x_size = do_get_expected_mipmap_x_size(cdata, n);
6142 int y_size = do_get_expected_mipmap_y_size(cdata, n);
6143 int num_pages = do_get_expected_mipmap_num_pages(cdata, n);
6144
6145 RamImage &uncompressed_image = uncompressed_ram_images[n];
6146 uncompressed_image._page_size = do_get_expected_ram_mipmap_page_size(cdata, n);
6147 uncompressed_image._image = PTA_uchar::empty_array(uncompressed_image._page_size * num_pages);
6148
6149 if (cdata->_num_components == 1) {
6150 do_uncompress_ram_image_bc4(compressed_image, uncompressed_image,
6151 x_size, y_size, num_pages);
6152 } else if (cdata->_num_components == 2) {
6153 do_uncompress_ram_image_bc5(compressed_image, uncompressed_image,
6154 x_size, y_size, num_pages);
6155 } else {
6156 // Invalid.
6157 return false;
6158 }
6159 }
6160 cdata->_ram_images.swap(uncompressed_ram_images);
6161 cdata->_ram_image_compression = CM_off;
6162 return true;
6163 }
6164
6165#ifdef HAVE_SQUISH
6166 if (cdata->_texture_type != TT_3d_texture &&
6167 cdata->_texture_type != TT_2d_texture_array &&
6168 cdata->_component_type == T_unsigned_byte) {
6169 int squish_flags = 0;
6170 switch (cdata->_ram_image_compression) {
6171 case CM_dxt1:
6172 squish_flags |= squish::kDxt1;
6173 break;
6174
6175 case CM_dxt3:
6176 squish_flags |= squish::kDxt3;
6177 break;
6178
6179 case CM_dxt5:
6180 squish_flags |= squish::kDxt5;
6181 break;
6182
6183 default:
6184 break;
6185 }
6186
6187 if (squish_flags != 0) {
6188 // This compression mode is supported by squish; use it.
6189 if (do_unsquish(cdata, squish_flags)) {
6190 return true;
6191 }
6192 }
6193 }
6194#endif // HAVE_SQUISH
6195 return false;
6196}
6197
6198/**
6199 * Compresses a RAM image using BC4 compression.
6200 */
6201void Texture::
6202do_compress_ram_image_bc4(const RamImage &uncompressed_image,
6203 RamImage &compressed_image,
6204 int x_size, int y_size, int num_pages) {
6205 int x_blocks = (x_size >> 2);
6206 int y_blocks = (y_size >> 2);
6207
6208 // NB. This algorithm isn't fully optimal, since it doesn't try to make use
6209 // of the secondary interpolation mode supported by BC4. This is not
6210 // important for most textures, but it may be added in the future.
6211
6212 nassertv((size_t)x_blocks * (size_t)y_blocks * 4 * 4 <= uncompressed_image._page_size);
6213 nassertv((size_t)x_size * (size_t)y_size == uncompressed_image._page_size);
6214
6215 static const int remap[] = {1, 7, 6, 5, 4, 3, 2, 0};
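  // Each 4x4 block is written as 8 bytes: the two endpoint values (max, then
  // min) followed by sixteen 3-bit indices.  remap[] converts a quantized
  // level (0 = min, 7 = max) into the BC4 index encoding, in which index 0
  // selects the first endpoint, index 1 the second, and indices 2-7
  // interpolate between them.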
6216
6217 for (int z = 0; z < num_pages; ++z) {
6218 unsigned char *dest = compressed_image._image.p() + z * compressed_image._page_size;
6219 unsigned const char *src = uncompressed_image._image.p() + z * uncompressed_image._page_size;
6220
6221 // Convert one 4 x 4 block at a time.
6222 for (int y = 0; y < y_blocks; ++y) {
6223 for (int x = 0; x < x_blocks; ++x) {
6224 int a, b, c, d;
6225 float fac, add;
6226 unsigned char minv, maxv;
6227 unsigned const char *blk = src;
6228
6229 // Find the minimum and maximum value in the block.
6230 minv = blk[0];
6231 maxv = blk[0];
6232 minv = min(blk[1], minv); maxv = max(blk[1], maxv);
6233 minv = min(blk[2], minv); maxv = max(blk[2], maxv);
6234 minv = min(blk[3], minv); maxv = max(blk[3], maxv);
6235 blk += x_size;
6236 minv = min(blk[0], minv); maxv = max(blk[0], maxv);
6237 minv = min(blk[1], minv); maxv = max(blk[1], maxv);
6238 minv = min(blk[2], minv); maxv = max(blk[2], maxv);
6239 minv = min(blk[3], minv); maxv = max(blk[3], maxv);
6240 blk += x_size;
6241 minv = min(blk[0], minv); maxv = max(blk[0], maxv);
6242 minv = min(blk[1], minv); maxv = max(blk[1], maxv);
6243 minv = min(blk[2], minv); maxv = max(blk[2], maxv);
6244 minv = min(blk[3], minv); maxv = max(blk[3], maxv);
6245 blk += x_size;
6246 minv = min(blk[0], minv); maxv = max(blk[0], maxv);
6247 minv = min(blk[1], minv); maxv = max(blk[1], maxv);
6248 minv = min(blk[2], minv); maxv = max(blk[2], maxv);
6249 minv = min(blk[3], minv); maxv = max(blk[3], maxv);
6250
6251 // Now calculate the index for each pixel.
6252 blk = src;
6253 if (maxv > minv) {
6254 fac = 7.5f / (maxv - minv);
6255 } else {
6256 fac = 0;
6257 }
6258 add = -minv * fac;
6259 a = (remap[(int)(blk[0] * fac + add)])
6260 | (remap[(int)(blk[1] * fac + add)] << 3)
6261 | (remap[(int)(blk[2] * fac + add)] << 6)
6262 | (remap[(int)(blk[3] * fac + add)] << 9);
6263 blk += x_size;
6264 b = (remap[(int)(blk[0] * fac + add)] << 4)
6265 | (remap[(int)(blk[1] * fac + add)] << 7)
6266 | (remap[(int)(blk[2] * fac + add)] << 10)
6267 | (remap[(int)(blk[3] * fac + add)] << 13);
6268 blk += x_size;
6269 c = (remap[(int)(blk[0] * fac + add)])
6270 | (remap[(int)(blk[1] * fac + add)] << 3)
6271 | (remap[(int)(blk[2] * fac + add)] << 6)
6272 | (remap[(int)(blk[3] * fac + add)] << 9);
6273 blk += x_size;
6274 d = (remap[(int)(blk[0] * fac + add)] << 4)
6275 | (remap[(int)(blk[1] * fac + add)] << 7)
6276 | (remap[(int)(blk[2] * fac + add)] << 10)
6277 | (remap[(int)(blk[3] * fac + add)] << 13);
6278
6279 *(dest++) = maxv;
6280 *(dest++) = minv;
6281 *(dest++) = a & 0xff;
6282 *(dest++) = (a >> 8) | (b & 0xf0);
6283 *(dest++) = b >> 8;
6284 *(dest++) = c & 0xff;
6285 *(dest++) = (c >> 8) | (d & 0xf0);
6286 *(dest++) = d >> 8;
6287
6288 // Advance to the beginning of the next 4x4 block.
6289 src += 4;
6290 }
6291 src += x_size * 3;
6292 }
6294 }
6295}
6296
6297/**
6298 * Compresses a RAM image using BC5 compression.
6299 */
6300void Texture::
6301do_compress_ram_image_bc5(const RamImage &uncompressed_image,
6302 RamImage &compressed_image,
6303 int x_size, int y_size, int num_pages) {
6304 int x_blocks = (x_size >> 2);
6305 int y_blocks = (y_size >> 2);
6306 int stride = x_size * 2;
6307
6308 // BC5 uses the same compression algorithm as BC4, except repeated for two
6309 // channels.
6310
6311 nassertv((size_t)x_blocks * (size_t)y_blocks * 4 * 4 * 2 <= uncompressed_image._page_size);
6312 nassertv((size_t)stride * (size_t)y_size == uncompressed_image._page_size);
6313
6314 static const int remap[] = {1, 7, 6, 5, 4, 3, 2, 0};
6315
6316 for (int z = 0; z < num_pages; ++z) {
6317 unsigned char *dest = compressed_image._image.p() + z * compressed_image._page_size;
6318 unsigned const char *src = uncompressed_image._image.p() + z * uncompressed_image._page_size;
6319
6320 // Convert one 4 x 4 block at a time.
6321 for (int y = 0; y < y_blocks; ++y) {
6322 for (int x = 0; x < x_blocks; ++x) {
6323 int a, b, c, d;
6324 float fac, add;
6325 unsigned char minv, maxv;
6326 unsigned const char *blk = src;
6327
6328 // Find the minimum and maximum red value in the block.
6329 minv = blk[0];
6330 maxv = blk[0];
6331 minv = min(blk[2], minv); maxv = max(blk[2], maxv);
6332 minv = min(blk[4], minv); maxv = max(blk[4], maxv);
6333 minv = min(blk[6], minv); maxv = max(blk[6], maxv);
6334 blk += stride;
6335 minv = min(blk[0], minv); maxv = max(blk[0], maxv);
6336 minv = min(blk[2], minv); maxv = max(blk[2], maxv);
6337 minv = min(blk[4], minv); maxv = max(blk[4], maxv);
6338 minv = min(blk[6], minv); maxv = max(blk[6], maxv);
6339 blk += stride;
6340 minv = min(blk[0], minv); maxv = max(blk[0], maxv);
6341 minv = min(blk[2], minv); maxv = max(blk[2], maxv);
6342 minv = min(blk[4], minv); maxv = max(blk[4], maxv);
6343 minv = min(blk[6], minv); maxv = max(blk[6], maxv);
6344 blk += stride;
6345 minv = min(blk[0], minv); maxv = max(blk[0], maxv);
6346 minv = min(blk[2], minv); maxv = max(blk[2], maxv);
6347 minv = min(blk[4], minv); maxv = max(blk[4], maxv);
6348 minv = min(blk[6], minv); maxv = max(blk[6], maxv);
6349
6350 // Now calculate the index for each pixel.
6351 if (maxv > minv) {
6352 fac = 7.5f / (maxv - minv);
6353 } else {
6354 fac = 0;
6355 }
6356 add = -minv * fac;
6357 blk = src;
6358 a = (remap[(int)(blk[0] * fac + add)])
6359 | (remap[(int)(blk[2] * fac + add)] << 3)
6360 | (remap[(int)(blk[4] * fac + add)] << 6)
6361 | (remap[(int)(blk[6] * fac + add)] << 9);
6362 blk += stride;
6363 b = (remap[(int)(blk[0] * fac + add)] << 4)
6364 | (remap[(int)(blk[2] * fac + add)] << 7)
6365 | (remap[(int)(blk[4] * fac + add)] << 10)
6366 | (remap[(int)(blk[6] * fac + add)] << 13);
6367 blk += stride;
6368 c = (remap[(int)(blk[0] * fac + add)])
6369 | (remap[(int)(blk[2] * fac + add)] << 3)
6370 | (remap[(int)(blk[4] * fac + add)] << 6)
6371 | (remap[(int)(blk[6] * fac + add)] << 9);
6372 blk += stride;
6373 d = (remap[(int)(blk[0] * fac + add)] << 4)
6374 | (remap[(int)(blk[2] * fac + add)] << 7)
6375 | (remap[(int)(blk[4] * fac + add)] << 10)
6376 | (remap[(int)(blk[6] * fac + add)] << 13);
6377
6378 *(dest++) = maxv;
6379 *(dest++) = minv;
6380 *(dest++) = a & 0xff;
6381 *(dest++) = (a >> 8) | (b & 0xf0);
6382 *(dest++) = b >> 8;
6383 *(dest++) = c & 0xff;
6384 *(dest++) = (c >> 8) | (d & 0xf0);
6385 *(dest++) = d >> 8;
6386
6387 // Find the minimum and maximum green value in the block.
6388 blk = src + 1;
6389 minv = blk[0];
6390 maxv = blk[0];
6391 minv = min(blk[2], minv); maxv = max(blk[2], maxv);
6392 minv = min(blk[4], minv); maxv = max(blk[4], maxv);
6393 minv = min(blk[6], minv); maxv = max(blk[6], maxv);
6394 blk += stride;
6395 minv = min(blk[0], minv); maxv = max(blk[0], maxv);
6396 minv = min(blk[2], minv); maxv = max(blk[2], maxv);
6397 minv = min(blk[4], minv); maxv = max(blk[4], maxv);
6398 minv = min(blk[6], minv); maxv = max(blk[6], maxv);
6399 blk += stride;
6400 minv = min(blk[0], minv); maxv = max(blk[0], maxv);
6401 minv = min(blk[2], minv); maxv = max(blk[2], maxv);
6402 minv = min(blk[4], minv); maxv = max(blk[4], maxv);
6403 minv = min(blk[6], minv); maxv = max(blk[6], maxv);
6404 blk += stride;
6405 minv = min(blk[0], minv); maxv = max(blk[0], maxv);
6406 minv = min(blk[2], minv); maxv = max(blk[2], maxv);
6407 minv = min(blk[4], minv); maxv = max(blk[4], maxv);
6408 minv = min(blk[6], minv); maxv = max(blk[6], maxv);
6409
6410 // Now calculate the index for each pixel.
6411 if (maxv > minv) {
6412 fac = 7.5f / (maxv - minv);
6413 } else {
6414 fac = 0;
6415 }
6416 add = -minv * fac;
6417 blk = src + 1;
6418 a = (remap[(int)(blk[0] * fac + add)])
6419 | (remap[(int)(blk[2] * fac + add)] << 3)
6420 | (remap[(int)(blk[4] * fac + add)] << 6)
6421 | (remap[(int)(blk[6] * fac + add)] << 9);
6422 blk += stride;
6423 b = (remap[(int)(blk[0] * fac + add)] << 4)
6424 | (remap[(int)(blk[2] * fac + add)] << 7)
6425 | (remap[(int)(blk[4] * fac + add)] << 10)
6426 | (remap[(int)(blk[6] * fac + add)] << 13);
6427 blk += stride;
6428 c = (remap[(int)(blk[0] * fac + add)])
6429 | (remap[(int)(blk[2] * fac + add)] << 3)
6430 | (remap[(int)(blk[4] * fac + add)] << 6)
6431 | (remap[(int)(blk[6] * fac + add)] << 9);
6432 blk += stride;
6433 d = (remap[(int)(blk[0] * fac + add)] << 4)
6434 | (remap[(int)(blk[2] * fac + add)] << 7)
6435 | (remap[(int)(blk[4] * fac + add)] << 10)
6436 | (remap[(int)(blk[6] * fac + add)] << 13);
6437
6438 *(dest++) = maxv;
6439 *(dest++) = minv;
6440 *(dest++) = a & 0xff;
6441 *(dest++) = (a >> 8) | (b & 0xf0);
6442 *(dest++) = b >> 8;
6443 *(dest++) = c & 0xff;
6444 *(dest++) = (c >> 8) | (d & 0xf0);
6445 *(dest++) = d >> 8;
6446
6447 // Advance to the beginning of the next 4x4 block.
6448 src += 8;
6449 }
6450 src += stride * 3;
6451 }
6453 }
6454}
6455
6456/**
6457 * Decompresses a RAM image compressed using BC4.
6458 */
6459void Texture::
6460do_uncompress_ram_image_bc4(const RamImage &compressed_image,
6461 RamImage &uncompressed_image,
6462 int x_size, int y_size, int num_pages) {
6463 int x_blocks = (x_size >> 2);
6464 int y_blocks = (y_size >> 2);
6465
6466 for (int z = 0; z < num_pages; ++z) {
6467 unsigned char *dest = uncompressed_image._image.p() + z * uncompressed_image._page_size;
6468 unsigned const char *src = compressed_image._image.p() + z * compressed_image._page_size;
6469
6470 // Unconvert one 4 x 4 block at a time.
6471 uint8_t tbl[8];
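    // Per the BC4 decode rules: when the first endpoint is greater than the
    // second, table entries 2-7 interpolate between the two endpoints;
    // otherwise only entries 2-5 interpolate and entries 6 and 7 are fixed
    // at 0 and 255.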
6472 for (int y = 0; y < y_blocks; ++y) {
6473 for (int x = 0; x < x_blocks; ++x) {
6474 unsigned char *blk = dest;
6475 tbl[0] = src[0];
6476 tbl[1] = src[1];
6477 if (tbl[0] > tbl[1]) {
6478 tbl[2] = (tbl[0] * 6 + tbl[1] * 1) / 7.0f;
6479 tbl[3] = (tbl[0] * 5 + tbl[1] * 2) / 7.0f;
6480 tbl[4] = (tbl[0] * 4 + tbl[1] * 3) / 7.0f;
6481 tbl[5] = (tbl[0] * 3 + tbl[1] * 4) / 7.0f;
6482 tbl[6] = (tbl[0] * 2 + tbl[1] * 5) / 7.0f;
6483 tbl[7] = (tbl[0] * 1 + tbl[1] * 6) / 7.0f;
6484 } else {
6485 tbl[2] = (tbl[0] * 4 + tbl[1] * 1) / 5.0f;
6486 tbl[3] = (tbl[0] * 3 + tbl[1] * 2) / 5.0f;
6487 tbl[4] = (tbl[0] * 2 + tbl[1] * 3) / 5.0f;
6488 tbl[5] = (tbl[0] * 1 + tbl[1] * 4) / 5.0f;
6489 tbl[6] = 0;
6490 tbl[7] = 255;
6491 }
6492 int v = src[2] + (src[3] << 8) + (src[4] << 16);
6493 blk[0] = tbl[v & 0x7];
6494 blk[1] = tbl[(v & 0x000038) >> 3];
6495 blk[2] = tbl[(v & 0x0001c0) >> 6];
6496 blk[3] = tbl[(v & 0x000e00) >> 9];
6497 blk += x_size;
6498 blk[0] = tbl[(v & 0x007000) >> 12];
6499 blk[1] = tbl[(v & 0x038000) >> 15];
6500 blk[2] = tbl[(v & 0x1c0000) >> 18];
6501 blk[3] = tbl[(v & 0xe00000) >> 21];
6502 blk += x_size;
6503 v = src[5] + (src[6] << 8) + (src[7] << 16);
6504 blk[0] = tbl[v & 0x7];
6505 blk[1] = tbl[(v & 0x000038) >> 3];
6506 blk[2] = tbl[(v & 0x0001c0) >> 6];
6507 blk[3] = tbl[(v & 0x000e00) >> 9];
6508 blk += x_size;
6509 blk[0] = tbl[(v & 0x007000) >> 12];
6510 blk[1] = tbl[(v & 0x038000) >> 15];
6511 blk[2] = tbl[(v & 0x1c0000) >> 18];
6512 blk[3] = tbl[(v & 0xe00000) >> 21];
6513 src += 8;
6514 dest += 4;
6515 }
6516 dest += x_size * 3;
6517 }
6519 }
6520}
6521
6522/**
6523 * Decompresses a RAM image compressed using BC5.
6524 */
6525void Texture::
6526do_uncompress_ram_image_bc5(const RamImage &compressed_image,
6527 RamImage &uncompressed_image,
6528 int x_size, int y_size, int num_pages) {
6529 int x_blocks = (x_size >> 2);
6530 int y_blocks = (y_size >> 2);
6531 int stride = x_size * 2;
6532
6533 for (int z = 0; z < num_pages; ++z) {
6534 unsigned char *dest = uncompressed_image._image.p() + z * uncompressed_image._page_size;
6535 unsigned const char *src = compressed_image._image.p() + z * compressed_image._page_size;
6536
6537 // Unconvert one 4 x 4 block at a time.
6538 uint8_t red[8];
6539 uint8_t grn[8];
6540 for (int y = 0; y < y_blocks; ++y) {
6541 for (int x = 0; x < x_blocks; ++x) {
6542 unsigned char *blk = dest;
6543 red[0] = src[0];
6544 red[1] = src[1];
6545 if (red[0] > red[1]) {
6546 red[2] = (red[0] * 6 + red[1] * 1) / 7.0f;
6547 red[3] = (red[0] * 5 + red[1] * 2) / 7.0f;
6548 red[4] = (red[0] * 4 + red[1] * 3) / 7.0f;
6549 red[5] = (red[0] * 3 + red[1] * 4) / 7.0f;
6550 red[6] = (red[0] * 2 + red[1] * 5) / 7.0f;
6551 red[7] = (red[0] * 1 + red[1] * 6) / 7.0f;
6552 } else {
6553 red[2] = (red[0] * 4 + red[1] * 1) / 5.0f;
6554 red[3] = (red[0] * 3 + red[1] * 2) / 5.0f;
6555 red[4] = (red[0] * 2 + red[1] * 3) / 5.0f;
6556 red[5] = (red[0] * 1 + red[1] * 4) / 5.0f;
6557 red[6] = 0;
6558 red[7] = 255;
6559 }
6560 grn[0] = src[8];
6561 grn[1] = src[9];
6562 if (grn[0] > grn[1]) {
6563 grn[2] = (grn[0] * 6 + grn[1] * 1) / 7.0f;
6564 grn[3] = (grn[0] * 5 + grn[1] * 2) / 7.0f;
6565 grn[4] = (grn[0] * 4 + grn[1] * 3) / 7.0f;
6566 grn[5] = (grn[0] * 3 + grn[1] * 4) / 7.0f;
6567 grn[6] = (grn[0] * 2 + grn[1] * 5) / 7.0f;
6568 grn[7] = (grn[0] * 1 + grn[1] * 6) / 7.0f;
6569 } else {
6570 grn[2] = (grn[0] * 4 + grn[1] * 1) / 5.0f;
6571 grn[3] = (grn[0] * 3 + grn[1] * 2) / 5.0f;
6572 grn[4] = (grn[0] * 2 + grn[1] * 3) / 5.0f;
6573 grn[5] = (grn[0] * 1 + grn[1] * 4) / 5.0f;
6574 grn[6] = 0;
6575 grn[7] = 255;
6576 }
6577 int r = src[2] + (src[3] << 8) + (src[4] << 16);
6578 int g = src[10] + (src[11] << 8) + (src[12] << 16);
6579 blk[0] = red[r & 0x7];
6580 blk[1] = grn[g & 0x7];
6581 blk[2] = red[(r & 0x000038) >> 3];
6582 blk[3] = grn[(g & 0x000038) >> 3];
6583 blk[4] = red[(r & 0x0001c0) >> 6];
6584 blk[5] = grn[(g & 0x0001c0) >> 6];
6585 blk[6] = red[(r & 0x000e00) >> 9];
6586 blk[7] = grn[(g & 0x000e00) >> 9];
6587 blk += stride;
6588 blk[0] = red[(r & 0x007000) >> 12];
6589 blk[1] = grn[(g & 0x007000) >> 12];
6590 blk[2] = red[(r & 0x038000) >> 15];
6591 blk[3] = grn[(g & 0x038000) >> 15];
6592 blk[4] = red[(r & 0x1c0000) >> 18];
6593 blk[5] = grn[(g & 0x1c0000) >> 18];
6594 blk[6] = red[(r & 0xe00000) >> 21];
6595 blk[7] = grn[(g & 0xe00000) >> 21];
6596 blk += stride;
6597 r = src[5] + (src[6] << 8) + (src[7] << 16);
6598 g = src[13] + (src[14] << 8) + (src[15] << 16);
6599 blk[0] = red[r & 0x7];
6600 blk[1] = grn[g & 0x7];
6601 blk[2] = red[(r & 0x000038) >> 3];
6602 blk[3] = grn[(g & 0x000038) >> 3];
6603 blk[4] = red[(r & 0x0001c0) >> 6];
6604 blk[5] = grn[(g & 0x0001c0) >> 6];
6605 blk[6] = red[(r & 0x000e00) >> 9];
6606 blk[7] = grn[(g & 0x000e00) >> 9];
6607 blk += stride;
6608 blk[0] = red[(r & 0x007000) >> 12];
6609 blk[1] = grn[(g & 0x007000) >> 12];
6610 blk[2] = red[(r & 0x038000) >> 15];
6611 blk[3] = grn[(g & 0x038000) >> 15];
6612 blk[4] = red[(r & 0x1c0000) >> 18];
6613 blk[5] = grn[(g & 0x1c0000) >> 18];
6614 blk[6] = red[(r & 0xe00000) >> 21];
6615 blk[7] = grn[(g & 0xe00000) >> 21];
6616 src += 16;
6617 dest += 8;
6618 }
6619 dest += stride * 3;
6620 }
6622 }
6623}
6624
6625/**
6626 *
6627 */
6628bool Texture::
6629do_has_all_ram_mipmap_images(const CData *cdata) const {
6630 if (cdata->_ram_images.empty() || cdata->_ram_images[0]._image.empty()) {
6631 // If we don't even have a base image, the answer is no.
6632 return false;
6633 }
6634 if (!uses_mipmaps()) {
6635 // If we have a base image and don't require mipmapping, the answer is
6636 // yes.
6637 return true;
6638 }
6639
6640 // Check that we have enough mipmap levels to meet the size requirements.
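  // (For example, a 256x256 texture must have mipmap levels 1 through 8
  // present in addition to the base level 0.)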
6641 int size = max(cdata->_x_size, max(cdata->_y_size, cdata->_z_size));
6642 int n = 0;
6643 int x = 1;
6644 while (x < size) {
6645 x = (x << 1);
6646 ++n;
6647 if (n >= (int)cdata->_ram_images.size() || cdata->_ram_images[n]._image.empty()) {
6648 return false;
6649 }
6650 }
6651
6652 return true;
6653}
6654
6655/**
6656 * Considers whether the z_size (or num_views) should automatically be
6657 * adjusted when the user loads a new page. Returns true if the z size is
6658 * valid, false otherwise.
6659 *
6660 * Assumes the lock is already held.
6661 */
6662bool Texture::
6663do_reconsider_z_size(CData *cdata, int z, const LoaderOptions &options) {
6664 if (z >= cdata->_z_size * cdata->_num_views) {
6665 bool num_views_specified = true;
6666 if (options.get_texture_flags() & LoaderOptions::TF_multiview) {
 6667      // This flag is false if this is a multiview texture with an unspecified
 6668      // number of views.  It is true if it is not a multiview texture, or if
 6669      // it is but the number of views is explicitly specified.
6670 num_views_specified = (options.get_texture_num_views() != 0);
6671 }
6672
6673 if (num_views_specified &&
6674 (cdata->_texture_type == Texture::TT_3d_texture ||
6675 cdata->_texture_type == Texture::TT_2d_texture_array)) {
6676 // If we're loading a page past _z_size, treat it as an implicit request
6677 // to enlarge _z_size. However, this is only legal if this is, in fact,
6678 // a 3-d texture or a 2d texture array (cube maps always have z_size 6,
6679 // and other types have z_size 1).
6680 nassertr(cdata->_num_views != 0, false);
6681 cdata->_z_size = (z / cdata->_num_views) + 1;
6682
6683 } else if (cdata->_z_size != 0) {
6684 // In the case of a 2-d texture or cube map, or a 3-d texture with an
6685 // unspecified _num_views, assume we're loading views of a multiview
6686 // texture.
6687 cdata->_num_views = (z / cdata->_z_size) + 1;
6688
6689 } else {
6690 // The first image loaded sets an implicit z-size.
6691 cdata->_z_size = 1;
6692 }
6693
6694 // Increase the size of the data buffer to make room for the new texture
6695 // level.
6696 do_allocate_pages(cdata);
6697 }
6698
6699 return true;
6700}
6701
6702/**
6703 * Called internally by do_reconsider_z_size() to allocate new memory in
6704 * _ram_images[0] for the new number of pages.
6705 *
6706 * Assumes the lock is already held.
6707 */
6708void Texture::
6709do_allocate_pages(CData *cdata) {
6710 size_t new_size = do_get_expected_ram_image_size(cdata);
6711 if (!cdata->_ram_images.empty() &&
6712 !cdata->_ram_images[0]._image.empty() &&
6713 new_size > cdata->_ram_images[0]._image.size()) {
6714 cdata->_ram_images[0]._image.insert(cdata->_ram_images[0]._image.end(), new_size - cdata->_ram_images[0]._image.size(), 0);
6715 nassertv(cdata->_ram_images[0]._image.size() == new_size);
6716 }
6717}
6718
6719/**
6720 * Resets the internal Texture properties when a new image file is loaded.
6721 * Returns true if the new image is valid, false otherwise.
6722 *
6723 * Assumes the lock is already held.
6724 */
6725bool Texture::
6726do_reconsider_image_properties(CData *cdata, int x_size, int y_size, int num_components,
6727 Texture::ComponentType component_type, int z,
6728 const LoaderOptions &options) {
6729 if (!cdata->_loaded_from_image || num_components != cdata->_num_components || component_type != cdata->_component_type) {
6730 // Come up with a default format based on the number of channels. But
6731 // only do this the first time the file is loaded, or if the number of
6732 // channels in the image changes on subsequent loads.
6733
6734 // TODO: handle sRGB properly
6735 switch (num_components) {
6736 case 1:
6737 cdata->_format = F_luminance;
6738 break;
6739
6740 case 2:
6741 cdata->_format = F_luminance_alpha;
6742 break;
6743
6744 case 3:
6745 cdata->_format = F_rgb;
6746 break;
6747
6748 case 4:
6749 cdata->_format = F_rgba;
6750 break;
6751
6752 default:
6753 // Eh?
6754 nassert_raise("unexpected channel count");
6755 cdata->_format = F_rgb;
6756 return false;
6757 }
6758 }
6759
6760 if (!cdata->_loaded_from_image) {
6761 if ((options.get_texture_flags() & LoaderOptions::TF_allow_1d) &&
6762 cdata->_texture_type == TT_2d_texture && x_size != 1 && y_size == 1) {
6763 // If we're loading an Nx1 size texture, infer a 1-d texture type.
6764 cdata->_texture_type = TT_1d_texture;
6765 }
6766
6767#ifndef NDEBUG
6768 switch (cdata->_texture_type) {
6769 case TT_1d_texture:
6770 case TT_buffer_texture:
6771 nassertr(y_size == 1, false);
6772 break;
6773 case TT_cube_map:
6774 case TT_cube_map_array:
6775 nassertr(x_size == y_size, false);
6776 break;
6777 default:
6778 break;
6779 }
6780#endif
 6781    if (cdata->_x_size != x_size || cdata->_y_size != y_size) {
6782 do_set_pad_size(cdata, 0, 0, 0);
6783 }
6784 cdata->_x_size = x_size;
6785 cdata->_y_size = y_size;
6786 cdata->_num_components = num_components;
6787 do_set_component_type(cdata, component_type);
6788
6789 } else {
6790 if (cdata->_x_size != x_size ||
6791 cdata->_y_size != y_size ||
6792 cdata->_num_components != num_components ||
6793 cdata->_component_type != component_type) {
6794 gobj_cat.error()
6795 << "Texture properties have changed for texture " << get_name()
6796 << " page " << z << ".\n";
6797 return false;
6798 }
6799 }
6800
6801 return true;
6802}
6803
6804/**
6805 *
6806 */
6807bool Texture::
6808do_rescale_texture(CData *cdata) {
6809 int new_x_size = cdata->_x_size;
6810 int new_y_size = cdata->_y_size;
6811 if (cdata->_z_size * cdata->_num_views != 1) {
6812 nassert_raise("rescale_texture() doesn't support 3-d or multiview textures.");
6813 return false;
6814 }
6815
6816 if (do_adjust_this_size(cdata, new_x_size, new_y_size, get_name(), false)) {
6817 // OK, we have to scale the image.
6818 PNMImage orig_image;
6819 if (!do_store_one(cdata, orig_image, 0, 0)) {
6820 gobj_cat.warning()
6821 << "Couldn't get image in rescale_texture()\n";
6822 return false;
6823 }
6824
6825 gobj_cat.info()
6826 << "Resizing " << get_name() << " to " << new_x_size << " x "
6827 << new_y_size << "\n";
6828 PNMImage new_image(new_x_size, new_y_size, orig_image.get_num_channels(),
6829 orig_image.get_maxval(), orig_image.get_type(),
6830 orig_image.get_color_space());
6831 new_image.quick_filter_from(orig_image);
6832
6833 do_clear_ram_image(cdata);
6834 cdata->inc_image_modified();
6835 cdata->_x_size = new_x_size;
6836 cdata->_y_size = new_y_size;
6837 if (!do_load_one(cdata, new_image, get_name(), 0, 0, LoaderOptions())) {
6838 return false;
6839 }
6840
6841 return true;
6842 }
6843
6844 // Maybe we should pad the image.
6845 int pad_x_size = 0;
6846 int pad_y_size = 0;
6847 if (do_get_auto_texture_scale(cdata) == ATS_pad) {
6848 new_x_size = cdata->_x_size;
6849 new_y_size = cdata->_y_size;
6850 if (do_adjust_this_size(cdata, new_x_size, new_y_size, get_name(), true)) {
6851 pad_x_size = new_x_size - cdata->_x_size;
6852 pad_y_size = new_y_size - cdata->_y_size;
6853
6854 PNMImage orig_image;
6855 if (!do_store_one(cdata, orig_image, 0, 0)) {
6856 gobj_cat.warning()
6857 << "Couldn't get image in rescale_texture()\n";
6858 return false;
6859 }
6860 PNMImage new_image(new_x_size, new_y_size, orig_image.get_num_channels(),
6861 orig_image.get_maxval(), orig_image.get_type(),
6862 orig_image.get_color_space());
6863 new_image.copy_sub_image(orig_image, 0, new_y_size - orig_image.get_y_size());
6864
6865 do_clear_ram_image(cdata);
6866 cdata->_loaded_from_image = false;
6867 cdata->inc_image_modified();
6868 if (!do_load_one(cdata, new_image, get_name(), 0, 0, LoaderOptions())) {
6869 return false;
6870 }
6871
6872 do_set_pad_size(cdata, pad_x_size, pad_y_size, 0);
6873 return true;
6874 }
6875 }
6876
6877 // No changes needed.
6878 return false;
6879}
6880
6881/**
6882 *
6883 */
6884PT(Texture) Texture::
6885make_copy_impl() const {
6886 CDReader cdata(_cycler);
6887 return do_make_copy(cdata);
6888}
6889
6890/**
6891 *
6892 */
6893PT(Texture) Texture::
6894do_make_copy(const CData *cdata) const {
6895 PT(Texture) tex = new Texture(get_name());
6896 CDWriter cdata_tex(tex->_cycler, true);
6897 tex->do_assign(cdata_tex, this, cdata);
6898 return tex;
6899}
6900
6901/**
6902 * The internal implementation of operator =(). Assumes the lock is already
6903 * held on both Textures.
6904 */
6905void Texture::
6906do_assign(CData *cdata, const Texture *copy, const CData *cdata_copy) {
6907 cdata->do_assign(cdata_copy);
6908}
6909
6910/**
6911 * The protected implementation of clear(). Assumes the lock is already held.
6912 */
6913void Texture::
6914do_clear(CData *cdata) {
6915 Texture tex;
6916 tex.local_object();
6917 CDReader cdata_tex(tex._cycler);
6918 do_assign(cdata, &tex, cdata_tex);
6919
6920 cdata->inc_properties_modified();
6921 cdata->inc_image_modified();
6922 cdata->inc_simple_image_modified();
6923}
6924
6925/**
6926 *
6927 */
6928void Texture::
6929do_setup_texture(CData *cdata, Texture::TextureType texture_type,
6930 int x_size, int y_size, int z_size,
6931 Texture::ComponentType component_type,
6932 Texture::Format format) {
6933 switch (texture_type) {
6934 case TT_1d_texture:
6935 nassertv(y_size == 1 && z_size == 1);
6936 break;
6937
6938 case TT_2d_texture:
6939 nassertv(z_size == 1);
6940 break;
6941
6942 case TT_3d_texture:
6943 break;
6944
6945 case TT_2d_texture_array:
6946 break;
6947
6948 case TT_cube_map:
6949 // Cube maps must always consist of six square images.
6950 nassertv(x_size == y_size && z_size == 6);
6951
6952 // In principle the wrap mode shouldn't mean anything to a cube map, but
6953 // some drivers seem to misbehave if it's other than
6954 // SamplerState::WM_clamp.
6955 cdata->_default_sampler.set_wrap_u(SamplerState::WM_clamp);
6956 cdata->_default_sampler.set_wrap_v(SamplerState::WM_clamp);
6957 cdata->_default_sampler.set_wrap_w(SamplerState::WM_clamp);
6958 break;
6959
6960 case TT_cube_map_array:
 6961    // A cube map array's z_size must be a multiple of 6.
6962 nassertv(x_size == y_size && z_size % 6 == 0);
6963
6964 cdata->_default_sampler.set_wrap_u(SamplerState::WM_clamp);
6965 cdata->_default_sampler.set_wrap_v(SamplerState::WM_clamp);
6966 cdata->_default_sampler.set_wrap_w(SamplerState::WM_clamp);
6967 break;
6968
6969 case TT_buffer_texture:
6970 nassertv(y_size == 1 && z_size == 1);
6971 break;
6972
6973 case TT_1d_texture_array:
6974 nassertv(z_size == 1);
6975 break;
6976 }
6977
6978 if (texture_type != TT_2d_texture) {
6979 do_clear_simple_ram_image(cdata);
6980 }
6981
6982 cdata->_texture_type = texture_type;
6983 cdata->_x_size = x_size;
6984 cdata->_y_size = y_size;
6985 cdata->_z_size = z_size;
6986 cdata->_num_views = 1;
6987 do_set_component_type(cdata, component_type);
6988 do_set_format(cdata, format);
6989
6990 do_clear_ram_image(cdata);
6991 do_set_pad_size(cdata, 0, 0, 0);
6992 cdata->_orig_file_x_size = 0;
6993 cdata->_orig_file_y_size = 0;
6994 cdata->_loaded_from_image = false;
6995 cdata->_loaded_from_txo = false;
6996 cdata->_has_read_pages = false;
6997 cdata->_has_read_mipmaps = false;
6998}
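// A usage sketch for reference (hedged: it assumes the public
// setup_2d_texture() and make_ram_image() wrappers, which are expected to
// funnel into do_setup_texture() and do_make_ram_image() above):
//
//   PT(Texture) tex = new Texture("generated");
//   tex->setup_2d_texture(256, 256, Texture::T_unsigned_byte, Texture::F_rgba);
//   PTA_uchar image = tex->make_ram_image();
//   memset(image.p(), 0xff, image.size());  // opaque white, BGRA order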
6999
7000/**
7001 *
7002 */
7003void Texture::
7004do_set_format(CData *cdata, Texture::Format format) {
7005 if (format == cdata->_format) {
7006 return;
7007 }
7008 cdata->_format = format;
7009 cdata->inc_properties_modified();
7010
7011 switch (cdata->_format) {
7012 case F_color_index:
7013 case F_depth_stencil:
7014 case F_depth_component:
7015 case F_depth_component16:
7016 case F_depth_component24:
7017 case F_depth_component32:
7018 case F_red:
7019 case F_green:
7020 case F_blue:
7021 case F_alpha:
7022 case F_luminance:
7023 case F_r16:
7024 case F_r16i:
7025 case F_sluminance:
7026 case F_r32i:
7027 case F_r32:
7028 case F_r8i:
7029 cdata->_num_components = 1;
7030 break;
7031
7032 case F_luminance_alpha:
7033 case F_luminance_alphamask:
7034 case F_rg16:
7035 case F_sluminance_alpha:
7036 case F_rg32:
7037 case F_rg8i:
7038 case F_rg:
7039 case F_rg16i:
7040 case F_rg32i:
7041 cdata->_num_components = 2;
7042 break;
7043
7044 case F_rgb:
7045 case F_rgb5:
7046 case F_rgb8:
7047 case F_rgb12:
7048 case F_rgb332:
7049 case F_rgb16:
7050 case F_srgb:
7051 case F_rgb32:
7052 case F_rgb8i:
7053 case F_r11_g11_b10:
7054 case F_rgb9_e5:
7055 case F_rgb16i:
7056 case F_rgb32i:
7057 cdata->_num_components = 3;
7058 break;
7059
7060 case F_rgba:
7061 case F_rgbm:
7062 case F_rgba4:
7063 case F_rgba5:
7064 case F_rgba8:
7065 case F_rgba12:
7066 case F_rgba16:
7067 case F_rgba32:
7068 case F_srgb_alpha:
7069 case F_rgba8i:
7070 case F_rgb10_a2:
7071 case F_rgba16i:
7072 case F_rgba32i:
7073 cdata->_num_components = 4;
7074 break;
7075 }
7076}
7077
7078/**
7079 *
7080 */
7081void Texture::
7082do_set_component_type(CData *cdata, Texture::ComponentType component_type) {
7083 cdata->_component_type = component_type;
7084
7085 switch (component_type) {
7086 case T_unsigned_byte:
7087 case T_byte:
7088 cdata->_component_width = 1;
7089 break;
7090
7091 case T_unsigned_short:
7092 case T_short:
7093 case T_half_float:
7094 cdata->_component_width = 2;
7095 break;
7096
7097 case T_float:
7098 case T_unsigned_int_24_8:
7099 case T_int:
7100 case T_unsigned_int:
7101 cdata->_component_width = 4;
7102 break;
7103 }
7104}
7105
7106/**
7107 *
7108 */
7109void Texture::
7110do_set_x_size(CData *cdata, int x_size) {
7111 if (cdata->_x_size != x_size) {
7112 cdata->_x_size = x_size;
7113 cdata->inc_image_modified();
7114 do_clear_ram_image(cdata);
7115 do_set_pad_size(cdata, 0, 0, 0);
7116 }
7117}
7118
7119/**
7120 *
7121 */
7122void Texture::
7123do_set_y_size(CData *cdata, int y_size) {
7124 if (cdata->_y_size != y_size) {
7125 nassertv((cdata->_texture_type != Texture::TT_buffer_texture &&
7126 cdata->_texture_type != Texture::TT_1d_texture) || y_size == 1);
7127 cdata->_y_size = y_size;
7128 cdata->inc_image_modified();
7129 do_clear_ram_image(cdata);
7130 do_set_pad_size(cdata, 0, 0, 0);
7131 }
7132}
7133
7134/**
7135 * Changes the z size indicated for the texture. This also implicitly unloads
7136 * the texture if it has already been loaded.
7137 */
7138void Texture::
7139do_set_z_size(CData *cdata, int z_size) {
7140 if (cdata->_z_size != z_size) {
7141 nassertv((cdata->_texture_type == Texture::TT_3d_texture) ||
7142 (cdata->_texture_type == Texture::TT_cube_map && z_size == 6) ||
7143 (cdata->_texture_type == Texture::TT_cube_map_array && z_size % 6 == 0) ||
7144 (cdata->_texture_type == Texture::TT_2d_texture_array) || (z_size == 1));
7145 cdata->_z_size = z_size;
7146 cdata->inc_image_modified();
7147 do_clear_ram_image(cdata);
7148 do_set_pad_size(cdata, 0, 0, 0);
7149 }
7150}
7151
7152/**
7153 *
7154 */
7155void Texture::
7156do_set_num_views(CData *cdata, int num_views) {
7157 nassertv(num_views >= 1);
7158 if (cdata->_num_views != num_views) {
7159 cdata->_num_views = num_views;
7160 if (do_has_ram_image(cdata)) {
7161 cdata->inc_image_modified();
7162 do_clear_ram_image(cdata);
7163 }
7164 do_set_pad_size(cdata, 0, 0, 0);
7165 }
7166}
7167
7168/**
7169 *
7170 */
7171void Texture::
7172do_set_wrap_u(CData *cdata, SamplerState::WrapMode wrap) {
7173 if (cdata->_default_sampler.get_wrap_u() != wrap) {
7174 cdata->inc_properties_modified();
7175 cdata->_default_sampler.set_wrap_u(wrap);
7176 }
7177}
7178
7179/**
7180 *
7181 */
7182void Texture::
7183do_set_wrap_v(CData *cdata, SamplerState::WrapMode wrap) {
7184 if (cdata->_default_sampler.get_wrap_v() != wrap) {
7185 cdata->inc_properties_modified();
7186 cdata->_default_sampler.set_wrap_v(wrap);
7187 }
7188}
7189
7190/**
7191 *
7192 */
7193void Texture::
7194do_set_wrap_w(CData *cdata, SamplerState::WrapMode wrap) {
7195 if (cdata->_default_sampler.get_wrap_w() != wrap) {
7196 cdata->inc_properties_modified();
7197 cdata->_default_sampler.set_wrap_w(wrap);
7198 }
7199}
7200
7201/**
7202 *
7203 */
7204void Texture::
7205do_set_minfilter(CData *cdata, SamplerState::FilterType filter) {
7206 if (cdata->_default_sampler.get_minfilter() != filter) {
7207 cdata->inc_properties_modified();
7208 cdata->_default_sampler.set_minfilter(filter);
7209 }
7210}
7211
7212/**
7213 *
7214 */
7215void Texture::
7216do_set_magfilter(CData *cdata, SamplerState::FilterType filter) {
7217 if (cdata->_default_sampler.get_magfilter() != filter) {
7218 cdata->inc_properties_modified();
7219 cdata->_default_sampler.set_magfilter(filter);
7220 }
7221}
7222
7223/**
7224 *
7225 */
7226void Texture::
7227do_set_anisotropic_degree(CData *cdata, int anisotropic_degree) {
7228 if (cdata->_default_sampler.get_anisotropic_degree() != anisotropic_degree) {
7229 cdata->inc_properties_modified();
7230 cdata->_default_sampler.set_anisotropic_degree(anisotropic_degree);
7231 }
7232}
7233
7234/**
7235 *
7236 */
7237void Texture::
7238do_set_border_color(CData *cdata, const LColor &color) {
7239 if (cdata->_default_sampler.get_border_color() != color) {
7240 cdata->inc_properties_modified();
7241 cdata->_default_sampler.set_border_color(color);
7242 }
7243}
7244
7245/**
7246 *
7247 */
7248void Texture::
7249do_set_compression(CData *cdata, Texture::CompressionMode compression) {
7250 if (cdata->_compression != compression) {
7251 cdata->inc_properties_modified();
7252 cdata->_compression = compression;
7253
7254 if (do_has_ram_image(cdata)) {
7255 bool has_compression = do_has_compression(cdata);
7256 bool has_ram_image_compression = (cdata->_ram_image_compression != CM_off);
 7257      if (has_compression != has_ram_image_compression ||
 7258          has_compression) {
 7259        // Reload if we're turning compression on or off, or if we're changing
7260 // the compression mode to a different kind of compression.
7261 do_reload(cdata);
7262 }
7263 }
7264 }
7265}
7266
7267/**
7268 *
7269 */
7270void Texture::
7271do_set_quality_level(CData *cdata, Texture::QualityLevel quality_level) {
7272 if (cdata->_quality_level != quality_level) {
7273 cdata->inc_properties_modified();
7274 cdata->_quality_level = quality_level;
7275 }
7276}
7277
7278/**
7279 *
7280 */
7281bool Texture::
7282do_has_compression(const CData *cdata) const {
7283 if (cdata->_compression == CM_default) {
7284 if (cdata->_texture_type != Texture::TT_buffer_texture) {
7285 return compressed_textures;
7286 } else {
7287 return false;
7288 }
7289 } else {
7290 return (cdata->_compression != CM_off);
7291 }
7292}
7293
7294/**
7295 * The protected implementation of has_ram_image(). Assumes the lock is
7296 * already held.
7297 */
7298bool Texture::
7299do_has_ram_image(const CData *cdata) const {
7300 return !cdata->_ram_images.empty() && !cdata->_ram_images[0]._image.empty();
7301}
7302
7303/**
7304 * The protected implementation of has_uncompressed_ram_image(). Assumes the
7305 * lock is already held.
7306 */
7307bool Texture::
7308do_has_uncompressed_ram_image(const CData *cdata) const {
7309 return !cdata->_ram_images.empty() && !cdata->_ram_images[0]._image.empty() && cdata->_ram_image_compression == CM_off;
7310}
7311
7312/**
7313 *
7314 */
7315CPTA_uchar Texture::
7316do_get_ram_image(CData *cdata) {
7317 if (!do_has_ram_image(cdata) && do_can_reload(cdata)) {
7318 do_reload_ram_image(cdata, true);
7319
7320 if (do_has_ram_image(cdata)) {
7321 // Normally, we don't update the cdata->_modified semaphores in a
7322 // do_blah method, but we'll make an exception in this case, because
7323 // it's easiest to modify these here, and only when we know it's needed.
7324 cdata->inc_image_modified();
7325 cdata->inc_properties_modified();
7326 }
7327 }
7328
7329 if (cdata->_ram_images.empty()) {
7330 return CPTA_uchar(get_class_type());
7331 }
7332
7333 return cdata->_ram_images[0]._image;
7334}
7335
7336/**
7337 *
7338 */
7339CPTA_uchar Texture::
7340do_get_uncompressed_ram_image(CData *cdata) {
7341 if (!cdata->_ram_images.empty() && cdata->_ram_image_compression != CM_off) {
7342 // We have an image in-ram, but it's compressed. Try to uncompress it
7343 // first.
7344 if (do_uncompress_ram_image(cdata)) {
7345 if (gobj_cat.is_debug()) {
7346 gobj_cat.debug()
7347 << "Uncompressed " << get_name() << "\n";
7348 }
7349 return cdata->_ram_images[0]._image;
7350 }
7351 }
7352
7353 // Couldn't uncompress the existing image. Try to reload it.
7354 if ((!do_has_ram_image(cdata) || cdata->_ram_image_compression != CM_off) && do_can_reload(cdata)) {
7355 do_reload_ram_image(cdata, false);
7356 }
7357
7358 if (!cdata->_ram_images.empty() && cdata->_ram_image_compression != CM_off) {
7359 // Great, now we have an image.
7360 if (do_uncompress_ram_image(cdata)) {
7361 gobj_cat.info()
7362 << "Uncompressed " << get_name() << "\n";
7363 return cdata->_ram_images[0]._image;
7364 }
7365 }
7366
7367 if (cdata->_ram_images.empty() || cdata->_ram_image_compression != CM_off) {
7368 return CPTA_uchar(get_class_type());
7369 }
7370
7371 return cdata->_ram_images[0]._image;
7372}
7373
7374/**
7375 * Returns the uncompressed system-RAM image data associated with the texture.
7376 * Rather than just returning a pointer to the data, like
7377 * get_uncompressed_ram_image, this function first processes the data and
 7378 * reorders the components according to the specified format string, placing
 7379 * the result in a newly allocated array.
7380 *
7381 * The 'format' argument should specify in which order the components of the
7382 * texture must be. For example, valid format strings are "RGBA", "GA",
7383 * "ABRG" or "AAA". A component can also be written as "0" or "1", which
7384 * means an empty/black or a full/white channel, respectively.
7385 *
 7386 * This function is particularly useful for copying an in-memory image to a
 7387 * different library (for example, PIL or wxWidgets) that requires a different
 7388 * component order than Panda's internal format, BGRA.  Note, however, that
 7389 * this conversion can still be too slow to perform every frame, and should
 7390 * thus be avoided for that purpose.
7391 *
7392 * The only requirement for the reordering is that an uncompressed image must
 7393 * be available.  If the RAM image is compressed, it will attempt to re-load
 7394 * the texture from disk; if it doesn't find an uncompressed image there, it
 7395 * will return NULL.
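 *
 * For example (a sketch, assuming a Texture pointer 'tex' whose uncompressed
 * RAM image is available):
 *
 *   CPTA_uchar rgba = tex->get_ram_image_as("RGBA");
 *
 * returns the same pixels with each pixel's bytes reordered to R, G, B, A.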
 7396 */
 7397CPTA_uchar Texture::
 7398get_ram_image_as(const string &requested_format) {
7399 CDWriter cdata(_cycler, false);
7400 string format = upcase(requested_format);
7401
7402 // Make sure we can grab something that's uncompressed.
7403 CPTA_uchar data = do_get_uncompressed_ram_image(cdata);
7404 if (data == nullptr) {
7405 gobj_cat.error() << "Couldn't find an uncompressed RAM image!\n";
7406 return CPTA_uchar(get_class_type());
7407 }
7408 int imgsize = cdata->_x_size * cdata->_y_size;
7409 nassertr(cdata->_num_components > 0 && cdata->_num_components <= 4, CPTA_uchar(get_class_type()));
7410 nassertr(data.size() == (size_t)(cdata->_component_width * cdata->_num_components * imgsize), CPTA_uchar(get_class_type()));
7411
7412 // Check if the format is already what we have internally.
7413 if ((cdata->_num_components == 1 && format.size() == 1) ||
7414 (cdata->_num_components == 2 && format.size() == 2 && format.at(1) == 'A' && format.at(0) != 'A') ||
7415 (cdata->_num_components == 3 && format == "BGR") ||
7416 (cdata->_num_components == 4 && format == "BGRA")) {
7417 // The format string is already our format, so we just need to copy it.
7418 return CPTA_uchar(data);
7419 }
7420
7421 // Check if we have an alpha channel, and remember which channel we use.
7422 int alpha = -1;
7423 if (Texture::has_alpha(cdata->_format)) {
7424 alpha = cdata->_num_components - 1;
7425 }
7426
7427 // Validate the format beforehand.
7428 for (size_t i = 0; i < format.size(); ++i) {
7429 if (format[i] != 'B' && format[i] != 'G' && format[i] != 'R' &&
7430 format[i] != 'A' && format[i] != '0' && format[i] != '1') {
7431 gobj_cat.error() << "Unexpected component character '"
7432 << format[i] << "', expected one of RGBA01!\n";
7433 return CPTA_uchar(get_class_type());
7434 }
7435 }
7436
7437 // Create a new empty array that can hold our image.
7438 PTA_uchar newdata = PTA_uchar::empty_array(imgsize * format.size() * cdata->_component_width, get_class_type());
7439
7440 // These ifs are for optimization of commonly used image types.
7441 if (cdata->_component_width == 1) {
7442 if (format == "RGBA" && cdata->_num_components == 4) {
7443 const uint32_t *src = (const uint32_t *)data.p();
7444 uint32_t *dst = (uint32_t *)newdata.p();
7445
7446 for (int p = 0; p < imgsize; ++p) {
7447 uint32_t v = *src++;
7448 *dst++ = ((v & 0xff00ff00u)) |
7449 ((v & 0x00ff0000u) >> 16) |
7450 ((v & 0x000000ffu) << 16);
7451 }
7452 return newdata;
7453 }
7454 if (format == "RGB" && cdata->_num_components == 4) {
7455 const uint32_t *src = (const uint32_t *)data.p();
7456 uint32_t *dst = (uint32_t *)newdata.p();
7457
7458 // Convert blocks of 4 pixels at a time, so that we can treat both the
7459 // source and destination as 32-bit integers.
7460 int blocks = imgsize >> 2;
7461 for (int i = 0; i < blocks; ++i) {
7462 uint32_t v0 = *src++;
7463 uint32_t v1 = *src++;
7464 uint32_t v2 = *src++;
7465 uint32_t v3 = *src++;
7466 *dst++ = ((v0 & 0x00ff0000u) >> 16) |
7467 ((v0 & 0x0000ff00u)) |
7468 ((v0 & 0x000000ffu) << 16) |
7469 ((v1 & 0x00ff0000u) << 8);
7470 *dst++ = ((v1 & 0x0000ff00u) >> 8) |
7471 ((v1 & 0x000000ffu) << 8) |
7472 ((v2 & 0x00ff0000u)) |
7473 ((v2 & 0x0000ff00u) << 16);
7474 *dst++ = ((v2 & 0x000000ffu)) |
7475 ((v3 & 0x00ff0000u) >> 8) |
7476 ((v3 & 0x0000ff00u) << 8) |
7477 ((v3 & 0x000000ffu) << 24);
7478 }
7479
7480 // If the image size wasn't a multiple of 4, we may have a handful of
7481 // pixels left over. Convert those the slower way.
7482 uint8_t *tail = (uint8_t *)dst;
7483 for (int i = (imgsize & ~0x3); i < imgsize; ++i) {
7484 uint32_t v = *src++;
7485 *tail++ = (v & 0x00ff0000u) >> 16;
7486 *tail++ = (v & 0x0000ff00u) >> 8;
7487 *tail++ = (v & 0x000000ffu);
7488 }
7489 return newdata;
7490 }
7491 if (format == "BGR" && cdata->_num_components == 4) {
7492 const uint32_t *src = (const uint32_t *)data.p();
7493 uint32_t *dst = (uint32_t *)newdata.p();
7494
7495 // Convert blocks of 4 pixels at a time, so that we can treat both the
7496 // source and destination as 32-bit integers.
7497 int blocks = imgsize >> 2;
7498 for (int i = 0; i < blocks; ++i) {
7499 uint32_t v0 = *src++;
7500 uint32_t v1 = *src++;
7501 uint32_t v2 = *src++;
7502 uint32_t v3 = *src++;
7503 *dst++ = (v0 & 0x00ffffffu) | ((v1 & 0x000000ffu) << 24);
7504 *dst++ = ((v1 & 0x00ffff00u) >> 8) | ((v2 & 0x0000ffffu) << 16);
7505 *dst++ = ((v2 & 0x00ff0000u) >> 16) | ((v3 & 0x00ffffffu) << 8);
7506 }
7507
7508 // If the image size wasn't a multiple of 4, we may have a handful of
7509 // pixels left over. Convert those the slower way.
7510 uint8_t *tail = (uint8_t *)dst;
7511 for (int i = (imgsize & ~0x3); i < imgsize; ++i) {
7512 uint32_t v = *src++;
7513 *tail++ = (v & 0x000000ffu);
7514 *tail++ = (v & 0x0000ff00u) >> 8;
7515 *tail++ = (v & 0x00ff0000u) >> 16;
7516 }
7517 return newdata;
7518 }
7519 const uint8_t *src = (const uint8_t *)data.p();
7520 uint8_t *dst = (uint8_t *)newdata.p();
7521
7522 if (format == "RGB" && cdata->_num_components == 3) {
7523 for (int i = 0; i < imgsize; ++i) {
7524 *dst++ = src[2];
7525 *dst++ = src[1];
7526 *dst++ = src[0];
7527 src += 3;
7528 }
7529 return newdata;
7530 }
7531 if (format == "A" && cdata->_num_components != 3) {
7532 // We can generally rely on alpha to be the last component.
7533 for (int p = 0; p < imgsize; ++p) {
7534 dst[p] = src[alpha];
7535 src += cdata->_num_components;
7536 }
7537 return newdata;
7538 }
7539 // Fallback case for other 8-bit-per-channel formats.
7540 for (int p = 0; p < imgsize; ++p) {
7541 for (size_t i = 0; i < format.size(); ++i) {
7542 if (format[i] == 'B' || (cdata->_num_components <= 2 && format[i] != 'A')) {
7543 *dst++ = src[0];
7544 } else if (format[i] == 'G') {
7545 *dst++ = src[1];
7546 } else if (format[i] == 'R') {
7547 *dst++ = src[2];
7548 } else if (format[i] == 'A') {
7549 if (alpha >= 0) {
7550 *dst++ = src[alpha];
7551 } else {
7552 *dst++ = 0xff;
7553 }
7554 } else if (format[i] == '1') {
7555 *dst++ = 0xff;
7556 } else {
7557 *dst++ = 0x00;
7558 }
7559 }
7560 src += cdata->_num_components;
7561 }
7562 return newdata;
7563 }
7564
7565 // The slow and general case.
7566 for (int p = 0; p < imgsize; ++p) {
7567 for (size_t i = 0; i < format.size(); ++i) {
7568 int component = 0;
7569 if (format[i] == 'B' || (cdata->_num_components <= 2 && format[i] != 'A')) {
7570 component = 0;
7571 } else if (format[i] == 'G') {
7572 component = 1;
7573 } else if (format[i] == 'R') {
7574 component = 2;
7575 } else if (format[i] == 'A') {
7576 if (alpha >= 0) {
7577 component = alpha;
7578 } else {
7579 memset((void*)(newdata + (p * format.size() + i) * cdata->_component_width), -1, cdata->_component_width);
7580 continue;
7581 }
7582 } else if (format[i] == '1') {
7583 memset((void*)(newdata + (p * format.size() + i) * cdata->_component_width), -1, cdata->_component_width);
7584 continue;
7585 } else {
7586 memset((void*)(newdata + (p * format.size() + i) * cdata->_component_width), 0, cdata->_component_width);
7587 continue;
7588 }
7589 memcpy((void*)(newdata + (p * format.size() + i) * cdata->_component_width),
7590 (void*)(data + (p * cdata->_num_components + component) * cdata->_component_width),
7591 cdata->_component_width);
7592 }
7593 }
7594 return newdata;
7595}
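// Illustrative usage sketch, assuming "tex" points to an 8-bit 2-D Texture
// whose uncompressed RAM image is available (or can be reloaded from disk);
// the helper name below is hypothetical.
static CPTA_uchar
example_get_rgba_bytes(Texture *tex) {
  // Ask for the pixels in R, G, B, A byte order instead of Panda's BGRA.
  CPTA_uchar rgba = tex->get_ram_image_as("RGBA");
  if (rgba == nullptr) {
    // Only a compressed image was available and it could not be reloaded.
    return CPTA_uchar(Texture::get_class_type());
  }
  // rgba now holds x_size * y_size * 4 bytes, ready for a library such as
  // PIL or wxWidgets that expects RGBA data.
  return rgba;
}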
7596
7597/**
7598 *
7599 */
7600void Texture::
7601do_set_simple_ram_image(CData *cdata, CPTA_uchar image, int x_size, int y_size) {
7602 nassertv(cdata->_texture_type == TT_2d_texture);
7603 size_t expected_page_size = (size_t)(x_size * y_size * 4);
7604 nassertv(image.size() == expected_page_size);
7605
7606 cdata->_simple_x_size = x_size;
7607 cdata->_simple_y_size = y_size;
7608 cdata->_simple_ram_image._image = image.cast_non_const();
7609 cdata->_simple_ram_image._page_size = image.size();
7610 cdata->_simple_image_date_generated = (int32_t)time(nullptr);
7611 cdata->inc_simple_image_modified();
7612}
7613
7614/**
7615 *
7616 */
7617int Texture::
7618do_get_expected_num_mipmap_levels(const CData *cdata) const {
7619 if (cdata->_texture_type == Texture::TT_buffer_texture) {
7620 return 1;
7621 }
7622 int size = max(cdata->_x_size, cdata->_y_size);
7623 if (cdata->_texture_type == Texture::TT_3d_texture) {
7624 size = max(size, cdata->_z_size);
7625 }
7626 int count = 1;
7627 while (size > 1) {
7628 size >>= 1;
7629 ++count;
7630 }
7631 return count;
7632}
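// Worked example (illustrative): the loop above yields
// floor(log2(largest dimension)) + 1 levels, so a 256 x 64 2-D texture gets
// the chain 256, 128, 64, 32, 16, 8, 4, 2, 1, i.e. nine mipmap levels, while
// a buffer texture always reports a single level.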
7633
7634/**
7635 *
7636 */
7637size_t Texture::
7638do_get_ram_mipmap_page_size(const CData *cdata, int n) const {
7639 if (cdata->_ram_image_compression != CM_off) {
7640 if (n >= 0 && n < (int)cdata->_ram_images.size()) {
7641 return cdata->_ram_images[n]._page_size;
7642 }
7643 return 0;
7644 } else {
7645 return do_get_expected_ram_mipmap_page_size(cdata, n);
7646 }
7647}
7648
7649/**
7650 *
7651 */
7652int Texture::
7653do_get_expected_mipmap_x_size(const CData *cdata, int n) const {
7654 int size = max(cdata->_x_size, 1);
7655 while (n > 0 && size > 1) {
7656 size >>= 1;
7657 --n;
7658 }
7659 return size;
7660}
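// Worked example (illustrative): the result is equivalent to
// max(_x_size >> n, 1), so level 3 of a 100-pixel-wide texture is
// 100 >> 3 = 12 pixels wide, and levels 6 and beyond are clamped to 1.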
7661
7662/**
7663 *
7664 */
7665int Texture::
7666do_get_expected_mipmap_y_size(const CData *cdata, int n) const {
7667 int size = max(cdata->_y_size, 1);
7668 while (n > 0 && size > 1) {
7669 size >>= 1;
7670 --n;
7671 }
7672 return size;
7673}
7674
7675/**
7676 *
7677 */
7678int Texture::
7679do_get_expected_mipmap_z_size(const CData *cdata, int n) const {
7680  // 3-D textures have a different number of pages for each mipmap level.
7681  // Other kinds of textures--especially cube map textures--always have the
7682 // same.
7683 if (cdata->_texture_type == Texture::TT_3d_texture) {
7684 int size = max(cdata->_z_size, 1);
7685 while (n > 0 && size > 1) {
7686 size >>= 1;
7687 --n;
7688 }
7689 return size;
7690
7691 } else {
7692 return cdata->_z_size;
7693 }
7694}
7695
7696/**
7697 *
7698 */
7699void Texture::
7700do_clear_simple_ram_image(CData *cdata) {
7701 cdata->_simple_x_size = 0;
7702 cdata->_simple_y_size = 0;
7703 cdata->_simple_ram_image._image.clear();
7704 cdata->_simple_ram_image._page_size = 0;
7705 cdata->_simple_image_date_generated = 0;
7706
7707 // We allow this exception: we update the _simple_image_modified here, since
7708 // no one really cares much about that anyway, and it's convenient to do it
7709 // here.
7710 cdata->inc_simple_image_modified();
7711}
7712
7713/**
7714 *
7715 */
7716void Texture::
7717do_clear_ram_mipmap_images(CData *cdata) {
7718 if (!cdata->_ram_images.empty()) {
7719 cdata->_ram_images.erase(cdata->_ram_images.begin() + 1, cdata->_ram_images.end());
7720 }
7721}
7722
7723/**
7724 * Generates the RAM mipmap images for this texture, first uncompressing it as
7725 * required. Will recompress the image if it was originally compressed,
7726 * unless allow_recompress is false.
7727 */
7728void Texture::
7729do_generate_ram_mipmap_images(CData *cdata, bool allow_recompress) {
7730 nassertv(do_has_ram_image(cdata));
7731
7732 if (do_get_expected_num_mipmap_levels(cdata) == 1) {
7733 // Don't bother.
7734 return;
7735 }
7736
7737 RamImage orig_compressed_image;
7738 CompressionMode orig_compression_mode = CM_off;
7739
7740 if (cdata->_ram_image_compression != CM_off) {
7741 // The RAM image is compressed. This means we need to uncompress it in
7742 // order to generate mipmap images. Save the original first, to avoid
7743 // lossy recompression.
7744 orig_compressed_image = cdata->_ram_images[0];
7745 orig_compression_mode = cdata->_ram_image_compression;
7746
7747 // Now try to get the uncompressed source image.
7748 do_get_uncompressed_ram_image(cdata);
7749
7750 if (cdata->_ram_image_compression != CM_off) {
7751 gobj_cat.error()
7752 << "Cannot generate mipmap levels for image with compression "
7753 << cdata->_ram_image_compression << "\n";
7754 return;
7755 }
7756 }
7757
7758 do_clear_ram_mipmap_images(cdata);
7759
7760 if (gobj_cat.is_debug()) {
7761 gobj_cat.debug()
7762 << "Generating mipmap levels for " << *this << "\n";
7763 }
7764
7765 if (cdata->_texture_type == Texture::TT_3d_texture && cdata->_z_size != 1) {
7766 // Eek, a 3-D texture.
7767 int x_size = cdata->_x_size;
7768 int y_size = cdata->_y_size;
7769 int z_size = cdata->_z_size;
7770 int n = 0;
7771 while (x_size > 1 || y_size > 1 || z_size > 1) {
7772 cdata->_ram_images.push_back(RamImage());
7773 do_filter_3d_mipmap_level(cdata, cdata->_ram_images[n + 1], cdata->_ram_images[n],
7774 x_size, y_size, z_size);
7775 x_size = max(x_size >> 1, 1);
7776 y_size = max(y_size >> 1, 1);
7777 z_size = max(z_size >> 1, 1);
7778 ++n;
7779 }
7780
7781 } else {
7782 // A 1-D, 2-D, or cube map texture.
7783 int x_size = cdata->_x_size;
7784 int y_size = cdata->_y_size;
7785 int n = 0;
7786 while (x_size > 1 || y_size > 1) {
7787 cdata->_ram_images.push_back(RamImage());
7788 do_filter_2d_mipmap_pages(cdata, cdata->_ram_images[n + 1], cdata->_ram_images[n],
7789 x_size, y_size);
7790 x_size = max(x_size >> 1, 1);
7791 y_size = max(y_size >> 1, 1);
7792 ++n;
7793 }
7794 }
7795
7796 if (orig_compression_mode != CM_off && allow_recompress) {
7797 // Now attempt to recompress the mipmap images according to the original
7798 // compression mode. We don't need to bother compressing the first image
7799 // (it was already compressed, after all), so temporarily remove it from
7800 // the top of the mipmap stack, and compress all of the rest of them
7801 // instead.
7802 nassertv(cdata->_ram_images.size() > 1);
7803 int l0_x_size = cdata->_x_size;
7804 int l0_y_size = cdata->_y_size;
7805 int l0_z_size = cdata->_z_size;
7806 cdata->_x_size = do_get_expected_mipmap_x_size(cdata, 1);
7807 cdata->_y_size = do_get_expected_mipmap_y_size(cdata, 1);
7808 cdata->_z_size = do_get_expected_mipmap_z_size(cdata, 1);
7809 RamImage uncompressed_image = cdata->_ram_images[0];
7810 cdata->_ram_images.erase(cdata->_ram_images.begin());
7811
7812 bool success = do_compress_ram_image(cdata, orig_compression_mode, QL_default, nullptr);
7813 // Now restore the toplevel image.
7814 if (success) {
7815 if (gobj_cat.is_debug()) {
7816 gobj_cat.debug()
7817 << "Compressed " << get_name() << " generated mipmaps with "
7818 << cdata->_ram_image_compression << "\n";
7819 }
7820 cdata->_ram_images.insert(cdata->_ram_images.begin(), orig_compressed_image);
7821 } else {
7822 cdata->_ram_images.insert(cdata->_ram_images.begin(), uncompressed_image);
7823 }
7824 cdata->_x_size = l0_x_size;
7825 cdata->_y_size = l0_y_size;
7826 cdata->_z_size = l0_z_size;
7827 }
7828}
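// Usage sketch (illustrative), assuming "tex" already holds an uncompressed
// RAM image for level 0:
//
//   tex->generate_ram_mipmap_images();
//   int levels = tex->get_num_ram_mipmap_images();
//   // levels should now match get_expected_num_mipmap_levels().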
7829
7830/**
7831 *
7832 */
7833void Texture::
7834do_set_pad_size(CData *cdata, int x, int y, int z) {
7835 if (x > cdata->_x_size) {
7836 x = cdata->_x_size;
7837 }
7838 if (y > cdata->_y_size) {
7839 y = cdata->_y_size;
7840 }
7841 if (z > cdata->_z_size) {
7842 z = cdata->_z_size;
7843 }
7844
7845 cdata->_pad_x_size = x;
7846 cdata->_pad_y_size = y;
7847 cdata->_pad_z_size = z;
7848}
7849
7850/**
7851 * Returns true if we can safely call do_reload_ram_image() in order to make
7852 * the image available, or false if we shouldn't do this (because we know a
7853 * priori that it wouldn't work anyway).
7854 */
7855bool Texture::
7856do_can_reload(const CData *cdata) const {
7857 return (cdata->_loaded_from_image && !cdata->_fullpath.empty());
7858}
7859
7860/**
7861 *
7862 */
7863bool Texture::
7864do_reload(CData *cdata) {
7865 if (do_can_reload(cdata)) {
7866 do_clear_ram_image(cdata);
7867 do_reload_ram_image(cdata, true);
7868 if (do_has_ram_image(cdata)) {
7869 // An explicit call to reload() should increment image_modified.
7870 cdata->inc_image_modified();
7871 return true;
7872 }
7873 return false;
7874 }
7875
7876 // We don't have a filename to load from.
7877 return false;
7878}
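// Usage sketch (illustrative), assuming the texture was originally loaded
// from an image file on disk:
//
//   if (tex->reload()) {
//     // The RAM image was re-read from _fullpath and image_modified bumped.
//   }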
7879
7880/**
7881 * Returns true if there is a rawdata image that we have available to write to
7882 * the bam stream. For a normal Texture, this is the same thing as
7883 * do_has_ram_image(), but a movie texture might define it differently.
7884 */
7885bool Texture::
7886do_has_bam_rawdata(const CData *cdata) const {
7887 return do_has_ram_image(cdata);
7888}
7889
7890/**
7891 * If do_has_bam_rawdata() returned false, this attempts to reload the rawdata
7892 * image if possible.
7893 */
7894void Texture::
7895do_get_bam_rawdata(CData *cdata) {
7896 do_get_ram_image(cdata);
7897}
7898
7899/**
7900 * Internal method to convert pixel data from the indicated PNMImage into the
7901 * given ram_image.
7902 */
7903void Texture::
7904convert_from_pnmimage(PTA_uchar &image, size_t page_size,
7905 int row_stride, int x, int y, int z,
7906 const PNMImage &pnmimage, int num_components,
7907 int component_width) {
7908 int x_size = pnmimage.get_x_size();
7909 int y_size = pnmimage.get_y_size();
7910 xelval maxval = pnmimage.get_maxval();
7911 int pixel_size = num_components * component_width;
7912
7913 int row_skip = 0;
7914 if (row_stride == 0) {
7915 row_stride = x_size;
7916 } else {
7917 row_skip = (row_stride - x_size) * pixel_size;
7918 nassertv(row_skip >= 0);
7919 }
7920
7921 bool is_grayscale = (num_components == 1 || num_components == 2);
7922 bool has_alpha = (num_components == 2 || num_components == 4);
7923 bool img_has_alpha = pnmimage.has_alpha();
7924
7925 int idx = page_size * z;
7926 nassertv(idx + page_size <= image.size());
7927 unsigned char *p = &image[idx];
7928
7929 if (x != 0 || y != 0) {
7930 p += (row_stride * y + x) * pixel_size;
7931 }
7932
7933 if (maxval == 255 && component_width == 1) {
7934 // Most common case: one byte per pixel, and the source image shows a
7935 // maxval of 255. No scaling is necessary. Because this is such a common
7936 // case, we break it out per component for best performance.
7937 const xel *array = pnmimage.get_array();
7938 switch (num_components) {
7939 case 1:
7940 for (int j = y_size-1; j >= 0; j--) {
7941 const xel *row = array + j * x_size;
7942 for (int i = 0; i < x_size; i++) {
7943 *p++ = (uchar)PPM_GETB(row[i]);
7944 }
7945 p += row_skip;
7946 }
7947 break;
7948
7949 case 2:
7950 if (img_has_alpha) {
7951 const xelval *alpha = pnmimage.get_alpha_array();
7952 for (int j = y_size-1; j >= 0; j--) {
7953 const xel *row = array + j * x_size;
7954 const xelval *alpha_row = alpha + j * x_size;
7955 for (int i = 0; i < x_size; i++) {
7956 *p++ = (uchar)PPM_GETB(row[i]);
7957 *p++ = (uchar)alpha_row[i];
7958 }
7959 p += row_skip;
7960 }
7961 } else {
7962 for (int j = y_size-1; j >= 0; j--) {
7963 const xel *row = array + j * x_size;
7964 for (int i = 0; i < x_size; i++) {
7965 *p++ = (uchar)PPM_GETB(row[i]);
7966 *p++ = (uchar)255;
7967 }
7968 p += row_skip;
7969 }
7970 }
7971 break;
7972
7973 case 3:
7974 for (int j = y_size-1; j >= 0; j--) {
7975 const xel *row = array + j * x_size;
7976 for (int i = 0; i < x_size; i++) {
7977 *p++ = (uchar)PPM_GETB(row[i]);
7978 *p++ = (uchar)PPM_GETG(row[i]);
7979 *p++ = (uchar)PPM_GETR(row[i]);
7980 }
7981 p += row_skip;
7982 }
7983 break;
7984
7985 case 4:
7986 if (img_has_alpha) {
7987 const xelval *alpha = pnmimage.get_alpha_array();
7988 for (int j = y_size-1; j >= 0; j--) {
7989 const xel *row = array + j * x_size;
7990 const xelval *alpha_row = alpha + j * x_size;
7991 for (int i = 0; i < x_size; i++) {
7992 *p++ = (uchar)PPM_GETB(row[i]);
7993 *p++ = (uchar)PPM_GETG(row[i]);
7994 *p++ = (uchar)PPM_GETR(row[i]);
7995 *p++ = (uchar)alpha_row[i];
7996 }
7997 p += row_skip;
7998 }
7999 } else {
8000 for (int j = y_size-1; j >= 0; j--) {
8001 const xel *row = array + j * x_size;
8002 for (int i = 0; i < x_size; i++) {
8003 *p++ = (uchar)PPM_GETB(row[i]);
8004 *p++ = (uchar)PPM_GETG(row[i]);
8005 *p++ = (uchar)PPM_GETR(row[i]);
8006 *p++ = (uchar)255;
8007 }
8008 p += row_skip;
8009 }
8010 }
8011 break;
8012
8013 default:
8014 nassertv(num_components >= 1 && num_components <= 4);
8015 break;
8016 }
8017
8018 } else if (maxval == 65535 && component_width == 2) {
8019 // Another possible case: two bytes per pixel, and the source image shows
8020 // a maxval of 65535. Again, no scaling is necessary.
8021 for (int j = y_size-1; j >= 0; j--) {
8022 for (int i = 0; i < x_size; i++) {
8023 if (is_grayscale) {
8024 store_unscaled_short(p, pnmimage.get_gray_val(i, j));
8025 } else {
8026 store_unscaled_short(p, pnmimage.get_blue_val(i, j));
8027 store_unscaled_short(p, pnmimage.get_green_val(i, j));
8028 store_unscaled_short(p, pnmimage.get_red_val(i, j));
8029 }
8030 if (has_alpha) {
8031 if (img_has_alpha) {
8032 store_unscaled_short(p, pnmimage.get_alpha_val(i, j));
8033 } else {
8034 store_unscaled_short(p, 65535);
8035 }
8036 }
8037 }
8038 p += row_skip;
8039 }
8040
8041 } else if (component_width == 1) {
8042 // A less common case: one byte per pixel, but the maxval is something
8043 // other than 255. In this case, we should scale the pixel values up to
8044 // the appropriate amount.
8045 double scale = 255.0 / (double)maxval;
8046
8047 for (int j = y_size-1; j >= 0; j--) {
8048 for (int i = 0; i < x_size; i++) {
8049 if (is_grayscale) {
8050 store_scaled_byte(p, pnmimage.get_gray_val(i, j), scale);
8051 } else {
8052 store_scaled_byte(p, pnmimage.get_blue_val(i, j), scale);
8053 store_scaled_byte(p, pnmimage.get_green_val(i, j), scale);
8054 store_scaled_byte(p, pnmimage.get_red_val(i, j), scale);
8055 }
8056 if (has_alpha) {
8057 if (img_has_alpha) {
8058 store_scaled_byte(p, pnmimage.get_alpha_val(i, j), scale);
8059 } else {
8060 store_unscaled_byte(p, 255);
8061 }
8062 }
8063 }
8064 p += row_skip;
8065 }
8066
8067 } else { // component_width == 2
8068 // Another uncommon case: two bytes per pixel, and the maxval is something
8069 // other than 65535. Again, we must scale the pixel values.
8070 double scale = 65535.0 / (double)maxval;
8071
8072 for (int j = y_size-1; j >= 0; j--) {
8073 for (int i = 0; i < x_size; i++) {
8074 if (is_grayscale) {
8075 store_scaled_short(p, pnmimage.get_gray_val(i, j), scale);
8076 } else {
8077 store_scaled_short(p, pnmimage.get_blue_val(i, j), scale);
8078 store_scaled_short(p, pnmimage.get_green_val(i, j), scale);
8079 store_scaled_short(p, pnmimage.get_red_val(i, j), scale);
8080 }
8081 if (has_alpha) {
8082 if (img_has_alpha) {
8083 store_scaled_short(p, pnmimage.get_alpha_val(i, j), 1.0);
8084 } else {
8085 store_unscaled_short(p, 65535);
8086 }
8087 }
8088 }
8089 p += row_skip;
8090 }
8091 }
8092}
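// Worked example (illustrative) for the scaling branches above: a source
// image with maxval 31 stored into an 8-bit texture uses
// scale = 255.0 / 31, so a channel value of 31 maps to 255 and a value of
// 16 maps to roughly 132.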
8093
8094/**
8095 * Internal method to convert pixel data from the indicated PfmFile into the
8096 * given ram_image.
8097 */
8098void Texture::
8099convert_from_pfm(PTA_uchar &image, size_t page_size, int z,
8100 const PfmFile &pfm, int num_components, int component_width) {
8101 nassertv(component_width == 4); // Currently only PN_float32 is expected.
8102 int x_size = pfm.get_x_size();
8103 int y_size = pfm.get_y_size();
8104
8105 int idx = page_size * z;
8106 nassertv(idx + page_size <= image.size());
8107 PN_float32 *p = (PN_float32 *)&image[idx];
8108
8109 switch (num_components) {
8110 case 1:
8111 {
8112 for (int j = y_size-1; j >= 0; j--) {
8113 for (int i = 0; i < x_size; i++) {
8114 p[0] = pfm.get_channel(i, j, 0);
8115 ++p;
8116 }
8117 }
8118 }
8119 break;
8120
8121 case 2:
8122 {
8123 for (int j = y_size-1; j >= 0; j--) {
8124 for (int i = 0; i < x_size; i++) {
8125 p[0] = pfm.get_channel(i, j, 0);
8126 p[1] = pfm.get_channel(i, j, 1);
8127 p += 2;
8128 }
8129 }
8130 }
8131 break;
8132
8133 case 3:
8134 {
8135 // RGB -> BGR
8136 for (int j = y_size-1; j >= 0; j--) {
8137 for (int i = 0; i < x_size; i++) {
8138 p[0] = pfm.get_channel(i, j, 2);
8139 p[1] = pfm.get_channel(i, j, 1);
8140 p[2] = pfm.get_channel(i, j, 0);
8141 p += 3;
8142 }
8143 }
8144 }
8145 break;
8146
8147 case 4:
8148 {
8149 // RGBA -> BGRA
8150 for (int j = y_size-1; j >= 0; j--) {
8151 for (int i = 0; i < x_size; i++) {
8152 p[0] = pfm.get_channel(i, j, 2);
8153 p[1] = pfm.get_channel(i, j, 1);
8154 p[2] = pfm.get_channel(i, j, 0);
8155 p[3] = pfm.get_channel(i, j, 3);
8156 p += 4;
8157 }
8158 }
8159 }
8160 break;
8161
8162 default:
8163 nassert_raise("unexpected channel count");
8164 return;
8165 }
8166
8167 nassertv((unsigned char *)p == &image[idx] + page_size);
8168}
8169
8170/**
8171 * Internal method to convert pixel data to the indicated PNMImage from the
8172 * given ram_image.
8173 */
8174bool Texture::
8175convert_to_pnmimage(PNMImage &pnmimage, int x_size, int y_size,
8176 int num_components, ComponentType component_type,
8177 bool is_srgb, CPTA_uchar image, size_t page_size, int z) {
8178 xelval maxval = 0xff;
8179 if (component_type != T_unsigned_byte && component_type != T_byte) {
8180 maxval = 0xffff;
8181 }
8182 ColorSpace color_space = is_srgb ? CS_sRGB : CS_linear;
8183 pnmimage.clear(x_size, y_size, num_components, maxval, nullptr, color_space);
8184 bool has_alpha = pnmimage.has_alpha();
8185 bool is_grayscale = pnmimage.is_grayscale();
8186
8187 int idx = page_size * z;
8188 nassertr(idx + page_size <= image.size(), false);
8189
8190 xel *array = pnmimage.get_array();
8191 xelval *alpha = pnmimage.get_alpha_array();
8192
8193 switch (component_type) {
8194 case T_unsigned_byte:
8195 if (is_grayscale) {
8196 const unsigned char *p = &image[idx];
8197 if (has_alpha) {
8198 for (int j = y_size-1; j >= 0; j--) {
8199 xel *row = array + j * x_size;
8200 xelval *alpha_row = alpha + j * x_size;
8201 for (int i = 0; i < x_size; i++) {
8202 PPM_PUTB(row[i], *p++);
8203 alpha_row[i] = *p++;
8204 }
8205 }
8206 } else {
8207 for (int j = y_size-1; j >= 0; j--) {
8208 xel *row = array + j * x_size;
8209 for (int i = 0; i < x_size; i++) {
8210 PPM_PUTB(row[i], *p++);
8211 }
8212 }
8213 }
8214 nassertr(p == &image[idx] + page_size, false);
8215 } else {
8216 const unsigned char *p = &image[idx];
8217 if (has_alpha) {
8218 for (int j = y_size-1; j >= 0; j--) {
8219 xel *row = array + j * x_size;
8220 xelval *alpha_row = alpha + j * x_size;
8221 for (int i = 0; i < x_size; i++) {
8222 PPM_PUTB(row[i], *p++);
8223 PPM_PUTG(row[i], *p++);
8224 PPM_PUTR(row[i], *p++);
8225 alpha_row[i] = *p++;
8226 }
8227 }
8228 } else {
8229 for (int j = y_size-1; j >= 0; j--) {
8230 xel *row = array + j * x_size;
8231 for (int i = 0; i < x_size; i++) {
8232 PPM_PUTB(row[i], *p++);
8233 PPM_PUTG(row[i], *p++);
8234 PPM_PUTR(row[i], *p++);
8235 }
8236 }
8237 }
8238 nassertr(p == &image[idx] + page_size, false);
8239 }
8240 break;
8241
8242 case T_unsigned_short:
8243 {
8244 const uint16_t *p = (const uint16_t *)&image[idx];
8245
8246 for (int j = y_size-1; j >= 0; j--) {
8247 xel *row = array + j * x_size;
8248 xelval *alpha_row = alpha + j * x_size;
8249 for (int i = 0; i < x_size; i++) {
8250 PPM_PUTB(row[i], *p++);
8251 if (!is_grayscale) {
8252 PPM_PUTG(row[i], *p++);
8253 PPM_PUTR(row[i], *p++);
8254 }
8255 if (has_alpha) {
8256 alpha_row[i] = *p++;
8257 }
8258 }
8259 }
8260 nassertr((const unsigned char *)p == &image[idx] + page_size, false);
8261 }
8262 break;
8263
8264 case T_unsigned_int:
8265 {
8266 const uint32_t *p = (const uint32_t *)&image[idx];
8267
8268 for (int j = y_size-1; j >= 0; j--) {
8269 xel *row = array + j * x_size;
8270 xelval *alpha_row = alpha + j * x_size;
8271 for (int i = 0; i < x_size; i++) {
8272 PPM_PUTB(row[i], (*p++) >> 16u);
8273 if (!is_grayscale) {
8274 PPM_PUTG(row[i], (*p++) >> 16u);
8275 PPM_PUTR(row[i], (*p++) >> 16u);
8276 }
8277 if (has_alpha) {
8278 alpha_row[i] = (*p++) >> 16u;
8279 }
8280 }
8281 }
8282 nassertr((const unsigned char *)p == &image[idx] + page_size, false);
8283 }
8284 break;
8285
8286 case T_half_float:
8287 {
8288 const unsigned char *p = &image[idx];
8289
8290 for (int j = y_size-1; j >= 0; j--) {
8291 for (int i = 0; i < x_size; i++) {
8292 pnmimage.set_blue(i, j, get_half_float(p));
8293 if (!is_grayscale) {
8294 pnmimage.set_green(i, j, get_half_float(p));
8295 pnmimage.set_red(i, j, get_half_float(p));
8296 }
8297 if (has_alpha) {
8298 pnmimage.set_alpha(i, j, get_half_float(p));
8299 }
8300 }
8301 }
8302 nassertr(p == &image[idx] + page_size, false);
8303 }
8304 break;
8305
8306 default:
8307 return false;
8308 }
8309
8310 return true;
8311}
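// Note (illustrative) on the T_unsigned_int branch above: each 32-bit
// component is shifted right by 16 so it fits the 16-bit maxval chosen at
// the top of the function; e.g. 0xffffffff becomes 0xffff and 0x80000000
// becomes 0x8000.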
8312
8313/**
8314 * Internal method to convert pixel data to the indicated PfmFile from the
8315 * given ram_image.
8316 */
8317bool Texture::
8318convert_to_pfm(PfmFile &pfm, int x_size, int y_size,
8319 int num_components, int component_width,
8320 CPTA_uchar image, size_t page_size, int z) {
8321 nassertr(component_width == 4, false); // Currently only PN_float32 is expected.
8322 pfm.clear(x_size, y_size, num_components);
8323
8324 int idx = page_size * z;
8325 nassertr(idx + page_size <= image.size(), false);
8326 const PN_float32 *p = (const PN_float32 *)&image[idx];
8327
8328 switch (num_components) {
8329 case 1:
8330 for (int j = y_size-1; j >= 0; j--) {
8331 for (int i = 0; i < x_size; i++) {
8332 pfm.set_channel(i, j, 0, p[0]);
8333 ++p;
8334 }
8335 }
8336 break;
8337
8338 case 2:
8339 for (int j = y_size-1; j >= 0; j--) {
8340 for (int i = 0; i < x_size; i++) {
8341 pfm.set_channel(i, j, 0, p[0]);
8342 pfm.set_channel(i, j, 1, p[1]);
8343 p += 2;
8344 }
8345 }
8346 break;
8347
8348 case 3:
8349 // BGR -> RGB
8350 for (int j = y_size-1; j >= 0; j--) {
8351 for (int i = 0; i < x_size; i++) {
8352 pfm.set_channel(i, j, 2, p[0]);
8353 pfm.set_channel(i, j, 1, p[1]);
8354 pfm.set_channel(i, j, 0, p[2]);
8355 p += 3;
8356 }
8357 }
8358 break;
8359
8360 case 4:
8361 // BGRA -> RGBA
8362 for (int j = y_size-1; j >= 0; j--) {
8363 for (int i = 0; i < x_size; i++) {
8364 pfm.set_channel(i, j, 2, p[0]);
8365 pfm.set_channel(i, j, 1, p[1]);
8366 pfm.set_channel(i, j, 0, p[2]);
8367 pfm.set_channel(i, j, 3, p[3]);
8368 p += 4;
8369 }
8370 }
8371 break;
8372
8373 default:
8374 nassert_raise("unexpected channel count");
8375 return false;
8376 }
8377
8378 nassertr((unsigned char *)p == &image[idx] + page_size, false);
8379 return true;
8380}
8381
8382/**
8383 * Called by read_dds for a DDS file in BGR8 format.
8384 */
8385PTA_uchar Texture::
8386read_dds_level_bgr8(Texture *tex, CData *cdata, const DDSHeader &header, int n, istream &in) {
8387 // This is in order B, G, R.
8388 int x_size = tex->do_get_expected_mipmap_x_size(cdata, n);
8389 int y_size = tex->do_get_expected_mipmap_y_size(cdata, n);
8390
8391 size_t size = tex->do_get_expected_ram_mipmap_page_size(cdata, n);
8392 size_t row_bytes = x_size * 3;
8393 PTA_uchar image = PTA_uchar::empty_array(size);
8394 for (int y = y_size - 1; y >= 0; --y) {
8395 unsigned char *p = image.p() + y * row_bytes;
8396 nassertr(p + row_bytes <= image.p() + size, PTA_uchar());
8397 in.read((char *)p, row_bytes);
8398 }
8399
8400 return image;
8401}
8402
8403/**
8404 * Called by read_dds for a DDS file in RGB8 format.
8405 */
8406PTA_uchar Texture::
8407read_dds_level_rgb8(Texture *tex, CData *cdata, const DDSHeader &header, int n, istream &in) {
8408 // This is in order R, G, B.
8409 int x_size = tex->do_get_expected_mipmap_x_size(cdata, n);
8410 int y_size = tex->do_get_expected_mipmap_y_size(cdata, n);
8411
8412 size_t size = tex->do_get_expected_ram_mipmap_page_size(cdata, n);
8413 size_t row_bytes = x_size * 3;
8414 PTA_uchar image = PTA_uchar::empty_array(size);
8415 for (int y = y_size - 1; y >= 0; --y) {
8416 unsigned char *p = image.p() + y * row_bytes;
8417 nassertr(p + row_bytes <= image.p() + size, PTA_uchar());
8418 in.read((char *)p, row_bytes);
8419
8420 // Now reverse the r, g, b triples.
8421 for (int x = 0; x < x_size; ++x) {
8422 unsigned char r = p[0];
8423 p[0] = p[2];
8424 p[2] = r;
8425 p += 3;
8426 }
8427 nassertr(p <= image.p() + size, PTA_uchar());
8428 }
8429
8430 return image;
8431}
8432
8433/**
8434 * Called by read_dds for a DDS file in ABGR8 format.
8435 */
8436PTA_uchar Texture::
8437read_dds_level_abgr8(Texture *tex, CData *cdata, const DDSHeader &header, int n, istream &in) {
8438 // This is laid out in order R, G, B, A.
8439 int x_size = tex->do_get_expected_mipmap_x_size(cdata, n);
8440 int y_size = tex->do_get_expected_mipmap_y_size(cdata, n);
8441
8442 size_t size = tex->do_get_expected_ram_mipmap_page_size(cdata, n);
8443 size_t row_bytes = x_size * 4;
8444 PTA_uchar image = PTA_uchar::empty_array(size);
8445 for (int y = y_size - 1; y >= 0; --y) {
8446 unsigned char *p = image.p() + y * row_bytes;
8447 in.read((char *)p, row_bytes);
8448
8449 uint32_t *pw = (uint32_t *)p;
8450 for (int x = 0; x < x_size; ++x) {
8451 uint32_t w = *pw;
8452#ifdef WORDS_BIGENDIAN
8453      // big-endian: convert R, G, B, A to B, G, R, A.
8454 w = ((w & 0xff00) << 16) | ((w & 0xff000000U) >> 16) | (w & 0xff00ff);
8455#else
8456      // little-endian: convert A, B, G, R to A, R, G, B.
8457 w = ((w & 0xff) << 16) | ((w & 0xff0000) >> 16) | (w & 0xff00ff00U);
8458#endif
8459 *pw = w;
8460 ++pw;
8461 }
8462 nassertr((unsigned char *)pw <= image.p() + size, PTA_uchar());
8463 }
8464
8465 return image;
8466}
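// Worked example (illustrative) of the little-endian swizzle above: the
// source bytes R, G, B, A are read as the 32-bit word 0xAABBGGRR; swapping
// the low and third bytes yields 0xAARRGGBB, which is written back out as
// the byte sequence B, G, R, A, i.e. Panda's internal BGRA order.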
8467
8468/**
8469 * Called by read_dds for a DDS file in RGBA8 format.
8470 */
8471PTA_uchar Texture::
8472read_dds_level_rgba8(Texture *tex, CData *cdata, const DDSHeader &header, int n, istream &in) {
8473 // This is actually laid out in order B, G, R, A.
8474 int x_size = tex->do_get_expected_mipmap_x_size(cdata, n);
8475 int y_size = tex->do_get_expected_mipmap_y_size(cdata, n);
8476
8477 size_t size = tex->do_get_expected_ram_mipmap_page_size(cdata, n);
8478 size_t row_bytes = x_size * 4;
8479 PTA_uchar image = PTA_uchar::empty_array(size);
8480 for (int y = y_size - 1; y >= 0; --y) {
8481 unsigned char *p = image.p() + y * row_bytes;
8482 nassertr(p + row_bytes <= image.p() + size, PTA_uchar());
8483 in.read((char *)p, row_bytes);
8484 }
8485
8486 return image;
8487}
8488
8489/**
8490 * Called by read_dds for a DDS file in ABGR16 format.
8491 */
8492PTA_uchar Texture::
8493read_dds_level_abgr16(Texture *tex, CData *cdata, const DDSHeader &header, int n, istream &in) {
8494 // This is laid out in order R, G, B, A.
8495 int x_size = tex->do_get_expected_mipmap_x_size(cdata, n);
8496 int y_size = tex->do_get_expected_mipmap_y_size(cdata, n);
8497
8498 size_t size = tex->do_get_expected_ram_mipmap_page_size(cdata, n);
8499 size_t row_bytes = x_size * 8;
8500 PTA_uchar image = PTA_uchar::empty_array(size);
8501 for (int y = y_size - 1; y >= 0; --y) {
8502 unsigned char *p = image.p() + y * row_bytes;
8503 in.read((char *)p, row_bytes);
8504
8505 uint16_t *pw = (uint16_t *)p;
8506 for (int x = 0; x < x_size; ++x) {
8507 swap(pw[0], pw[2]);
8508 pw += 4;
8509 }
8510 nassertr((unsigned char *)pw <= image.p() + size, PTA_uchar());
8511 }
8512
8513 return image;
8514}
8515
8516/**
8517 * Called by read_dds for a DDS file in ABGR32 format.
8518 */
8519PTA_uchar Texture::
8520read_dds_level_abgr32(Texture *tex, CData *cdata, const DDSHeader &header, int n, istream &in) {
8521 // This is laid out in order R, G, B, A.
8522 int x_size = tex->do_get_expected_mipmap_x_size(cdata, n);
8523 int y_size = tex->do_get_expected_mipmap_y_size(cdata, n);
8524
8525 size_t size = tex->do_get_expected_ram_mipmap_page_size(cdata, n);
8526 size_t row_bytes = x_size * 16;
8527 nassertr(row_bytes * y_size == size, PTA_uchar());
8528 PTA_uchar image = PTA_uchar::empty_array(size);
8529 for (int y = y_size - 1; y >= 0; --y) {
8530 unsigned char *p = image.p() + y * row_bytes;
8531 in.read((char *)p, row_bytes);
8532
8533 uint32_t *pw = (uint32_t *)p;
8534 for (int x = 0; x < x_size; ++x) {
8535 swap(pw[0], pw[2]);
8536 pw += 4;
8537 }
8538 nassertr((unsigned char *)pw <= image.p() + size, PTA_uchar());
8539 }
8540
8541 return image;
8542}
8543
8544/**
8545 * Called by read_dds for a DDS file that needs no transformations applied.
8546 */
8547PTA_uchar Texture::
8548read_dds_level_raw(Texture *tex, CData *cdata, const DDSHeader &header, int n, istream &in) {
8549 int x_size = tex->do_get_expected_mipmap_x_size(cdata, n);
8550 int y_size = tex->do_get_expected_mipmap_y_size(cdata, n);
8551
8552 size_t size = tex->do_get_expected_ram_mipmap_page_size(cdata, n);
8553 size_t row_bytes = x_size * cdata->_num_components * cdata->_component_width;
8554 nassertr(row_bytes * y_size == size, PTA_uchar());
8555 PTA_uchar image = PTA_uchar::empty_array(size);
8556 for (int y = y_size - 1; y >= 0; --y) {
8557 unsigned char *p = image.p() + y * row_bytes;
8558 in.read((char *)p, row_bytes);
8559 }
8560
8561 return image;
8562}
8563
8564/**
8565 * Called by read_dds for a DDS file whose format isn't one we've specifically
8566 * optimized.
8567 */
8568PTA_uchar Texture::
8569read_dds_level_generic_uncompressed(Texture *tex, CData *cdata, const DDSHeader &header,
8570 int n, istream &in) {
8571 int x_size = tex->do_get_expected_mipmap_x_size(cdata, n);
8572 int y_size = tex->do_get_expected_mipmap_y_size(cdata, n);
8573
8574 int pitch = (x_size * header.pf.rgb_bitcount) / 8;
8575
8576 // MS says the pitch can be supplied in the header file and must be DWORD
8577 // aligned, but this appears to apply to level 0 mipmaps only (where it
8578 // almost always will be anyway). Other mipmap levels seem to be tightly
8579 // packed, but there isn't a separate pitch for each mipmap level. Weird.
8580 if (n == 0) {
8581 pitch = ((pitch + 3) / 4) * 4;
8582 if (header.dds_flags & DDSD_PITCH) {
8583 pitch = header.pitch;
8584 }
8585 }
8586
8587 int bpp = header.pf.rgb_bitcount / 8;
8588 int skip_bytes = pitch - (bpp * x_size);
8589 nassertr(skip_bytes >= 0, PTA_uchar());
8590
8591 unsigned int r_mask = header.pf.r_mask;
8592 unsigned int g_mask = header.pf.g_mask;
8593 unsigned int b_mask = header.pf.b_mask;
8594 unsigned int a_mask = header.pf.a_mask;
8595
8596 // Determine the number of bits to shift each mask to the right so that the
8597 // lowest on bit is at bit 0.
8598 int r_shift = get_lowest_on_bit(r_mask);
8599 int g_shift = get_lowest_on_bit(g_mask);
8600 int b_shift = get_lowest_on_bit(b_mask);
8601 int a_shift = get_lowest_on_bit(a_mask);
8602
8603 // Then determine the scale factor required to raise the highest color value
8604 // to 0xff000000.
8605 unsigned int r_scale = 0;
8606 if (r_mask != 0) {
8607 r_scale = 0xff000000 / (r_mask >> r_shift);
8608 }
8609 unsigned int g_scale = 0;
8610 if (g_mask != 0) {
8611 g_scale = 0xff000000 / (g_mask >> g_shift);
8612 }
8613 unsigned int b_scale = 0;
8614 if (b_mask != 0) {
8615 b_scale = 0xff000000 / (b_mask >> b_shift);
8616 }
8617 unsigned int a_scale = 0;
8618 if (a_mask != 0) {
8619 a_scale = 0xff000000 / (a_mask >> a_shift);
8620 }
8621
8622 bool add_alpha = has_alpha(cdata->_format);
8623
8624 size_t size = tex->do_get_expected_ram_mipmap_page_size(cdata, n);
8625 size_t row_bytes = x_size * cdata->_num_components;
8626 PTA_uchar image = PTA_uchar::empty_array(size);
8627 for (int y = y_size - 1; y >= 0; --y) {
8628 unsigned char *p = image.p() + y * row_bytes;
8629 for (int x = 0; x < x_size; ++x) {
8630
8631 // Read a little-endian numeric value of bpp bytes.
8632 unsigned int pixel = 0;
8633 int shift = 0;
8634 for (int bi = 0; bi < bpp; ++bi) {
8635 unsigned int ch = (unsigned char)in.get();
8636 pixel |= (ch << shift);
8637 shift += 8;
8638 }
8639
8640 // Then break apart that value into its R, G, B, and maybe A components.
8641 unsigned int r = (((pixel & r_mask) >> r_shift) * r_scale) >> 24;
8642 unsigned int g = (((pixel & g_mask) >> g_shift) * g_scale) >> 24;
8643 unsigned int b = (((pixel & b_mask) >> b_shift) * b_scale) >> 24;
8644
8645 // Store the components in the Texture's image data.
8646 store_unscaled_byte(p, b);
8647 store_unscaled_byte(p, g);
8648 store_unscaled_byte(p, r);
8649 if (add_alpha) {
8650 unsigned int a = (((pixel & a_mask) >> a_shift) * a_scale) >> 24;
8651 store_unscaled_byte(p, a);
8652 }
8653 }
8654 nassertr(p <= image.p() + size, PTA_uchar());
8655 for (int bi = 0; bi < skip_bytes; ++bi) {
8656 in.get();
8657 }
8658 }
8659
8660 return image;
8661}
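// Worked example (illustrative) of the mask/shift/scale scheme above, using
// a 4-bit alpha mask as in an A4R4G4B4 pixel format: a_mask = 0xf000,
// a_shift = 12, and a_scale = 0xff000000 / 15 = 0x11000000, so an alpha
// nibble of 0xf expands to (0xf * 0x11000000) >> 24 = 0xff and 0x8 expands
// to 0x88.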
8662
8663/**
8664 * Called by read_dds for a DDS file in uncompressed luminance or luminance-
8665 * alpha format.
8666 */
8667PTA_uchar Texture::
8668read_dds_level_luminance_uncompressed(Texture *tex, CData *cdata, const DDSHeader &header,
8669 int n, istream &in) {
8670 int x_size = tex->do_get_expected_mipmap_x_size(cdata, n);
8671 int y_size = tex->do_get_expected_mipmap_y_size(cdata, n);
8672
8673 int pitch = (x_size * header.pf.rgb_bitcount) / 8;
8674
8675 // MS says the pitch can be supplied in the header file and must be DWORD
8676 // aligned, but this appears to apply to level 0 mipmaps only (where it
8677 // almost always will be anyway). Other mipmap levels seem to be tightly
8678 // packed, but there isn't a separate pitch for each mipmap level. Weird.
8679 if (n == 0) {
8680 pitch = ((pitch + 3) / 4) * 4;
8681 if (header.dds_flags & DDSD_PITCH) {
8682 pitch = header.pitch;
8683 }
8684 }
8685
8686 int bpp = header.pf.rgb_bitcount / 8;
8687 int skip_bytes = pitch - (bpp * x_size);
8688 nassertr(skip_bytes >= 0, PTA_uchar());
8689
8690 unsigned int r_mask = header.pf.r_mask;
8691 unsigned int a_mask = header.pf.a_mask;
8692
8693 // Determine the number of bits to shift each mask to the right so that the
8694 // lowest on bit is at bit 0.
8695 int r_shift = get_lowest_on_bit(r_mask);
8696 int a_shift = get_lowest_on_bit(a_mask);
8697
8698 // Then determine the scale factor required to raise the highest color value
8699 // to 0xff000000.
8700 unsigned int r_scale = 0;
8701 if (r_mask != 0) {
8702 r_scale = 0xff000000 / (r_mask >> r_shift);
8703 }
8704 unsigned int a_scale = 0;
8705 if (a_mask != 0) {
8706 a_scale = 0xff000000 / (a_mask >> a_shift);
8707 }
8708
8709 bool add_alpha = has_alpha(cdata->_format);
8710
8711 size_t size = tex->do_get_expected_ram_mipmap_page_size(cdata, n);
8712 size_t row_bytes = x_size * cdata->_num_components;
8713 PTA_uchar image = PTA_uchar::empty_array(size);
8714 for (int y = y_size - 1; y >= 0; --y) {
8715 unsigned char *p = image.p() + y * row_bytes;
8716 for (int x = 0; x < x_size; ++x) {
8717
8718 // Read a little-endian numeric value of bpp bytes.
8719 unsigned int pixel = 0;
8720 int shift = 0;
8721 for (int bi = 0; bi < bpp; ++bi) {
8722 unsigned int ch = (unsigned char)in.get();
8723 pixel |= (ch << shift);
8724 shift += 8;
8725 }
8726
8727 unsigned int r = (((pixel & r_mask) >> r_shift) * r_scale) >> 24;
8728
8729 // Store the components in the Texture's image data.
8730 store_unscaled_byte(p, r);
8731 if (add_alpha) {
8732 unsigned int a = (((pixel & a_mask) >> a_shift) * a_scale) >> 24;
8733 store_unscaled_byte(p, a);
8734 }
8735 }
8736 nassertr(p <= image.p() + size, PTA_uchar());
8737 for (int bi = 0; bi < skip_bytes; ++bi) {
8738 in.get();
8739 }
8740 }
8741
8742 return image;
8743}
8744
8745/**
8746 * Called by read_dds for DXT1 file format.
8747 */
8748PTA_uchar Texture::
8749read_dds_level_bc1(Texture *tex, CData *cdata, const DDSHeader &header, int n, istream &in) {
8750 int x_size = tex->do_get_expected_mipmap_x_size(cdata, n);
8751 int y_size = tex->do_get_expected_mipmap_y_size(cdata, n);
8752
8753 static const int div = 4;
8754 static const int block_bytes = 8;
8755
8756 // The DXT1 image is divided into num_rows x num_cols blocks, where each
8757 // block represents 4x4 pixels.
8758 int num_cols = max(div, x_size) / div;
8759 int num_rows = max(div, y_size) / div;
8760 int row_length = num_cols * block_bytes;
8761 int linear_size = row_length * num_rows;
8762
8763 if (n == 0) {
8764 if (header.dds_flags & DDSD_LINEARSIZE) {
8765 nassertr(linear_size == (int)header.pitch, PTA_uchar());
8766 }
8767 }
8768
8769 PTA_uchar image = PTA_uchar::empty_array(linear_size);
8770
8771 if (y_size >= 4) {
8772 // We have to flip the image as we read it, because of DirectX's inverted
8773 // sense of up. That means we (a) reverse the order of the rows of blocks
8774 // . . .
8775 for (int ri = num_rows - 1; ri >= 0; --ri) {
8776 unsigned char *p = image.p() + row_length * ri;
8777 in.read((char *)p, row_length);
8778
8779 for (int ci = 0; ci < num_cols; ++ci) {
8780 // . . . and (b) within each block, we reverse the 4 individual rows
8781 // of 4 pixels.
8782 uint32_t *cells = (uint32_t *)p;
8783 uint32_t w = cells[1];
8784 w = ((w & 0xff) << 24) | ((w & 0xff00) << 8) | ((w & 0xff0000) >> 8) | ((w & 0xff000000U) >> 24);
8785 cells[1] = w;
8786
8787 p += block_bytes;
8788 }
8789 }
8790
8791 } else if (y_size >= 2) {
8792 // To invert a two-pixel high image, we just flip two rows within a cell.
8793 unsigned char *p = image.p();
8794 in.read((char *)p, row_length);
8795
8796 for (int ci = 0; ci < num_cols; ++ci) {
8797 uint32_t *cells = (uint32_t *)p;
8798 uint32_t w = cells[1];
8799 w = ((w & 0xff) << 8) | ((w & 0xff00) >> 8);
8800 cells[1] = w;
8801
8802 p += block_bytes;
8803 }
8804
8805 } else if (y_size >= 1) {
8806 // No need to invert a one-pixel-high image.
8807 unsigned char *p = image.p();
8808 in.read((char *)p, row_length);
8809 }
8810
8811 return image;
8812}
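// Worked example (illustrative) of the flip above: the second 32-bit word of
// a DXT1 block packs the four rows of 2-bit selectors as one byte per row.
// If those bytes are r0, r1, r2, r3 from top to bottom, the byte reversal
// stores them back as r3, r2, r1, r0, flipping the block vertically while
// leaving the two 16-bit reference colors in the first word untouched.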
8813
8814/**
8815 * Called by read_dds for DXT2 or DXT3 file format.
8816 */
8817PTA_uchar Texture::
8818read_dds_level_bc2(Texture *tex, CData *cdata, const DDSHeader &header, int n, istream &in) {
8819 int x_size = tex->do_get_expected_mipmap_x_size(cdata, n);
8820 int y_size = tex->do_get_expected_mipmap_y_size(cdata, n);
8821
8822 static const int div = 4;
8823 static const int block_bytes = 16;
8824
8825 // The DXT3 image is divided into num_rows x num_cols blocks, where each
8826 // block represents 4x4 pixels. Unlike DXT1, each block consists of two
8827 // 8-byte chunks, representing the alpha and color separately.
8828 int num_cols = max(div, x_size) / div;
8829 int num_rows = max(div, y_size) / div;
8830 int row_length = num_cols * block_bytes;
8831 int linear_size = row_length * num_rows;
8832
8833 if (n == 0) {
8834 if (header.dds_flags & DDSD_LINEARSIZE) {
8835 nassertr(linear_size == (int)header.pitch, PTA_uchar());
8836 }
8837 }
8838
8839 PTA_uchar image = PTA_uchar::empty_array(linear_size);
8840
8841 if (y_size >= 4) {
8842 // We have to flip the image as we read it, because of DirectX's inverted
8843 // sense of up. That means we (a) reverse the order of the rows of blocks
8844 // . . .
8845 for (int ri = num_rows - 1; ri >= 0; --ri) {
8846 unsigned char *p = image.p() + row_length * ri;
8847 in.read((char *)p, row_length);
8848
8849 for (int ci = 0; ci < num_cols; ++ci) {
8850 // . . . and (b) within each block, we reverse the 4 individual rows
8851 // of 4 pixels.
8852 uint32_t *cells = (uint32_t *)p;
8853
8854 // Alpha. The block is four 16-bit words of pixel data.
8855 uint32_t w0 = cells[0];
8856 uint32_t w1 = cells[1];
8857 w0 = ((w0 & 0xffff) << 16) | ((w0 & 0xffff0000U) >> 16);
8858 w1 = ((w1 & 0xffff) << 16) | ((w1 & 0xffff0000U) >> 16);
8859 cells[0] = w1;
8860 cells[1] = w0;
8861
8862 // Color. Only the second 32-bit dword of the color block represents
8863 // the pixel data.
8864 uint32_t w = cells[3];
8865 w = ((w & 0xff) << 24) | ((w & 0xff00) << 8) | ((w & 0xff0000) >> 8) | ((w & 0xff000000U) >> 24);
8866 cells[3] = w;
8867
8868 p += block_bytes;
8869 }
8870 }
8871
8872 } else if (y_size >= 2) {
8873 // To invert a two-pixel high image, we just flip two rows within a cell.
8874 unsigned char *p = image.p();
8875 in.read((char *)p, row_length);
8876
8877 for (int ci = 0; ci < num_cols; ++ci) {
8878 uint32_t *cells = (uint32_t *)p;
8879
8880 uint32_t w0 = cells[0];
8881 w0 = ((w0 & 0xffff) << 16) | ((w0 & 0xffff0000U) >> 16);
8882 cells[0] = w0;
8883
8884 uint32_t w = cells[3];
8885 w = ((w & 0xff) << 8) | ((w & 0xff00) >> 8);
8886 cells[3] = w;
8887
8888 p += block_bytes;
8889 }
8890
8891 } else if (y_size >= 1) {
8892 // No need to invert a one-pixel-high image.
8893 unsigned char *p = image.p();
8894 in.read((char *)p, row_length);
8895 }
8896
8897 return image;
8898}
8899
8900/**
8901 * Called by read_dds for DXT4 or DXT5 file format.
8902 */
8903PTA_uchar Texture::
8904read_dds_level_bc3(Texture *tex, CData *cdata, const DDSHeader &header, int n, istream &in) {
8905 int x_size = tex->do_get_expected_mipmap_x_size(cdata, n);
8906 int y_size = tex->do_get_expected_mipmap_y_size(cdata, n);
8907
8908 static const int div = 4;
8909 static const int block_bytes = 16;
8910
8911  // The DXT5 image is similar to DXT3, in that each 4x4 block of pixels
8912 // consists of an alpha block and a color block, but the layout of the alpha
8913 // block is different.
8914 int num_cols = max(div, x_size) / div;
8915 int num_rows = max(div, y_size) / div;
8916 int row_length = num_cols * block_bytes;
8917 int linear_size = row_length * num_rows;
8918
8919 if (n == 0) {
8920 if (header.dds_flags & DDSD_LINEARSIZE) {
8921 nassertr(linear_size == (int)header.pitch, PTA_uchar());
8922 }
8923 }
8924
8925 PTA_uchar image = PTA_uchar::empty_array(linear_size);
8926
8927 if (y_size >= 4) {
8928 // We have to flip the image as we read it, because of DirectX's inverted
8929 // sense of up. That means we (a) reverse the order of the rows of blocks
8930 // . . .
8931 for (int ri = num_rows - 1; ri >= 0; --ri) {
8932 unsigned char *p = image.p() + row_length * ri;
8933 in.read((char *)p, row_length);
8934
8935 for (int ci = 0; ci < num_cols; ++ci) {
8936 // . . . and (b) within each block, we reverse the 4 individual rows
8937 // of 4 pixels.
8938 uint32_t *cells = (uint32_t *)p;
8939
8940 // Alpha. The block is one 16-bit word of reference values, followed
8941        // by six bytes of pixel values, in 12-bit rows. Tricky to invert.
8942 unsigned char p2 = p[2];
8943 unsigned char p3 = p[3];
8944 unsigned char p4 = p[4];
8945 unsigned char p5 = p[5];
8946 unsigned char p6 = p[6];
8947 unsigned char p7 = p[7];
8948
8949 p[2] = ((p7 & 0xf) << 4) | ((p6 & 0xf0) >> 4);
8950 p[3] = ((p5 & 0xf) << 4) | ((p7 & 0xf0) >> 4);
8951 p[4] = ((p6 & 0xf) << 4) | ((p5 & 0xf0) >> 4);
8952 p[5] = ((p4 & 0xf) << 4) | ((p3 & 0xf0) >> 4);
8953 p[6] = ((p2 & 0xf) << 4) | ((p4 & 0xf0) >> 4);
8954 p[7] = ((p3 & 0xf) << 4) | ((p2 & 0xf0) >> 4);
8955
8956 // Color. Only the second 32-bit dword of the color block represents
8957 // the pixel data.
8958 uint32_t w = cells[3];
8959 w = ((w & 0xff) << 24) | ((w & 0xff00) << 8) | ((w & 0xff0000) >> 8) | ((w & 0xff000000U) >> 24);
8960 cells[3] = w;
8961
8962 p += block_bytes;
8963 }
8964 }
8965
8966 } else if (y_size >= 2) {
8967 // To invert a two-pixel high image, we just flip two rows within a cell.
8968 unsigned char *p = image.p();
8969 in.read((char *)p, row_length);
8970
8971 for (int ci = 0; ci < num_cols; ++ci) {
8972 uint32_t *cells = (uint32_t *)p;
8973
8974 unsigned char p2 = p[2];
8975 unsigned char p3 = p[3];
8976 unsigned char p4 = p[4];
8977
8978 p[2] = ((p4 & 0xf) << 4) | ((p3 & 0xf0) >> 4);
8979 p[3] = ((p2 & 0xf) << 4) | ((p4 & 0xf0) >> 4);
8980 p[4] = ((p3 & 0xf) << 4) | ((p2 & 0xf0) >> 4);
8981
8982 uint32_t w0 = cells[0];
8983 w0 = ((w0 & 0xffff) << 16) | ((w0 & 0xffff0000U) >> 16);
8984 cells[0] = w0;
8985
8986 uint32_t w = cells[3];
8987 w = ((w & 0xff) << 8) | ((w & 0xff00) >> 8);
8988 cells[3] = w;
8989
8990 p += block_bytes;
8991 }
8992
8993 } else if (y_size >= 1) {
8994 // No need to invert a one-pixel-high image.
8995 unsigned char *p = image.p();
8996 in.read((char *)p, row_length);
8997 }
8998
8999 return image;
9000}
9001
9002/**
9003 * Called by read_dds for ATI1 compression.
9004 */
9005PTA_uchar Texture::
9006read_dds_level_bc4(Texture *tex, CData *cdata, const DDSHeader &header, int n, istream &in) {
9007 int x_size = tex->do_get_expected_mipmap_x_size(cdata, n);
9008 int y_size = tex->do_get_expected_mipmap_y_size(cdata, n);
9009
9010 static const int div = 4;
9011 static const int block_bytes = 8;
9012
9013 // The ATI1 (BC4) format uses the same compression mechanism as the alpha
9014 // channel of DXT5.
9015 int num_cols = max(div, x_size) / div;
9016 int num_rows = max(div, y_size) / div;
9017 int row_length = num_cols * block_bytes;
9018 int linear_size = row_length * num_rows;
9019
9020 if (n == 0) {
9021 if (header.dds_flags & DDSD_LINEARSIZE) {
9022 nassertr(linear_size == (int)header.pitch, PTA_uchar());
9023 }
9024 }
9025
9026 PTA_uchar image = PTA_uchar::empty_array(linear_size);
9027
9028 if (y_size >= 4) {
9029 // We have to flip the image as we read it, because of DirectX's inverted
9030 // sense of up. That means we (a) reverse the order of the rows of blocks
9031 // . . .
9032 for (int ri = num_rows - 1; ri >= 0; --ri) {
9033 unsigned char *p = image.p() + row_length * ri;
9034 in.read((char *)p, row_length);
9035
9036 for (int ci = 0; ci < num_cols; ++ci) {
9037 // . . . and (b) within each block, we reverse the 4 individual rows
9038 // of 4 pixels. The block is one 16-bit word of reference values,
9039        // followed by six bytes of pixel values, in 12-bit rows. Tricky to
9040 // invert.
9041 unsigned char p2 = p[2];
9042 unsigned char p3 = p[3];
9043 unsigned char p4 = p[4];
9044 unsigned char p5 = p[5];
9045 unsigned char p6 = p[6];
9046 unsigned char p7 = p[7];
9047
9048 p[2] = ((p7 & 0xf) << 4) | ((p6 & 0xf0) >> 4);
9049 p[3] = ((p5 & 0xf) << 4) | ((p7 & 0xf0) >> 4);
9050 p[4] = ((p6 & 0xf) << 4) | ((p5 & 0xf0) >> 4);
9051 p[5] = ((p4 & 0xf) << 4) | ((p3 & 0xf0) >> 4);
9052 p[6] = ((p2 & 0xf) << 4) | ((p4 & 0xf0) >> 4);
9053 p[7] = ((p3 & 0xf) << 4) | ((p2 & 0xf0) >> 4);
9054
9055 p += block_bytes;
9056 }
9057 }
9058
9059 } else if (y_size >= 2) {
9060 // To invert a two-pixel high image, we just flip two rows within a cell.
9061 unsigned char *p = image.p();
9062 in.read((char *)p, row_length);
9063
9064 for (int ci = 0; ci < num_cols; ++ci) {
9065 unsigned char p2 = p[2];
9066 unsigned char p3 = p[3];
9067 unsigned char p4 = p[4];
9068
9069 p[2] = ((p4 & 0xf) << 4) | ((p3 & 0xf0) >> 4);
9070 p[3] = ((p2 & 0xf) << 4) | ((p4 & 0xf0) >> 4);
9071 p[4] = ((p3 & 0xf) << 4) | ((p2 & 0xf0) >> 4);
9072
9073 p += block_bytes;
9074 }
9075
9076 } else if (y_size >= 1) {
9077 // No need to invert a one-pixel-high image.
9078 unsigned char *p = image.p();
9079 in.read((char *)p, row_length);
9080 }
9081
9082 return image;
9083}
9084
9085/**
9086 * Called by read_dds for ATI2 compression.
9087 */
9088PTA_uchar Texture::
9089read_dds_level_bc5(Texture *tex, CData *cdata, const DDSHeader &header, int n, istream &in) {
9090 int x_size = tex->do_get_expected_mipmap_x_size(cdata, n);
9091 int y_size = tex->do_get_expected_mipmap_y_size(cdata, n);
9092
9093 // The ATI2 (BC5) format uses the same compression mechanism as the ATI1
9094 // (BC4) format, but doubles the channels.
9095 int num_cols = max(4, x_size) / 2;
9096 int num_rows = max(4, y_size) / 4;
9097 int row_length = num_cols * 8;
9098 int linear_size = row_length * num_rows;
9099
9100 if (n == 0) {
9101 if (header.dds_flags & DDSD_LINEARSIZE) {
9102 nassertr(linear_size == (int)header.pitch, PTA_uchar());
9103 }
9104 }
9105
9106 PTA_uchar image = PTA_uchar::empty_array(linear_size);
9107
9108 if (y_size >= 4) {
9109 // We have to flip the image as we read it, because of DirectX's inverted
9110 // sense of up. That means we (a) reverse the order of the rows of blocks
9111 // . . .
9112 for (int ri = num_rows - 1; ri >= 0; --ri) {
9113 unsigned char *p = image.p() + row_length * ri;
9114 in.read((char *)p, row_length);
9115
9116 for (int ci = 0; ci < num_cols; ++ci) {
9117 // . . . and (b) within each block, we reverse the 4 individual rows
9118 // of 4 pixels. The block is one 16-bit word of reference values,
9119        // followed by six bytes of pixel values, in 12-bit rows. Tricky to
9120 // invert.
9121 unsigned char p2 = p[2];
9122 unsigned char p3 = p[3];
9123 unsigned char p4 = p[4];
9124 unsigned char p5 = p[5];
9125 unsigned char p6 = p[6];
9126 unsigned char p7 = p[7];
9127
9128 p[2] = ((p7 & 0xf) << 4) | ((p6 & 0xf0) >> 4);
9129 p[3] = ((p5 & 0xf) << 4) | ((p7 & 0xf0) >> 4);
9130 p[4] = ((p6 & 0xf) << 4) | ((p5 & 0xf0) >> 4);
9131 p[5] = ((p4 & 0xf) << 4) | ((p3 & 0xf0) >> 4);
9132 p[6] = ((p2 & 0xf) << 4) | ((p4 & 0xf0) >> 4);
9133 p[7] = ((p3 & 0xf) << 4) | ((p2 & 0xf0) >> 4);
9134
9135 p += 8;
9136 }
9137 }
9138
9139 } else if (y_size >= 2) {
9140 // To invert a two-pixel high image, we just flip two rows within a cell.
9141 unsigned char *p = image.p();
9142 in.read((char *)p, row_length);
9143
9144 for (int ci = 0; ci < num_cols; ++ci) {
9145 unsigned char p2 = p[2];
9146 unsigned char p3 = p[3];
9147 unsigned char p4 = p[4];
9148
9149 p[2] = ((p4 & 0xf) << 4) | ((p3 & 0xf0) >> 4);
9150 p[3] = ((p2 & 0xf) << 4) | ((p4 & 0xf0) >> 4);
9151 p[4] = ((p3 & 0xf) << 4) | ((p2 & 0xf0) >> 4);
9152
9153 p += 8;
9154 }
9155
9156 } else if (y_size >= 1) {
9157 // No need to invert a one-pixel-high image.
9158 unsigned char *p = image.p();
9159 in.read((char *)p, row_length);
9160 }
9161
9162 return image;
9163}
9164
9165/**
9166 * Removes the indicated PreparedGraphicsObjects table from the Texture's
9167 * table, without actually releasing the texture. This is intended to be
9168 * called only from PreparedGraphicsObjects::release_texture(); it should
9169 * never be called by user code.
9170 */
9171void Texture::
9172clear_prepared(int view, PreparedGraphicsObjects *prepared_objects) {
9173 PreparedViews::iterator pvi;
9174 pvi = _prepared_views.find(prepared_objects);
9175 if (pvi != _prepared_views.end()) {
9176 Contexts &contexts = (*pvi).second;
9177 Contexts::iterator ci;
9178 ci = contexts.find(view);
9179 if (ci != contexts.end()) {
9180 contexts.erase(ci);
9181 }
9182
9183 if (contexts.empty()) {
9184 _prepared_views.erase(pvi);
9185 }
9186 }
9187}
9188
9189/**
9190 * Reduces the number of channels in the texture, if necessary, according to
9191 * num_channels.
9192 */
9193void Texture::
9194consider_downgrade(PNMImage &pnmimage, int num_channels, const string &name) {
9195 if (num_channels != 0 && num_channels < pnmimage.get_num_channels()) {
9196 // One special case: we can't reduce from 3 to 2 components, since that
9197 // would require adding an alpha channel.
9198 if (pnmimage.get_num_channels() == 3 && num_channels == 2) {
9199 return;
9200 }
9201
9202 gobj_cat.info()
9203 << "Downgrading " << name << " from "
9204 << pnmimage.get_num_channels() << " components to "
9205 << num_channels << ".\n";
9206 pnmimage.set_num_channels(num_channels);
9207 }
9208}
9209
9210/**
9211 * Called by generate_simple_ram_image(), this compares the two PNMImages
9212 * pixel-by-pixel. If they're similar enough (within a given threshold),
9213 * returns true.
9214 */
9215bool Texture::
9216compare_images(const PNMImage &a, const PNMImage &b) {
9217 nassertr(a.get_maxval() == 255 && b.get_maxval() == 255, false);
9218 nassertr(a.get_num_channels() == 4 && b.get_num_channels() == 4, false);
9219 nassertr(a.get_x_size() == b.get_x_size() &&
9220 a.get_y_size() == b.get_y_size(), false);
9221
9222 const xel *a_array = a.get_array();
9223 const xel *b_array = b.get_array();
9224 const xelval *a_alpha = a.get_alpha_array();
9225 const xelval *b_alpha = b.get_alpha_array();
9226
9227 int x_size = a.get_x_size();
9228
9229 int delta = 0;
9230 for (int yi = 0; yi < a.get_y_size(); ++yi) {
9231 const xel *a_row = a_array + yi * x_size;
9232 const xel *b_row = b_array + yi * x_size;
9233 const xelval *a_alpha_row = a_alpha + yi * x_size;
9234 const xelval *b_alpha_row = b_alpha + yi * x_size;
9235 for (int xi = 0; xi < x_size; ++xi) {
9236 delta += abs(PPM_GETR(a_row[xi]) - PPM_GETR(b_row[xi]));
9237 delta += abs(PPM_GETG(a_row[xi]) - PPM_GETG(b_row[xi]));
9238 delta += abs(PPM_GETB(a_row[xi]) - PPM_GETB(b_row[xi]));
9239 delta += abs(a_alpha_row[xi] - b_alpha_row[xi]);
9240 }
9241 }
9242
9243 double average_delta = (double)delta / ((double)a.get_x_size() * (double)b.get_y_size() * (double)a.get_maxval());
9244 return (average_delta <= simple_image_threshold);
9245}
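// Worked example (illustrative): for two 16 x 16 RGBA images in which every
// channel of every pixel differs by 2, delta = 16 * 16 * 4 * 2 = 2048 and
// average_delta = 2048 / (16 * 16 * 255), roughly 0.031, so the images are
// considered similar only if simple_image_threshold is at least that value.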
9246
9247/**
9248 * Generates the next mipmap level from the previous one. If there are
9249 * multiple pages (e.g. a cube map), generates each page independently.
9250 *
9251 * x_size and y_size are the size of the previous level. They need not be a
9252 * power of 2, or even a multiple of 2.
9253 *
9254 * Assumes the lock is already held.
9255 */
9256void Texture::
9257do_filter_2d_mipmap_pages(const CData *cdata,
9258 Texture::RamImage &to, const Texture::RamImage &from,
9259 int x_size, int y_size) const {
9260 Filter2DComponent *filter_component;
9261 Filter2DComponent *filter_alpha;
9262
9263 if (is_srgb(cdata->_format)) {
9264 // We currently only support sRGB mipmap generation for unsigned byte
9265 // textures, due to our use of a lookup table.
9266 nassertv(cdata->_component_type == T_unsigned_byte);
9267
9268 if (has_sse2_sRGB_encode()) {
9269 filter_component = &filter_2d_unsigned_byte_srgb_sse2;
9270 } else {
9271 filter_component = &filter_2d_unsigned_byte_srgb;
9272 }
9273
9274 // Alpha is always linear.
9275 filter_alpha = &filter_2d_unsigned_byte;
9276
9277 } else {
9278 switch (cdata->_component_type) {
9279 case T_unsigned_byte:
9280 filter_component = &filter_2d_unsigned_byte;
9281 break;
9282
9283 case T_unsigned_short:
9284 filter_component = &filter_2d_unsigned_short;
9285 break;
9286
9287 case T_float:
9288 filter_component = &filter_2d_float;
9289 break;
9290
9291 default:
9292 gobj_cat.error()
9293 << "Unable to generate mipmaps for 2D texture with component type "
9294        << cdata->_component_type << "!\n";
9295 return;
9296 }
9297 filter_alpha = filter_component;
9298 }
9299
9300 size_t pixel_size = cdata->_num_components * cdata->_component_width;
9301 size_t row_size = (size_t)x_size * pixel_size;
9302
9303 int to_x_size = max(x_size >> 1, 1);
9304 int to_y_size = max(y_size >> 1, 1);
9305
9306 size_t to_row_size = (size_t)to_x_size * pixel_size;
9307 to._page_size = (size_t)to_y_size * to_row_size;
9308 to._image = PTA_uchar::empty_array(to._page_size * cdata->_z_size * cdata->_num_views, get_class_type());
9309
9310 bool alpha = has_alpha(cdata->_format);
9311 int num_color_components = cdata->_num_components;
9312 if (alpha) {
9313 --num_color_components;
9314 }
9315
9316 int num_pages = cdata->_z_size * cdata->_num_views;
9317 for (int z = 0; z < num_pages; ++z) {
9318 // For each level.
9319 unsigned char *p = to._image.p() + z * to._page_size;
9320 nassertv(p <= to._image.p() + to._image.size() + to._page_size);
9321 const unsigned char *q = from._image.p() + z * from._page_size;
9322 nassertv(q <= from._image.p() + from._image.size() + from._page_size);
9323 if (y_size != 1) {
9324 int y;
9325 for (y = 0; y < y_size - 1; y += 2) {
9326 // For each row.
9327 nassertv(p == to._image.p() + z * to._page_size + (y / 2) * to_row_size);
9328 nassertv(q == from._image.p() + z * from._page_size + y * row_size);
9329 if (x_size != 1) {
9330 int x;
9331 for (x = 0; x < x_size - 1; x += 2) {
9332 // For each pixel.
9333 for (int c = 0; c < num_color_components; ++c) {
9334 // For each component.
9335 filter_component(p, q, pixel_size, row_size);
9336 }
9337 if (alpha) {
9338 filter_alpha(p, q, pixel_size, row_size);
9339 }
9340 q += pixel_size;
9341 }
9342 if (x < x_size) {
9343 // Skip the last odd pixel.
9344 q += pixel_size;
9345 }
9346 } else {
9347 // Just one pixel.
9348 for (int c = 0; c < num_color_components; ++c) {
9349 // For each component.
9350 filter_component(p, q, 0, row_size);
9351 }
9352 if (alpha) {
9353 filter_alpha(p, q, 0, row_size);
9354 }
9355 }
9356 q += row_size;
9358 }
9359 if (y < y_size) {
9360 // Skip the last odd row.
9361 q += row_size;
9362 }
9363 } else {
9364 // Just one row.
9365 if (x_size != 1) {
9366 int x;
9367 for (x = 0; x < x_size - 1; x += 2) {
9368 // For each pixel.
9369 for (int c = 0; c < num_color_components; ++c) {
9370 // For each component.
9371 filter_component(p, q, pixel_size, 0);
9372 }
9373 if (alpha) {
9374 filter_alpha(p, q, pixel_size, 0);
9375 }
9376 q += pixel_size;
9377 }
9378 if (x < x_size) {
9379 // Skip the last odd pixel.
9380 q += pixel_size;
9381 }
9382 } else {
9383 // Just one pixel.
9384 for (int c = 0; c < num_color_components; ++c) {
9385 // For each component.
9386 filter_component(p, q, 0, 0);
9387 }
9388 if (alpha) {
9389          filter_alpha(p, q, 0, 0);
9390 }
9391 }
9392 }
9393
9394 nassertv(p == to._image.p() + (z + 1) * to._page_size);
9395 nassertv(q == from._image.p() + (z + 1) * from._page_size);
9396 }
9397}
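// Illustrative sketch (not part of texture.cxx): stripped of the component,
// alpha, view and sRGB handling above, each page is reduced by a plain 2x2
// box filter.  The real method additionally skips the trailing odd row and
// column, as its loops show; for a single-channel 8-bit image with even
// dimensions the core is simply:
#include <vector>
#include <cstddef>

static std::vector<unsigned char>
box_filter_half(const std::vector<unsigned char> &src, int w, int h) {
  int tw = w / 2, th = h / 2;
  std::vector<unsigned char> dst((size_t)tw * th);
  for (int y = 0; y < th; ++y) {
    for (int x = 0; x < tw; ++x) {
      // Average the 2x2 block of source texels covering this destination texel.
      unsigned int sum =
        src[(size_t)(2 * y) * w + 2 * x]     + src[(size_t)(2 * y) * w + 2 * x + 1] +
        src[(size_t)(2 * y + 1) * w + 2 * x] + src[(size_t)(2 * y + 1) * w + 2 * x + 1];
      dst[(size_t)y * tw + x] = (unsigned char)(sum >> 2);
    }
  }
  return dst;
}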
9398
9399/**
9400 * Generates the next mipmap level from the previous one, treating all the
9401 * pages of the level as a single 3-d block of pixels.
9402 *
9403 * x_size, y_size, and z_size are the size of the previous level. They need
9404 * not be a power of 2, or even a multiple of 2.
9405 *
9406 * Assumes the lock is already held.
9407 */
9408void Texture::
9409do_filter_3d_mipmap_level(const CData *cdata,
9410 Texture::RamImage &to, const Texture::RamImage &from,
9411 int x_size, int y_size, int z_size) const {
9412 Filter3DComponent *filter_component;
9413 Filter3DComponent *filter_alpha;
9414
9415 if (is_srgb(cdata->_format)) {
9416 // We currently only support sRGB mipmap generation for unsigned byte
9417 // textures, due to our use of a lookup table.
9418 nassertv(cdata->_component_type == T_unsigned_byte);
9419
9420 if (has_sse2_sRGB_encode()) {
9421 filter_component = &filter_3d_unsigned_byte_srgb_sse2;
9422 } else {
9423 filter_component = &filter_3d_unsigned_byte_srgb;
9424 }
9425
9426 // Alpha is always linear.
9427 filter_alpha = &filter_3d_unsigned_byte;
9428
9429 } else {
9430 switch (cdata->_component_type) {
9431 case T_unsigned_byte:
9432 filter_component = &filter_3d_unsigned_byte;
9433 break;
9434
9435 case T_unsigned_short:
9436 filter_component = &filter_3d_unsigned_short;
9437 break;
9438
9439 case T_float:
9440 filter_component = &filter_3d_float;
9441 break;
9442
9443 default:
9444 gobj_cat.error()
9445 << "Unable to generate mipmaps for 3D texture with component type "
9446        << cdata->_component_type << "!\n";
9447 return;
9448 }
9449 filter_alpha = filter_component;
9450 }
9451
9452 size_t pixel_size = cdata->_num_components * cdata->_component_width;
9453 size_t row_size = (size_t)x_size * pixel_size;
9454 size_t page_size = (size_t)y_size * row_size;
9455 size_t view_size = (size_t)z_size * page_size;
9456
9457 int to_x_size = max(x_size >> 1, 1);
9458 int to_y_size = max(y_size >> 1, 1);
9459 int to_z_size = max(z_size >> 1, 1);
9460
9461 size_t to_row_size = (size_t)to_x_size * pixel_size;
9462 size_t to_page_size = (size_t)to_y_size * to_row_size;
9463 size_t to_view_size = (size_t)to_z_size * to_page_size;
9464 to._page_size = to_page_size;
9465 to._image = PTA_uchar::empty_array(to_page_size * to_z_size * cdata->_num_views, get_class_type());
9466
9467 bool alpha = has_alpha(cdata->_format);
9468 int num_color_components = cdata->_num_components;
9469 if (alpha) {
9470 --num_color_components;
9471 }
9472
9473 for (int view = 0; view < cdata->_num_views; ++view) {
9474 unsigned char *start_to = to._image.p() + view * to_view_size;
9475 const unsigned char *start_from = from._image.p() + view * view_size;
9476 nassertv(start_to + to_view_size <= to._image.p() + to._image.size());
9477 nassertv(start_from + view_size <= from._image.p() + from._image.size());
9478 unsigned char *p = start_to;
9479 const unsigned char *q = start_from;
9480 if (z_size != 1) {
9481 int z;
9482 for (z = 0; z < z_size - 1; z += 2) {
9483 // For each level.
9484 nassertv(p == start_to + (z / 2) * to_page_size);
9485 nassertv(q == start_from + z * page_size);
9486 if (y_size != 1) {
9487 int y;
9488 for (y = 0; y < y_size - 1; y += 2) {
9489 // For each row.
9490 nassertv(p == start_to + (z / 2) * to_page_size + (y / 2) * to_row_size);
9491 nassertv(q == start_from + z * page_size + y * row_size);
9492 if (x_size != 1) {
9493 int x;
9494 for (x = 0; x < x_size - 1; x += 2) {
9495 // For each pixel.
9496 for (int c = 0; c < num_color_components; ++c) {
9497 // For each component.
9498 filter_component(p, q, pixel_size, row_size, page_size);
9499 }
9500 if (alpha) {
9501 filter_alpha(p, q, pixel_size, row_size, page_size);
9502 }
9503 q += pixel_size;
9504 }
9505 if (x < x_size) {
9506 // Skip the last odd pixel.
9507 q += pixel_size;
9508 }
9509 } else {
9510 // Just one pixel.
9511 for (int c = 0; c < num_color_components; ++c) {
9512 // For each component.
9513 filter_component(p, q, 0, row_size, page_size);
9514 }
9515 if (alpha) {
9516 filter_alpha(p, q, 0, row_size, page_size);
9517 }
9518 }
9519 q += row_size;
9521 }
9522 if (y < y_size) {
9523 // Skip the last odd row.
9524 q += row_size;
9525 }
9526 } else {
9527 // Just one row.
9528 if (x_size != 1) {
9529 int x;
9530 for (x = 0; x < x_size - 1; x += 2) {
9531 // For each pixel.
9532 for (int c = 0; c < num_color_components; ++c) {
9533 // For each component.
9534 filter_component(p, q, pixel_size, 0, page_size);
9535 }
9536 if (alpha) {
9537 filter_alpha(p, q, pixel_size, 0, page_size);
9538 }
9539 q += pixel_size;
9540 }
9541 if (x < x_size) {
9542 // Skip the last odd pixel.
9543 q += pixel_size;
9544 }
9545 } else {
9546 // Just one pixel.
9547 for (int c = 0; c < num_color_components; ++c) {
9548 // For each component.
9549 filter_component(p, q, 0, 0, page_size);
9550 }
9551 if (alpha) {
9552 filter_alpha(p, q, 0, 0, page_size);
9553 }
9554 }
9555 }
9556 q += page_size;
9557 }
9558 if (z < z_size) {
9559 // Skip the last odd page.
9560 q += page_size;
9561 }
9562 } else {
9563 // Just one page.
9564 if (y_size != 1) {
9565 int y;
9566 for (y = 0; y < y_size - 1; y += 2) {
9567 // For each row.
9568 nassertv(p == start_to + (y / 2) * to_row_size);
9569 nassertv(q == start_from + y * row_size);
9570 if (x_size != 1) {
9571 int x;
9572 for (x = 0; x < x_size - 1; x += 2) {
9573 // For each pixel.
9574 for (int c = 0; c < num_color_components; ++c) {
9575 // For each component.
9576 filter_component(p, q, pixel_size, row_size, 0);
9577 }
9578 if (alpha) {
9579 filter_alpha(p, q, pixel_size, row_size, 0);
9580 }
9581 q += pixel_size;
9582 }
9583 if (x < x_size) {
9584 // Skip the last odd pixel.
9585 q += pixel_size;
9586 }
9587 } else {
9588 // Just one pixel.
9589 for (int c = 0; c < num_color_components; ++c) {
9590 // For each component.
9591 filter_component(p, q, 0, row_size, 0);
9592 }
9593 if (alpha) {
9594 filter_alpha(p, q, 0, row_size, 0);
9595 }
9596 }
9597 q += row_size;
9599 }
9600 if (y < y_size) {
9601 // Skip the last odd row.
9602 q += row_size;
9603 }
9604 } else {
9605 // Just one row.
9606 if (x_size != 1) {
9607 int x;
9608 for (x = 0; x < x_size - 1; x += 2) {
9609 // For each pixel.
9610 for (int c = 0; c < num_color_components; ++c) {
9611 // For each component.
9612 filter_component(p, q, pixel_size, 0, 0);
9613 }
9614 if (alpha) {
9615 filter_alpha(p, q, pixel_size, 0, 0);
9616 }
9617 q += pixel_size;
9618 }
9619 if (x < x_size) {
9620 // Skip the last odd pixel.
9621 q += pixel_size;
9622 }
9623 } else {
9624 // Just one pixel.
9625 for (int c = 0; c < num_color_components; ++c) {
9626 // For each component.
9627 filter_component(p, q, 0, 0, 0);
9628 }
9629 if (alpha) {
9630 filter_alpha(p, q, 0, 0, 0);
9631 }
9632 }
9633 }
9634 }
9635
9636 nassertv(p == start_to + to_z_size * to_page_size);
9637 nassertv(q == start_from + z_size * page_size);
9638 }
9639}
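// Illustrative sketch (not part of texture.cxx): repeatedly halving each axis
// with a floor of 1, as both filter methods above do, walks the whole mipmap
// chain even for non-power-of-two, non-square levels.
#include <algorithm>
#include <cstdio>

static void print_mipmap_chain(int x, int y, int z) {
  int level = 0;
  while (true) {
    std::printf("level %d: %d x %d x %d\n", level, x, y, z);
    if (x == 1 && y == 1 && z == 1) {
      break;
    }
    x = std::max(x >> 1, 1);
    y = std::max(y >> 1, 1);
    z = std::max(z >> 1, 1);
    ++level;
  }
}

// For example, print_mipmap_chain(12, 5, 3) prints
// 12 x 5 x 3, 6 x 2 x 1, 3 x 1 x 1 and finally 1 x 1 x 1.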
9640
9641/**
9642 * Averages a 2x2 block of pixel components into a single pixel component, for
9643 * producing the next mipmap level. Increments p and q to the next component.
9644 */
9645void Texture::
9646filter_2d_unsigned_byte(unsigned char *&p, const unsigned char *&q,
9647 size_t pixel_size, size_t row_size) {
9648 unsigned int result = ((unsigned int)q[0] +
9649 (unsigned int)q[pixel_size] +
9650 (unsigned int)q[row_size] +
9651 (unsigned int)q[pixel_size + row_size]) >> 2;
9652 *p = (unsigned char)result;
9653 ++p;
9654 ++q;
9655}
9656
9657/**
9658 * Averages a 2x2 block of pixel components into a single pixel component, for
9659 * producing the next mipmap level. Increments p and q to the next component.
9660 */
9661void Texture::
9662filter_2d_unsigned_byte_srgb(unsigned char *&p, const unsigned char *&q,
9663 size_t pixel_size, size_t row_size) {
9664 float result = (decode_sRGB_float(q[0]) +
9665 decode_sRGB_float(q[pixel_size]) +
9666 decode_sRGB_float(q[row_size]) +
9667 decode_sRGB_float(q[pixel_size + row_size]));
9668
9669 *p = encode_sRGB_uchar(result * 0.25f);
9670 ++p;
9671 ++q;
9672}
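// Illustrative sketch (not part of texture.cxx): averaging sRGB-encoded bytes
// directly would bias the result dark, so the helper above decodes to linear
// light, averages, and re-encodes.  The decode_sRGB_float()/encode_sRGB_uchar()
// helpers it uses are lookup-table based (with an SSE2 variant); written out
// with the standard sRGB transfer functions, the idea is roughly:
#include <cmath>

static float srgb_to_linear(unsigned char c) {
  float s = c / 255.0f;
  return (s <= 0.04045f) ? s / 12.92f
                         : std::pow((s + 0.055f) / 1.055f, 2.4f);
}

static unsigned char linear_to_srgb(float l) {
  float s = (l <= 0.0031308f) ? l * 12.92f
                              : 1.055f * std::pow(l, 1.0f / 2.4f) - 0.055f;
  if (s < 0.0f) s = 0.0f;
  if (s > 1.0f) s = 1.0f;
  return (unsigned char)(s * 255.0f + 0.5f);
}

static unsigned char average_srgb_2x2(unsigned char a, unsigned char b,
                                      unsigned char c, unsigned char d) {
  return linear_to_srgb((srgb_to_linear(a) + srgb_to_linear(b) +
                         srgb_to_linear(c) + srgb_to_linear(d)) * 0.25f);
}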
9673
9674/**
9675 * Averages a 2x2 block of pixel components into a single pixel component, for
9676 * producing the next mipmap level. Increments p and q to the next component.
9677 */
9678void Texture::
9679filter_2d_unsigned_byte_srgb_sse2(unsigned char *&p, const unsigned char *&q,
9680 size_t pixel_size, size_t row_size) {
9681 float result = (decode_sRGB_float(q[0]) +
9682 decode_sRGB_float(q[pixel_size]) +
9683 decode_sRGB_float(q[row_size]) +
9684 decode_sRGB_float(q[pixel_size + row_size]));
9685
9686 *p = encode_sRGB_uchar_sse2(result * 0.25f);
9687 ++p;
9688 ++q;
9689}
9690
9691/**
9692 * Averages a 2x2 block of pixel components into a single pixel component, for
9693 * producing the next mipmap level. Increments p and q to the next component.
9694 */
9695void Texture::
9696filter_2d_unsigned_short(unsigned char *&p, const unsigned char *&q,
9697 size_t pixel_size, size_t row_size) {
9698 unsigned int result = ((unsigned int)*(unsigned short *)&q[0] +
9699 (unsigned int)*(unsigned short *)&q[pixel_size] +
9700 (unsigned int)*(unsigned short *)&q[row_size] +
9701 (unsigned int)*(unsigned short *)&q[pixel_size + row_size]) >> 2;
9702 store_unscaled_short(p, result);
9703 q += 2;
9704}
9705
9706/**
9707 * Averages a 2x2 block of pixel components into a single pixel component, for
9708 * producing the next mipmap level. Increments p and q to the next component.
9709 */
9710void Texture::
9711filter_2d_float(unsigned char *&p, const unsigned char *&q,
9712 size_t pixel_size, size_t row_size) {
9713 *(float *)p = (*(float *)&q[0] +
9714 *(float *)&q[pixel_size] +
9715 *(float *)&q[row_size] +
9716 *(float *)&q[pixel_size + row_size]) / 4.0f;
9717 p += 4;
9718 q += 4;
9719}
9720
9721/**
9722 * Averages a 2x2x2 block of pixel components into a single pixel component,
9723 * for producing the next mipmap level. Increments p and q to the next
9724 * component.
9725 */
9726void Texture::
9727filter_3d_unsigned_byte(unsigned char *&p, const unsigned char *&q,
9728 size_t pixel_size, size_t row_size, size_t page_size) {
9729 unsigned int result = ((unsigned int)q[0] +
9730 (unsigned int)q[pixel_size] +
9731 (unsigned int)q[row_size] +
9732 (unsigned int)q[pixel_size + row_size] +
9733 (unsigned int)q[page_size] +
9734 (unsigned int)q[pixel_size + page_size] +
9735 (unsigned int)q[row_size + page_size] +
9736 (unsigned int)q[pixel_size + row_size + page_size]) >> 3;
9737 *p = (unsigned char)result;
9738 ++p;
9739 ++q;
9740}
9741
9742/**
9743 * Averages a 2x2x2 block of pixel components into a single pixel component,
9744 * for producing the next mipmap level. Increments p and q to the next
9745 * component.
9746 */
9747void Texture::
9748filter_3d_unsigned_byte_srgb(unsigned char *&p, const unsigned char *&q,
9749 size_t pixel_size, size_t row_size, size_t page_size) {
9750 float result = (decode_sRGB_float(q[0]) +
9751 decode_sRGB_float(q[pixel_size]) +
9752 decode_sRGB_float(q[row_size]) +
9753 decode_sRGB_float(q[pixel_size + row_size]) +
9754 decode_sRGB_float(q[page_size]) +
9755 decode_sRGB_float(q[pixel_size + page_size]) +
9756 decode_sRGB_float(q[row_size + page_size]) +
9757 decode_sRGB_float(q[pixel_size + row_size + page_size]));
9758
9759 *p = encode_sRGB_uchar(result * 0.125f);
9760 ++p;
9761 ++q;
9762}
9763
9764/**
9765 * Averages a 2x2x2 block of pixel components into a single pixel component,
9766 * for producing the next mipmap level. Increments p and q to the next
9767 * component.
9768 */
9769void Texture::
9770filter_3d_unsigned_byte_srgb_sse2(unsigned char *&p, const unsigned char *&q,
9771 size_t pixel_size, size_t row_size, size_t page_size) {
9772 float result = (decode_sRGB_float(q[0]) +
9773 decode_sRGB_float(q[pixel_size]) +
9774 decode_sRGB_float(q[row_size]) +
9775 decode_sRGB_float(q[pixel_size + row_size]) +
9776 decode_sRGB_float(q[page_size]) +
9777 decode_sRGB_float(q[pixel_size + page_size]) +
9778 decode_sRGB_float(q[row_size + page_size]) +
9779 decode_sRGB_float(q[pixel_size + row_size + page_size]));
9780
9781 *p = encode_sRGB_uchar_sse2(result * 0.125f);
9782 ++p;
9783 ++q;
9784}
9785
9786/**
9787 * Averages a 2x2x2 block of pixel components into a single pixel component,
9788 * for producing the next mipmap level. Increments p and q to the next
9789 * component.
9790 */
9791void Texture::
9792filter_3d_unsigned_short(unsigned char *&p, const unsigned char *&q,
9793 size_t pixel_size, size_t row_size,
9794 size_t page_size) {
9795 unsigned int result = ((unsigned int)*(unsigned short *)&q[0] +
9796 (unsigned int)*(unsigned short *)&q[pixel_size] +
9797 (unsigned int)*(unsigned short *)&q[row_size] +
9798 (unsigned int)*(unsigned short *)&q[pixel_size + row_size] +
9799 (unsigned int)*(unsigned short *)&q[page_size] +
9800 (unsigned int)*(unsigned short *)&q[pixel_size + page_size] +
9801 (unsigned int)*(unsigned short *)&q[row_size + page_size] +
9802 (unsigned int)*(unsigned short *)&q[pixel_size + row_size + page_size]) >> 3;
9803 store_unscaled_short(p, result);
9804 q += 2;
9805}
9806
9807/**
9808 * Averages a 2x2x2 block of pixel components into a single pixel component,
9809 * for producing the next mipmap level. Increments p and q to the next
9810 * component.
9811 */
9812void Texture::
9813filter_3d_float(unsigned char *&p, const unsigned char *&q,
9814 size_t pixel_size, size_t row_size, size_t page_size) {
9815 *(float *)p = (*(float *)&q[0] +
9816 *(float *)&q[pixel_size] +
9817 *(float *)&q[row_size] +
9818 *(float *)&q[pixel_size + row_size] +
9819 *(float *)&q[page_size] +
9820 *(float *)&q[pixel_size + page_size] +
9821 *(float *)&q[row_size + page_size] +
9822 *(float *)&q[pixel_size + row_size + page_size]) / 8.0f;
9823 p += 4;
9824 q += 4;
9825}
9826
9827/**
9828 * Invokes the squish library to compress the RAM image(s).
9829 */
9830bool Texture::
9831do_squish(CData *cdata, Texture::CompressionMode compression, int squish_flags) {
9832#ifdef HAVE_SQUISH
9833 if (!do_has_all_ram_mipmap_images(cdata)) {
9834 // If we're about to compress the RAM image, we should ensure that we have
9835 // all of the mipmap levels first.
9836 do_generate_ram_mipmap_images(cdata, false);
9837 }
9838
9839 RamImages compressed_ram_images;
9840 compressed_ram_images.reserve(cdata->_ram_images.size());
9841 for (size_t n = 0; n < cdata->_ram_images.size(); ++n) {
9842 RamImage compressed_image;
9843 int x_size = do_get_expected_mipmap_x_size(cdata, n);
9844 int y_size = do_get_expected_mipmap_y_size(cdata, n);
9845 int num_pages = do_get_expected_mipmap_num_pages(cdata, n);
9846 int page_size = squish::GetStorageRequirements(x_size, y_size, squish_flags);
9847 int cell_size = squish::GetStorageRequirements(4, 4, squish_flags);
9848
9849 compressed_image._page_size = page_size;
9850 compressed_image._image = PTA_uchar::empty_array(page_size * num_pages);
9851 for (int z = 0; z < num_pages; ++z) {
9852 unsigned char *dest_page = compressed_image._image.p() + z * page_size;
9853 unsigned const char *source_page = cdata->_ram_images[n]._image.p() + z * cdata->_ram_images[n]._page_size;
9854 unsigned const char *source_page_end = source_page + cdata->_ram_images[n]._page_size;
9855 // Convert one 4 x 4 cell at a time.
9856 unsigned char *d = dest_page;
9857 for (int y = 0; y < y_size; y += 4) {
9858 for (int x = 0; x < x_size; x += 4) {
9859 unsigned char tb[16 * 4];
9860 int mask = 0;
9861 unsigned char *t = tb;
9862 for (int i = 0; i < 16; ++i) {
9863 int xi = x + i % 4;
9864 int yi = y + i / 4;
9865 unsigned const char *s = source_page + (yi * x_size + xi) * cdata->_num_components;
9866 if (s < source_page_end) {
9867 switch (cdata->_num_components) {
9868 case 1:
9869 t[0] = s[0]; // r
9870 t[1] = s[0]; // g
9871 t[2] = s[0]; // b
9872 t[3] = 255; // a
9873 break;
9874
9875 case 2:
9876 t[0] = s[0]; // r
9877 t[1] = s[0]; // g
9878 t[2] = s[0]; // b
9879 t[3] = s[1]; // a
9880 break;
9881
9882 case 3:
9883 t[0] = s[2]; // r
9884 t[1] = s[1]; // g
9885 t[2] = s[0]; // b
9886 t[3] = 255; // a
9887 break;
9888
9889 case 4:
9890 t[0] = s[2]; // r
9891 t[1] = s[1]; // g
9892 t[2] = s[0]; // b
9893 t[3] = s[3]; // a
9894 break;
9895 }
9896 mask |= (1 << i);
9897 }
9898 t += 4;
9899 }
9900 squish::CompressMasked(tb, mask, d, squish_flags);
9901 d += cell_size;
9903 }
9904 }
9905 }
9906 compressed_ram_images.push_back(compressed_image);
9907 }
9908 cdata->_ram_images.swap(compressed_ram_images);
9909 cdata->_ram_image_compression = compression;
9910 return true;
9911
9912#else // HAVE_SQUISH
9913 return false;
9914
9915#endif // HAVE_SQUISH
9916}
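#ifdef HAVE_SQUISH
// Illustrative sketch (not part of texture.cxx): sizing and compressing one
// page with squish, as the loop above does.  squish_flags would be e.g.
// squish::kDxt1 or squish::kDxt5.  Unlike the real code, this sketch assumes
// the input is already packed as consecutive 4x4 RGBA cells (64 bytes each)
// and that both dimensions are multiples of 4, so plain Compress() suffices
// in place of CompressMasked().
#include <squish.h>
#include <vector>
#include <cstddef>

static std::vector<unsigned char>
compress_page(const unsigned char *rgba_cells, int x_size, int y_size,
              int squish_flags) {
  // Compressed size of the whole page and of a single 4x4 cell.
  int page_size = squish::GetStorageRequirements(x_size, y_size, squish_flags);
  int cell_size = squish::GetStorageRequirements(4, 4, squish_flags);

  std::vector<unsigned char> out((size_t)page_size);
  unsigned char *d = out.data();
  for (int y = 0; y < y_size; y += 4) {
    for (int x = 0; x < x_size; x += 4) {
      squish::Compress(rgba_cells, d, squish_flags);
      rgba_cells += 16 * 4;
      d += cell_size;
    }
  }
  return out;
}
#endif  // HAVE_SQUISH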
9917
9918/**
9919 * Invokes the squish library to uncompress the RAM image(s).
9920 */
9921bool Texture::
9922do_unsquish(CData *cdata, int squish_flags) {
9923#ifdef HAVE_SQUISH
9924 RamImages uncompressed_ram_images;
9925 uncompressed_ram_images.reserve(cdata->_ram_images.size());
9926 for (size_t n = 0; n < cdata->_ram_images.size(); ++n) {
9927 RamImage uncompressed_image;
9928 int x_size = do_get_expected_mipmap_x_size(cdata, n);
9929 int y_size = do_get_expected_mipmap_y_size(cdata, n);
9930 int num_pages = do_get_expected_mipmap_num_pages(cdata, n);
9931 int page_size = squish::GetStorageRequirements(x_size, y_size, squish_flags);
9932 int cell_size = squish::GetStorageRequirements(4, 4, squish_flags);
9933
9934 uncompressed_image._page_size = do_get_expected_ram_mipmap_page_size(cdata, n);
9935 uncompressed_image._image = PTA_uchar::empty_array(uncompressed_image._page_size * num_pages);
9936 for (int z = 0; z < num_pages; ++z) {
9937 unsigned char *dest_page = uncompressed_image._image.p() + z * uncompressed_image._page_size;
9938 unsigned char *dest_page_end = dest_page + uncompressed_image._page_size;
9939 unsigned const char *source_page = cdata->_ram_images[n]._image.p() + z * page_size;
9940 // Unconvert one 4 x 4 cell at a time.
9941 unsigned const char *s = source_page;
9942 for (int y = 0; y < y_size; y += 4) {
9943 for (int x = 0; x < x_size; x += 4) {
9944 unsigned char tb[16 * 4];
9945 squish::Decompress(tb, s, squish_flags);
9946 s += cell_size;
9947
9948 unsigned char *t = tb;
9949 for (int i = 0; i < 16; ++i) {
9950 int xi = x + i % 4;
9951 int yi = y + i / 4;
9952 unsigned char *d = dest_page + (yi * x_size + xi) * cdata->_num_components;
9953 if (d < dest_page_end) {
9954 switch (cdata->_num_components) {
9955 case 1:
9956 d[0] = t[1]; // g
9957 break;
9958
9959 case 2:
9960 d[0] = t[1]; // g
9961 d[1] = t[3]; // a
9962 break;
9963
9964 case 3:
9965 d[2] = t[0]; // r
9966 d[1] = t[1]; // g
9967 d[0] = t[2]; // b
9968 break;
9969
9970 case 4:
9971 d[2] = t[0]; // r
9972 d[1] = t[1]; // g
9973 d[0] = t[2]; // b
9974 d[3] = t[3]; // a
9975 break;
9976 }
9977 }
9978 t += 4;
9979 }
9980 }
9982 }
9983 }
9984 uncompressed_ram_images.push_back(uncompressed_image);
9985 }
9986 cdata->_ram_images.swap(uncompressed_ram_images);
9987 cdata->_ram_image_compression = CM_off;
9988 return true;
9989
9990#else // HAVE_SQUISH
9991 return false;
9992
9993#endif // HAVE_SQUISH
9994}
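#ifdef HAVE_SQUISH
// Illustrative sketch (not part of texture.cxx): decompression mirrors the
// loop above.  squish::Decompress() always yields a full 4x4 RGBA cell (64
// bytes); the caller then copies only the texels that fall inside the image,
// swapping back to the BGR(A) component order of the RAM image, which is why
// the switch above stores d[2]=t[0], d[1]=t[1], d[0]=t[2].
#include <squish.h>

static void decompress_one_cell(const unsigned char *compressed_cell,
                                unsigned char rgba_out[16 * 4],
                                int squish_flags) {
  squish::Decompress(rgba_out, compressed_cell, squish_flags);
}
#endif  // HAVE_SQUISH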
9995
9996/**
9997 * Factory method to generate a Texture object
9998 */
9999void Texture::
10000register_with_read_factory() {
10001 BamReader::get_factory()->register_factory(get_class_type(), make_from_bam);
10002}
10003
10004/**
10005 * Function to write the important information in the particular object to a
10006 * Datagram
10007 */
10008void Texture::
10009write_datagram(BamWriter *manager, Datagram &me) {
10010 CDWriter cdata(_cycler, false);
10011
10012 bool has_rawdata = false;
10013 do_write_datagram_header(cdata, manager, me, has_rawdata);
10014 do_write_datagram_body(cdata, manager, me);
10015
10016 // If we are also including the texture's image data, then stuff it in here.
10017 if (has_rawdata) {
10018 do_write_datagram_rawdata(cdata, manager, me);
10019 }
10020}
10021
10022/**
10023 * Called by the BamReader to perform any final actions needed for setting up
10024 * the object after all objects have been read and all pointers have been
10025 * completed.
10026 */
10027void Texture::
10028finalize(BamReader *manager) {
10029 // Unref the pointer that we explicitly reffed in make_from_bam().
10030 unref();
10031
10032 // We should never get back to zero after unreffing our own count, because
10033 // we expect to have been stored in a pointer somewhere. If we do get to
10034 // zero, it's a memory leak; the way to avoid this is to call unref_delete()
10035 // above instead of unref(), but this is dangerous to do from within a
10036 // virtual function.
10037 nassertv(get_ref_count() != 0);
10038}
10039
10040
10041/**
10042 * Writes the header part of the texture to the Datagram. This is the common
10043 * part that is shared by all Texture subclasses, and contains the filename
10044 * and rawdata flags. This method is not virtual because all Texture
10045 * subclasses must write the same data at this step.
10046 *
10047 * This part must be read first before calling do_fillin_body() to determine
10048 * whether to load the Texture from the TexturePool or directly from the bam
10049 * stream.
10050 *
10051 * After this call, has_rawdata will be filled with either true or false,
10052 * according to whether we expect to write the texture rawdata to the bam
10053 * stream following the texture body.
10054 */
10055void Texture::
10056do_write_datagram_header(CData *cdata, BamWriter *manager, Datagram &me, bool &has_rawdata) {
10057 // Write out the texture's raw pixel data if (a) the current Bam Texture
10058 // Mode requires that, or (b) there's no filename, so the file can't be
10059 // loaded up from disk, but the raw pixel data is currently available in
10060 // RAM.
10061
10062 // Otherwise, we just write out the filename, and assume whoever loads the
10063 // bam file later will have access to the image file on disk.
10064 BamWriter::BamTextureMode file_texture_mode = manager->get_file_texture_mode();
10065 has_rawdata = (file_texture_mode == BamWriter::BTM_rawdata ||
10066 (cdata->_filename.empty() && do_has_bam_rawdata(cdata)));
10067 if (has_rawdata && !do_has_bam_rawdata(cdata)) {
10068 do_get_bam_rawdata(cdata);
10069 if (!do_has_bam_rawdata(cdata)) {
10070 // No image data after all.
10071 has_rawdata = false;
10072 }
10073 }
10074
10075 bool has_bam_dir = !manager->get_filename().empty();
10076 Filename bam_dir = manager->get_filename().get_dirname();
10077 Filename filename = cdata->_filename;
10078 Filename alpha_filename = cdata->_alpha_filename;
10079
10080 VirtualFileSystem *vfs = VirtualFileSystem::get_global_ptr();
10081
10082 switch (file_texture_mode) {
10083 case BamWriter::BTM_unchanged:
10084 case BamWriter::BTM_rawdata:
10085 break;
10086
10087 case BamWriter::BTM_fullpath:
10088 filename = cdata->_fullpath;
10089 alpha_filename = cdata->_alpha_fullpath;
10090 break;
10091
10092 case BamWriter::BTM_relative:
10093 filename = cdata->_fullpath;
10094 alpha_filename = cdata->_alpha_fullpath;
10095 bam_dir.make_absolute(vfs->get_cwd());
10096 if (!has_bam_dir || !filename.make_relative_to(bam_dir, true)) {
10097 filename.find_on_searchpath(get_model_path());
10098 }
10099 if (gobj_cat.is_debug()) {
10100 gobj_cat.debug()
10101 << "Texture file " << cdata->_fullpath
10102 << " found as " << filename << "\n";
10103 }
10104 if (!has_bam_dir || !alpha_filename.make_relative_to(bam_dir, true)) {
10105 alpha_filename.find_on_searchpath(get_model_path());
10106 }
10107 if (gobj_cat.is_debug()) {
10108 gobj_cat.debug()
10109 << "Alpha image " << cdata->_alpha_fullpath
10110 << " found as " << alpha_filename << "\n";
10111 }
10112 break;
10113
10114 case BamWriter::BTM_basename:
10115 filename = cdata->_fullpath.get_basename();
10116 alpha_filename = cdata->_alpha_fullpath.get_basename();
10117 break;
10118
10119 default:
10120 gobj_cat.error()
10121 << "Unsupported bam-texture-mode: " << (int)file_texture_mode << "\n";
10122 }
10123
10124 if (filename.empty()) {
10125 if (do_has_bam_rawdata(cdata) || cdata->_has_clear_color) {
10126 // If we don't have a filename, we have to store rawdata anyway.
10127 has_rawdata = true;
10128 }
10129 }
10130
10131 me.add_string(get_name());
10132 me.add_string(filename);
10133 me.add_string(alpha_filename);
10134 me.add_uint8(cdata->_primary_file_num_channels);
10135 me.add_uint8(cdata->_alpha_file_channel);
10136 me.add_bool(has_rawdata);
10137
10138 if (manager->get_file_minor_ver() < 25 &&
10139 cdata->_texture_type == TT_cube_map) {
10140 // Between Panda3D releases 1.7.2 and 1.8.0 (bam versions 6.24 and 6.25),
10141 // we added TT_2d_texture_array, shifting the definition for TT_cube_map.
10142 me.add_uint8(TT_2d_texture_array);
10143 } else {
10144 me.add_uint8(cdata->_texture_type);
10145 }
10146
10147 if (manager->get_file_minor_ver() >= 32) {
10148 me.add_bool(cdata->_has_read_mipmaps);
10149 }
10150}
10151
10152/**
10153 * Writes the body part of the texture to the Datagram. This is generally all
10154 * of the texture parameters except for the header and the rawdata.
10155 */
10156void Texture::
10157do_write_datagram_body(CData *cdata, BamWriter *manager, Datagram &me) {
10158 if (manager->get_file_minor_ver() >= 36) {
10159 cdata->_default_sampler.write_datagram(me);
10160 } else {
10161 const SamplerState &s = cdata->_default_sampler;
10162 me.add_uint8(s.get_wrap_u());
10163 me.add_uint8(s.get_wrap_v());
10164 me.add_uint8(s.get_wrap_w());
10165 me.add_uint8(s.get_minfilter());
10166 me.add_uint8(s.get_magfilter());
10167 me.add_int16(s.get_anisotropic_degree());
10168 s.get_border_color().write_datagram(me);
10169 }
10170
10171 me.add_uint8(cdata->_compression);
10172 me.add_uint8(cdata->_quality_level);
10173
10174 me.add_uint8(cdata->_format);
10175 me.add_uint8(cdata->_num_components);
10176
10177 if (cdata->_texture_type == TT_buffer_texture) {
10178 me.add_uint8(cdata->_usage_hint);
10179 }
10180
10181 if (manager->get_file_minor_ver() >= 28) {
10182 me.add_uint8(cdata->_auto_texture_scale);
10183 }
10184 me.add_uint32(cdata->_orig_file_x_size);
10185 me.add_uint32(cdata->_orig_file_y_size);
10186
10187 bool has_simple_ram_image = !cdata->_simple_ram_image._image.empty();
10188 me.add_bool(has_simple_ram_image);
10189
10190 // Write out the simple image too, so it will be available later.
10191 if (has_simple_ram_image) {
10192 me.add_uint32(cdata->_simple_x_size);
10193 me.add_uint32(cdata->_simple_y_size);
10194 me.add_int32(cdata->_simple_image_date_generated);
10195 me.add_uint32(cdata->_simple_ram_image._image.size());
10196 me.append_data(cdata->_simple_ram_image._image, cdata->_simple_ram_image._image.size());
10197 }
10198
10199 if (manager->get_file_minor_ver() >= 45) {
10200 me.add_bool(cdata->_has_clear_color);
10201 if (cdata->_has_clear_color) {
10202 cdata->_clear_color.write_datagram(me);
10203 }
10204 }
10205}
10206
10207/**
10208 * Writes the rawdata part of the texture to the Datagram.
10209 */
10210void Texture::
10211do_write_datagram_rawdata(CData *cdata, BamWriter *manager, Datagram &me) {
10212 me.add_uint32(cdata->_x_size);
10213 me.add_uint32(cdata->_y_size);
10214 me.add_uint32(cdata->_z_size);
10215
10216 if (manager->get_file_minor_ver() >= 30) {
10217 me.add_uint32(cdata->_pad_x_size);
10218 me.add_uint32(cdata->_pad_y_size);
10219 me.add_uint32(cdata->_pad_z_size);
10220 }
10221
10222 if (manager->get_file_minor_ver() >= 26) {
10223 me.add_uint32(cdata->_num_views);
10224 }
10225 me.add_uint8(cdata->_component_type);
10226 me.add_uint8(cdata->_component_width);
10227 me.add_uint8(cdata->_ram_image_compression);
10228
10229 if (cdata->_ram_images.empty() && cdata->_has_clear_color &&
10230 manager->get_file_minor_ver() < 45) {
10231 // For older .bam versions that don't support clear colors, make up a RAM
10232 // image.
10233 int image_size = do_get_expected_ram_image_size(cdata);
10234 me.add_uint8(1);
10235 me.add_uint32(do_get_expected_ram_page_size(cdata));
10236 me.add_uint32(image_size);
10237
10238 // Fill the image with the clear color.
10239 unsigned char pixel[16];
10240 const int pixel_size = do_get_clear_data(cdata, pixel);
10241 nassertv(pixel_size > 0);
10242
10243 for (int i = 0; i < image_size; i += pixel_size) {
10244 me.append_data(pixel, pixel_size);
10245 }
10246 } else {
10247 me.add_uint8(cdata->_ram_images.size());
10248 for (size_t n = 0; n < cdata->_ram_images.size(); ++n) {
10249 me.add_uint32(cdata->_ram_images[n]._page_size);
10250 me.add_uint32(cdata->_ram_images[n]._image.size());
10251 me.append_data(cdata->_ram_images[n]._image, cdata->_ram_images[n]._image.size());
10252 }
10253 }
10254}
10255
10256/**
10257 * Factory method to generate a Texture object
10258 */
10259TypedWritable *Texture::
10260make_from_bam(const FactoryParams &params) {
10261 PT(Texture) dummy = new Texture;
10262 return dummy->make_this_from_bam(params);
10263}
10264
10265/**
10266 * Called by make_from_bam() once the particular subclass of Texture is known.
10267 * This is called on a newly-constructed Texture object of the appropriate
10268 * subclass. It will return either the same Texture object (e.g. this), or a
10269 * different Texture object loaded via the TexturePool, as appropriate.
10270 */
10271TypedWritable *Texture::
10272make_this_from_bam(const FactoryParams &params) {
10273 // The process of making a texture is slightly different than making other
10274 // TypedWritable objects. That is because all creation of Textures should
10275 // be done through calls to TexturePool, which ensures that any loads of the
10276 // same filename refer to the same memory.
10277
10278 DatagramIterator scan;
10279 BamReader *manager;
10280
10281 parse_params(params, scan, manager);
10282
10283 // Get the header information--the filenames and texture type--so we can
10284 // look up the file on disk first.
10285 string name = scan.get_string();
10286 Filename filename = scan.get_string();
10287 Filename alpha_filename = scan.get_string();
10288
10289 int primary_file_num_channels = scan.get_uint8();
10290 int alpha_file_channel = scan.get_uint8();
10291 bool has_rawdata = scan.get_bool();
10292 TextureType texture_type = (TextureType)scan.get_uint8();
10293 if (manager->get_file_minor_ver() < 25) {
10294 // Between Panda3D releases 1.7.2 and 1.8.0 (bam versions 6.24 and 6.25),
10295 // we added TT_2d_texture_array, shifting the definition for TT_cube_map.
10296 if (texture_type == TT_2d_texture_array) {
10297 texture_type = TT_cube_map;
10298 }
10299 }
10300 bool has_read_mipmaps = false;
10301 if (manager->get_file_minor_ver() >= 32) {
10302 has_read_mipmaps = scan.get_bool();
10303 }
10304
10305 Texture *me = nullptr;
10306 if (has_rawdata) {
10307 // If the raw image data is included, then just load the texture directly
10308 // from the stream, and return it. In this case we return the "this"
10309 // pointer, since it's a newly-created Texture object of the appropriate
10310 // type.
10311 me = this;
10312 me->set_name(name);
10313 CDWriter cdata_me(me->_cycler, true);
10314 cdata_me->_filename = filename;
10315 cdata_me->_alpha_filename = alpha_filename;
10316 cdata_me->_primary_file_num_channels = primary_file_num_channels;
10317 cdata_me->_alpha_file_channel = alpha_file_channel;
10318 cdata_me->_texture_type = texture_type;
10319 cdata_me->_has_read_mipmaps = has_read_mipmaps;
10320
10321 // Read the texture attributes directly from the bam stream.
10322 me->do_fillin_body(cdata_me, scan, manager);
10323 me->do_fillin_rawdata(cdata_me, scan, manager);
10324
10325 // To manage the reference count, explicitly ref it now, then unref it in
10326 // the finalize callback.
10327 me->ref();
10328 manager->register_finalize(me);
10329
10330 } else {
10331 // The raw image data isn't included, so we'll be loading the Texture via
10332 // the TexturePool. In this case we use the "this" pointer as a temporary
10333 // object to read all of the attributes from the bam stream.
10334 Texture *dummy = this;
10335 AutoTextureScale auto_texture_scale = ATS_unspecified;
10336 bool has_simple_ram_image = false;
10337 {
10338 CDWriter cdata_dummy(dummy->_cycler, true);
10339 dummy->do_fillin_body(cdata_dummy, scan, manager);
10340 auto_texture_scale = cdata_dummy->_auto_texture_scale;
10341 has_simple_ram_image = !cdata_dummy->_simple_ram_image._image.empty();
10342 }
10343
10344 if (filename.empty()) {
10345 // This texture has no filename; since we don't have an image to load,
10346 // we can't actually create the texture.
10347 gobj_cat.info()
10348 << "Cannot create texture '" << name << "' with no filename.\n";
10349
10350 } else {
10351 // This texture does have a filename, so try to load it from disk.
10352 VirtualFileSystem *vfs = VirtualFileSystem::get_global_ptr();
10353 if (!manager->get_filename().empty()) {
10354 // If texture filename was given relative to the bam filename, expand
10355 // it now.
10356 Filename bam_dir = manager->get_filename().get_dirname();
10357 vfs->resolve_filename(filename, bam_dir);
10358 if (!alpha_filename.empty()) {
10359 vfs->resolve_filename(alpha_filename, bam_dir);
10360 }
10361 }
10362
10363 LoaderOptions options = manager->get_loader_options();
10364 if (dummy->uses_mipmaps()) {
10365 options.set_texture_flags(options.get_texture_flags() | LoaderOptions::TF_generate_mipmaps);
10366 }
10367 options.set_auto_texture_scale(auto_texture_scale);
10368
10369 switch (texture_type) {
10370 case TT_buffer_texture:
10371 case TT_1d_texture:
10372 case TT_2d_texture:
10373 case TT_1d_texture_array:
10374 // If we don't want to preload textures, and we already have a simple
10375 // RAM image (or don't need one), we don't need to load it from disk.
10376 // We do check for it in the texture pool first, though, in case it has
10377 // already been loaded.
10378 if ((options.get_texture_flags() & LoaderOptions::TF_preload) == 0 &&
10379 (has_simple_ram_image || (options.get_texture_flags() & LoaderOptions::TF_preload_simple) == 0)) {
10380 if (alpha_filename.empty()) {
10381 me = TexturePool::get_texture(filename, primary_file_num_channels,
10382 has_read_mipmaps);
10383 } else {
10384 me = TexturePool::get_texture(filename, alpha_filename,
10385 primary_file_num_channels,
10386 alpha_file_channel,
10387 has_read_mipmaps);
10388 }
10389 if (me != nullptr && me->get_texture_type() == texture_type) {
10390 // We can use this.
10391 break;
10392 }
10393
10394 // We don't have a texture, but we didn't need to preload it, so we
10395 // can just use this one. We just need to know where we can find it
10396 // when we do need to reload it.
10397 Filename fullpath = filename;
10398 Filename alpha_fullpath = alpha_filename;
10399 const DSearchPath &model_path = get_model_path();
10400 if (vfs->resolve_filename(fullpath, model_path) &&
10401 (alpha_fullpath.empty() || vfs->resolve_filename(alpha_fullpath, model_path))) {
10402 me = dummy;
10403 me->set_name(name);
10404
10405 {
10406 CDWriter cdata_me(me->_cycler, true);
10407 cdata_me->_filename = filename;
10408 cdata_me->_alpha_filename = alpha_filename;
10409 cdata_me->_fullpath = fullpath;
10410 cdata_me->_alpha_fullpath = alpha_fullpath;
10411 cdata_me->_primary_file_num_channels = primary_file_num_channels;
10412 cdata_me->_alpha_file_channel = alpha_file_channel;
10413 cdata_me->_texture_type = texture_type;
10414 cdata_me->_loaded_from_image = true;
10415 cdata_me->_has_read_mipmaps = has_read_mipmaps;
10416 }
10417
10418 // To manage the reference count, explicitly ref it now, then unref
10419 // it in the finalize callback.
10420 me->ref();
10421 manager->register_finalize(me);
10422
10423 // Do add it to the cache now, so that future uses of this same
10424 // texture are unified.
10425 TexturePool::add_texture(me);
10426 return me;
10427 }
10428 }
10429 if (alpha_filename.empty()) {
10430 me = TexturePool::load_texture(filename, primary_file_num_channels,
10431 has_read_mipmaps, options);
10432 } else {
10433 me = TexturePool::load_texture(filename, alpha_filename,
10434 primary_file_num_channels,
10435 alpha_file_channel,
10436 has_read_mipmaps, options);
10437 }
10438 break;
10439
10440 case TT_3d_texture:
10441 me = TexturePool::load_3d_texture(filename, has_read_mipmaps, options);
10442 break;
10443
10444 case TT_2d_texture_array:
10445 case TT_cube_map_array:
10446 me = TexturePool::load_2d_texture_array(filename, has_read_mipmaps, options);
10447 break;
10448
10449 case TT_cube_map:
10450 me = TexturePool::load_cube_map(filename, has_read_mipmaps, options);
10451 break;
10452 }
10453 }
10454
10455 if (me != nullptr) {
10456 me->set_name(name);
10457 CDWriter cdata_me(me->_cycler, true);
10458 me->do_fillin_from(cdata_me, dummy);
10459
10460 // Since in this case me was loaded from the TexturePool, there's no
10461 // need to explicitly manage the reference count. TexturePool will hold
10462 // it safely.
10463 }
10464 }
10465
10466 return me;
10467}
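// Illustrative sketch (not part of texture.cxx): this method routes loads
// through TexturePool for the same reason user code does -- repeated loads of
// one filename should resolve to one shared Texture object.  The filename
// below is hypothetical.
#include "texturePool.h"

static bool textures_are_unified() {
  PT(Texture) tex1 = TexturePool::load_texture("maps/grid.png");
  PT(Texture) tex2 = TexturePool::load_texture("maps/grid.png");
  // With default pool settings both loads yield the same Texture object.
  return tex1.p() != nullptr && tex1.p() == tex2.p();
}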
10468
10469/**
10470 * Reads in the part of the Texture that was written with
10471 * do_write_datagram_body().
10472 */
10473void Texture::
10474do_fillin_body(CData *cdata, DatagramIterator &scan, BamReader *manager) {
10475 cdata->_default_sampler.read_datagram(scan, manager);
10476
10477 if (manager->get_file_minor_ver() >= 1) {
10478 cdata->_compression = (CompressionMode)scan.get_uint8();
10479 }
10480 if (manager->get_file_minor_ver() >= 16) {
10481 cdata->_quality_level = (QualityLevel)scan.get_uint8();
10482 }
10483
10484 cdata->_format = (Format)scan.get_uint8();
10485 cdata->_num_components = scan.get_uint8();
10486
10487 if (cdata->_texture_type == TT_buffer_texture) {
10488 cdata->_usage_hint = (GeomEnums::UsageHint)scan.get_uint8();
10489 }
10490
10491 cdata->inc_properties_modified();
10492
10493 cdata->_auto_texture_scale = ATS_unspecified;
10494 if (manager->get_file_minor_ver() >= 28) {
10495 cdata->_auto_texture_scale = (AutoTextureScale)scan.get_uint8();
10496 }
10497
10498 bool has_simple_ram_image = false;
10499 if (manager->get_file_minor_ver() >= 18) {
10500 cdata->_orig_file_x_size = scan.get_uint32();
10501 cdata->_orig_file_y_size = scan.get_uint32();
10502
10503 has_simple_ram_image = scan.get_bool();
10504 }
10505
10506 if (has_simple_ram_image) {
10507 cdata->_simple_x_size = scan.get_uint32();
10508 cdata->_simple_y_size = scan.get_uint32();
10509 cdata->_simple_image_date_generated = scan.get_int32();
10510
10511 size_t u_size = scan.get_uint32();
10512
10513 // Protect against large allocation.
10514 if (u_size > scan.get_remaining_size()) {
10515 gobj_cat.error()
10516 << "simple RAM image extends past end of datagram, is texture corrupt?\n";
10517 return;
10518 }
10519
10520 PTA_uchar image = PTA_uchar::empty_array(u_size, get_class_type());
10521 scan.extract_bytes(image.p(), u_size);
10522
10523 cdata->_simple_ram_image._image = image;
10524 cdata->_simple_ram_image._page_size = u_size;
10525 cdata->inc_simple_image_modified();
10526 }
10527
10528 if (manager->get_file_minor_ver() >= 45) {
10529 cdata->_has_clear_color = scan.get_bool();
10530 if (cdata->_has_clear_color) {
10531 cdata->_clear_color.read_datagram(scan);
10532 }
10533 }
10534}
10535
10536/**
10537 * Reads in the part of the Texture that was written with
10538 * do_write_datagram_rawdata().
10539 */
10540void Texture::
10541do_fillin_rawdata(CData *cdata, DatagramIterator &scan, BamReader *manager) {
10542 cdata->_x_size = scan.get_uint32();
10543 cdata->_y_size = scan.get_uint32();
10544 cdata->_z_size = scan.get_uint32();
10545
10546 if (manager->get_file_minor_ver() >= 30) {
10547 cdata->_pad_x_size = scan.get_uint32();
10548 cdata->_pad_y_size = scan.get_uint32();
10549 cdata->_pad_z_size = scan.get_uint32();
10550 } else {
10551 do_set_pad_size(cdata, 0, 0, 0);
10552 }
10553
10554 cdata->_num_views = 1;
10555 if (manager->get_file_minor_ver() >= 26) {
10556 cdata->_num_views = scan.get_uint32();
10557 }
10558 cdata->_component_type = (ComponentType)scan.get_uint8();
10559 cdata->_component_width = scan.get_uint8();
10560 cdata->_ram_image_compression = CM_off;
10561 if (manager->get_file_minor_ver() >= 1) {
10562 cdata->_ram_image_compression = (CompressionMode)scan.get_uint8();
10563 }
10564
10565 int num_ram_images = 1;
10566 if (manager->get_file_minor_ver() >= 3) {
10567 num_ram_images = scan.get_uint8();
10568 }
10569
10570 cdata->_ram_images.clear();
10571 cdata->_ram_images.reserve(num_ram_images);
10572 for (int n = 0; n < num_ram_images; ++n) {
10573 cdata->_ram_images.push_back(RamImage());
10574 cdata->_ram_images[n]._page_size = get_expected_ram_page_size();
10575 if (manager->get_file_minor_ver() >= 1) {
10576 cdata->_ram_images[n]._page_size = scan.get_uint32();
10577 }
10578
10579 // fill the cdata->_image buffer with image data
10580 size_t u_size = scan.get_uint32();
10581
10582 // Protect against large allocation.
10583 if (u_size > scan.get_remaining_size()) {
10584 gobj_cat.error()
10585 << "RAM image " << n << " extends past end of datagram, is texture corrupt?\n";
10586 return;
10587 }
10588
10589 PTA_uchar image = PTA_uchar::empty_array(u_size, get_class_type());
10590 scan.extract_bytes(image.p(), u_size);
10591
10592 cdata->_ram_images[n]._image = image;
10593 }
10594 cdata->_loaded_from_image = true;
10595 cdata->inc_image_modified();
10596}
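// Illustrative sketch (not part of texture.cxx): the size check above is the
// general pattern for reading a length-prefixed blob out of a DatagramIterator
// without letting a corrupt length cause a huge allocation.
#include "datagramIterator.h"

static vector_uchar read_sized_blob(DatagramIterator &scan) {
  size_t u_size = scan.get_uint32();
  if (u_size > scan.get_remaining_size()) {
    // The declared size cannot fit in what is left of the datagram; treat the
    // data as corrupt rather than allocating u_size bytes.
    return vector_uchar();
  }
  return scan.extract_bytes(u_size);
}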
10597
10598/**
10599 * Called in make_from_bam(), this method properly copies the attributes from
10600 * the bam stream (as stored in dummy) into this texture, updating the
10601 * modified flags appropriately.
10602 */
10603void Texture::
10604do_fillin_from(CData *cdata, const Texture *dummy) {
10605 // Use the setters instead of setting these directly, so we can correctly
10606 // avoid incrementing cdata->_properties_modified if none of these actually
10607 // change. (Otherwise, we'd have to reload the texture to the GSG every
10608 // time we load a new bam file that references the texture, since each bam
10609 // file reference passes through this function.)
10610
10611 CDReader cdata_dummy(dummy->_cycler);
10612
10613 do_set_wrap_u(cdata, cdata_dummy->_default_sampler.get_wrap_u());
10614 do_set_wrap_v(cdata, cdata_dummy->_default_sampler.get_wrap_v());
10615 do_set_wrap_w(cdata, cdata_dummy->_default_sampler.get_wrap_w());
10616 do_set_border_color(cdata, cdata_dummy->_default_sampler.get_border_color());
10617
10618 if (cdata_dummy->_default_sampler.get_minfilter() != SamplerState::FT_default) {
10619 do_set_minfilter(cdata, cdata_dummy->_default_sampler.get_minfilter());
10620 }
10621 if (cdata_dummy->_default_sampler.get_magfilter() != SamplerState::FT_default) {
10622 do_set_magfilter(cdata, cdata_dummy->_default_sampler.get_magfilter());
10623 }
10624 if (cdata_dummy->_default_sampler.get_anisotropic_degree() != 0) {
10625 do_set_anisotropic_degree(cdata, cdata_dummy->_default_sampler.get_anisotropic_degree());
10626 }
10627 if (cdata_dummy->_compression != CM_default) {
10628 do_set_compression(cdata, cdata_dummy->_compression);
10629 }
10630 if (cdata_dummy->_quality_level != QL_default) {
10631 do_set_quality_level(cdata, cdata_dummy->_quality_level);
10632 }
10633
10634 Format format = cdata_dummy->_format;
10635 int num_components = cdata_dummy->_num_components;
10636
10637 if (num_components == cdata->_num_components) {
10638 // Only reset the format if the number of components hasn't changed, since
10639 // if the number of components has changed our texture no longer matches
10640 // what it was when the bam was written.
10641 do_set_format(cdata, format);
10642 }
10643
10644 if (!cdata_dummy->_simple_ram_image._image.empty()) {
10645 // Only replace the simple ram image if it was generated more recently
10646 // than the one we already have.
10647 if (cdata->_simple_ram_image._image.empty() ||
10648 cdata_dummy->_simple_image_date_generated > cdata->_simple_image_date_generated) {
10649 do_set_simple_ram_image(cdata,
10650 cdata_dummy->_simple_ram_image._image,
10651 cdata_dummy->_simple_x_size,
10652 cdata_dummy->_simple_y_size);
10653 cdata->_simple_image_date_generated = cdata_dummy->_simple_image_date_generated;
10654 }
10655 }
10656}
10657
10658/**
10659 *
10660 */
10661Texture::CData::
10662CData() {
10663 _primary_file_num_channels = 0;
10664 _alpha_file_channel = 0;
10665 _keep_ram_image = true;
10666 _compression = CM_default;
10667 _auto_texture_scale = ATS_unspecified;
10668 _ram_image_compression = CM_off;
10669 _render_to_texture = false;
10670 _match_framebuffer_format = false;
10671 _post_load_store_cache = false;
10672 _quality_level = QL_default;
10673
10674 _texture_type = TT_2d_texture;
10675 _x_size = 0;
10676 _y_size = 1;
10677 _z_size = 1;
10678 _num_views = 1;
10679
10680 // We will override the format in a moment (in the Texture constructor), but
10681 // set it to something else first to avoid the check in do_set_format
10682 // depending on an uninitialized value.
10683 _format = F_rgba;
10684
10685 // Only used for buffer textures.
10686 _usage_hint = GeomEnums::UH_unspecified;
10687
10688 _pad_x_size = 0;
10689 _pad_y_size = 0;
10690 _pad_z_size = 0;
10691
10692 _orig_file_x_size = 0;
10693 _orig_file_y_size = 0;
10694
10695 _loaded_from_image = false;
10696 _loaded_from_txo = false;
10697 _has_read_pages = false;
10698 _has_read_mipmaps = false;
10699 _num_mipmap_levels_read = 0;
10700
10701 _simple_x_size = 0;
10702 _simple_y_size = 0;
10703 _simple_ram_image._page_size = 0;
10704
10705 _has_clear_color = false;
10706}
10707
10708/**
10709 *
10710 */
10711Texture::CData::
10712CData(const Texture::CData &copy) {
10713 _num_mipmap_levels_read = 0;
10714
10715 do_assign(&copy);
10716
10717 _properties_modified = copy._properties_modified;
10718 _image_modified = copy._image_modified;
10719 _simple_image_modified = copy._simple_image_modified;
10720}
10721
10722/**
10723 *
10724 */
10725CycleData *Texture::CData::
10726make_copy() const {
10727 return new CData(*this);
10728}
10729
10730/**
10731 *
10732 */
10733void Texture::CData::
10734do_assign(const Texture::CData *copy) {
10735 _filename = copy->_filename;
10736 _alpha_filename = copy->_alpha_filename;
10737 if (!copy->_fullpath.empty()) {
10738 // Since the fullpath is often empty on a file loaded directly from a txo,
10739 // we only assign the fullpath if it is not empty.
10740 _fullpath = copy->_fullpath;
10741 _alpha_fullpath = copy->_alpha_fullpath;
10742 }
10743 _primary_file_num_channels = copy->_primary_file_num_channels;
10744 _alpha_file_channel = copy->_alpha_file_channel;
10745 _x_size = copy->_x_size;
10746 _y_size = copy->_y_size;
10747 _z_size = copy->_z_size;
10748 _num_views = copy->_num_views;
10749 _pad_x_size = copy->_pad_x_size;
10750 _pad_y_size = copy->_pad_y_size;
10751 _pad_z_size = copy->_pad_z_size;
10752 _orig_file_x_size = copy->_orig_file_x_size;
10753 _orig_file_y_size = copy->_orig_file_y_size;
10754 _num_components = copy->_num_components;
10755 _component_width = copy->_component_width;
10756 _texture_type = copy->_texture_type;
10757 _format = copy->_format;
10758 _component_type = copy->_component_type;
10759 _loaded_from_image = copy->_loaded_from_image;
10760 _loaded_from_txo = copy->_loaded_from_txo;
10761 _has_read_pages = copy->_has_read_pages;
10762 _has_read_mipmaps = copy->_has_read_mipmaps;
10763 _num_mipmap_levels_read = copy->_num_mipmap_levels_read;
10764 _default_sampler = copy->_default_sampler;
10765 _keep_ram_image = copy->_keep_ram_image;
10766 _compression = copy->_compression;
10767 _match_framebuffer_format = copy->_match_framebuffer_format;
10768 _quality_level = copy->_quality_level;
10769 _auto_texture_scale = copy->_auto_texture_scale;
10770 _ram_image_compression = copy->_ram_image_compression;
10771 _ram_images = copy->_ram_images;
10772 _simple_x_size = copy->_simple_x_size;
10773 _simple_y_size = copy->_simple_y_size;
10774 _simple_ram_image = copy->_simple_ram_image;
10775}
10776
10777/**
10778 * Writes the contents of this object to the datagram for shipping out to a
10779 * Bam file.
10780 */
10781void Texture::CData::
10782write_datagram(BamWriter *manager, Datagram &dg) const {
10783}
10784
10785/**
10786 * Receives an array of pointers, one for each time manager->read_pointer()
10787 * was called in fillin(). Returns the number of pointers processed.
10788 */
10789int Texture::CData::
10790complete_pointers(TypedWritable **p_list, BamReader *manager) {
10791 return 0;
10792}
10793
10794/**
10795 * This internal function is called by make_from_bam to read in all of the
10796 * relevant data from the BamFile for the new Texture.
10797 */
10798void Texture::CData::
10799fillin(DatagramIterator &scan, BamReader *manager) {
10800}
10801
10802/**
10803 *
10804 */
10805ostream &
10806operator << (ostream &out, Texture::TextureType tt) {
10807 return out << Texture::format_texture_type(tt);
10808}
10809
10810/**
10811 *
10812 */
10813ostream &
10814operator << (ostream &out, Texture::ComponentType ct) {
10815 return out << Texture::format_component_type(ct);
10816}
10817
10818/**
10819 *
10820 */
10821ostream &
10822operator << (ostream &out, Texture::Format f) {
10823 return out << Texture::format_format(f);
10824}
10825
10826/**
10827 *
10828 */
10829ostream &
10830operator << (ostream &out, Texture::CompressionMode cm) {
10831 return out << Texture::format_compression_mode(cm);
10832}
10833
10834/**
10835 *
10836 */
10837ostream &
10838operator << (ostream &out, Texture::QualityLevel tql) {
10839 return out << Texture::format_quality_level(tql);
10840}
10841
10842/**
10843 *
10844 */
10845istream &
10846operator >> (istream &in, Texture::QualityLevel &tql) {
10847 string word;
10848 in >> word;
10849
10850 tql = Texture::string_quality_level(word);
10851 return in;
10852}
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
void parse_params(const FactoryParams &params, DatagramIterator &scan, BamReader *&manager)
Takes in a FactoryParams, passed from a WritableFactory into any TypedWritable's make function,...
Definition: bamReader.I:275
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
This class represents a thread-safe handle to a promised future result of an asynchronous operation,...
Definition: asyncFuture.h:61
An instance of this class is written to the front of a Bam or Txo file to make the file a cached inst...
void add_dependent_file(const Filename &pathname)
Adds the indicated file to the list of files that will be loaded to generate the data in this record.
get_data
Returns a pointer to the data stored in the record, or NULL if there is no data.
set_data
Stores a new data object on the record.
This class maintains a cache of Bam and/or Txo objects generated from model files and texture images ...
Definition: bamCache.h:42
get_cache_textures
Returns whether texture files (e.g.
Definition: bamCache.h:90
bool store(BamCacheRecord *record)
Flushes a cache entry to disk.
Definition: bamCache.cxx:194
static BamCache * get_global_ptr()
Returns a pointer to the global BamCache object, which is used automatically by the ModelPool and Tex...
Definition: bamCache.I:223
get_cache_compressed_textures
Returns whether compressed texture files will be stored in the cache, as compressed txo files.
Definition: bamCache.h:92
This is the fundamental interface for extracting binary objects from a Bam file, as generated by a Ba...
Definition: bamReader.h:110
void register_finalize(TypedWritable *whom)
Should be called by an object reading itself from the Bam file to indicate that this particular objec...
Definition: bamReader.cxx:808
bool resolve()
This may be called at any time during processing of the Bam file to resolve all the known pointers so...
Definition: bamReader.cxx:325
bool init()
Initializes the BamReader prior to reading any objects from its source.
Definition: bamReader.cxx:85
get_filename
If a BAM is a file, then the BamReader should contain the name of the file.
Definition: bamReader.h:155
TypedWritable * read_object()
Reads a single object from the Bam file.
Definition: bamReader.cxx:224
get_loader_options
Returns the LoaderOptions passed to the loader when the model was requested, if any.
Definition: bamReader.h:156
int get_file_minor_ver() const
Returns the minor version number of the Bam file currently being read.
Definition: bamReader.I:83
static WritableFactory * get_factory()
Returns the global WritableFactory for generating TypedWritable objects.
Definition: bamReader.I:177
This is the fundamental interface for writing binary objects to a Bam file, to be extracted later by ...
Definition: bamWriter.h:63
get_file_texture_mode
Returns the BamTextureMode preference indicated by the Bam file currently being written.
Definition: bamWriter.h:95
get_filename
If a BAM is a file, then the BamWriter should contain the name of the file.
Definition: bamWriter.h:92
int get_file_minor_ver() const
Returns the minor version number of the Bam file currently being written.
Definition: bamWriter.I:59
get_active
Returns the active flag associated with this object.
Definition: bufferContext.h:55
get_resident
Returns the resident flag associated with this object.
Definition: bufferContext.h:56
get_data_size_bytes
Returns the number of bytes previously reported for the data object.
Definition: bufferContext.h:53
void notify_all()
Informs all of the other threads who are currently blocked on wait() that the relevant condition has ...
void wait()
Waits on the condition.
This class specializes ConfigVariable as an enumerated type.
int get_word(size_t n) const
Returns the variable's nth value.
std::string get_unique_value(size_t n) const
Returns the nth unique value of the variable.
size_t get_num_unique_values() const
Returns the number of unique values in the variable.
PointerToArray< Element > cast_non_const() const
Casts away the constness of the CPTA(Element), and returns an equivalent PTA(Element).
This collects together the pieces of data that are accumulated for each node while walking the scene ...
This object performs a depth-first traversal of the scene graph, with optional view-frustum culling,...
Definition: cullTraverser.h:45
This template class calls PipelineCycler::read() in the constructor and PipelineCycler::release_read(...
This template class calls PipelineCycler::read_unlocked(), and then provides a transparent read-only ...
This template class calls PipelineCycler::write() in the constructor and PipelineCycler::release_writ...
A single page of data maintained by a PipelineCycler.
Definition: cycleData.h:50
This class stores a list of directories that can be searched, in order, to locate a particular file.
Definition: dSearchPath.h:28
This class can be used to read a binary file that consists of an arbitrary header followed by a numbe...
bool read_header(std::string &header, size_t num_bytes)
Reads a sequence of bytes from the beginning of the datagram file.
bool open(const FileReference *file)
Opens the indicated filename for reading.
A class to retrieve the individual data elements previously stored in a Datagram.
uint8_t get_uint8()
Extracts an unsigned 8-bit integer.
vector_uchar extract_bytes(size_t size)
Extracts the indicated number of bytes from the datagram and returns them as a vector of bytes.
uint32_t get_uint32()
Extracts an unsigned 32-bit integer.
bool get_bool()
Extracts a boolean value.
std::string get_string()
Extracts a variable-length string.
int32_t get_int32()
Extracts a signed 32-bit integer.
size_t get_remaining_size() const
Returns the number of bytes left in the datagram.
This class can be used to write a binary file that consists of an arbitrary header followed by a numb...
bool open(const FileReference *file)
Opens the indicated filename for writing.
bool write_header(const std::string &header)
Writes a sequence of bytes to the beginning of the datagram file.
An ordered list of data elements, formatted in memory for transmission over a socket or writing to a ...
Definition: datagram.h:38
void add_uint32(uint32_t value)
Adds an unsigned 32-bit integer to the datagram.
Definition: datagram.I:94
void add_int16(int16_t value)
Adds a signed 16-bit integer to the datagram.
Definition: datagram.I:58
void add_int32(int32_t value)
Adds a signed 32-bit integer to the datagram.
Definition: datagram.I:67
void add_uint8(uint8_t value)
Adds an unsigned 8-bit integer to the datagram.
Definition: datagram.I:50
void add_bool(bool value)
Adds a boolean value to the datagram.
Definition: datagram.I:34
void append_data(const void *data, size_t size)
Appends some more raw data to the end of the datagram.
Definition: datagram.cxx:129
void add_string(const std::string &str)
Adds a variable-length string to the datagram.
Definition: datagram.I:219
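A small round-trip sketch for the Datagram / DatagramIterator pair listed above (the values are illustrative):

#include <cstdint>
#include <string>
#include "datagram.h"
#include "datagramIterator.h"

bool datagram_round_trip() {
  Datagram dg;
  dg.add_uint8(2);                // e.g. a component-type tag
  dg.add_uint32(256);             // x size
  dg.add_uint32(256);             // y size
  dg.add_bool(true);              // has alpha
  dg.add_string("texture-name");

  DatagramIterator scan(dg);
  uint8_t tag = scan.get_uint8();
  uint32_t x_size = scan.get_uint32();
  uint32_t y_size = scan.get_uint32();
  bool has_alpha = scan.get_bool();
  std::string name = scan.get_string();

  // Everything written should be read back, leaving nothing behind.
  return tag == 2 && x_size == 256 && y_size == 256 && has_alpha &&
         name == "texture-name" && scan.get_remaining_size() == 0;
}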
An instance of this class is passed to the Factory when requesting it to do its business and construc...
Definition: factoryParams.h:36
void register_factory(TypeHandle handle, CreateFunc *func, void *user_data=nullptr)
Registers a new kind of thing the Factory will be able to create.
Definition: factory.I:73
The name of a file, such as a texture file or an Egg file.
Definition: filename.h:39
std::string get_basename() const
Returns the basename part of the filename.
Definition: filename.I:367
Filename get_filename_index(int index) const
If the pattern flag is set for this Filename and the filename string actually includes a sequence of ...
Definition: filename.cxx:836
bool has_hash() const
Returns true if the filename is indicated to be a filename pattern (that is, set_pattern(true) was ca...
Definition: filename.I:531
void set_basename_wo_extension(const std::string &s)
Replaces the basename part of the filename, without the file extension.
Definition: filename.cxx:783
int find_on_searchpath(const DSearchPath &searchpath)
Performs the reverse of the resolve_filename() operation: assuming that the current filename is fully...
Definition: filename.cxx:1689
bool make_relative_to(Filename directory, bool allow_backups=true)
Adjusts this filename, which must be a fully-specified pathname beginning with a slash,...
Definition: filename.cxx:1640
std::string get_basename_wo_extension() const
Returns the basename part of the filename, without the file extension.
Definition: filename.I:386
void make_absolute()
Converts the filename to a fully-qualified pathname from the root (if it is a relative pathname),...
Definition: filename.cxx:968
static Filename pattern_filename(const std::string &filename)
Constructs a filename that represents a sequence of numbered files.
Definition: filename.I:160
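A sketch of the pattern-filename mechanism above, as used when assembling the numbered pages of a 3-D texture or cube map (names illustrative):

#include "filename.h"
#include "pvector.h"

pvector<Filename> expand_pages(int num_pages) {
  // The "#" marks where the page number is substituted.
  Filename pattern = Filename::pattern_filename("page_#.png");
  pvector<Filename> pages;
  if (pattern.has_hash()) {
    for (int i = 0; i < num_pages; ++i) {
      pages.push_back(pattern.get_filename_index(i));  // page_0.png, page_1.png, ...
    }
  }
  return pages;
}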
This class can be used to test for string matches against standard Unix- shell filename globbing conv...
Definition: globPattern.h:32
bool matches(const std::string &candidate) const
Returns true if the candidate string matches the pattern, false otherwise.
Definition: globPattern.I:122
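GlobPattern usage reduces to a single matches() call; for example:

#include <string>
#include "globPattern.h"

bool looks_like_dds(const std::string &basename) {
  // Unix-shell globbing: "*" matches any run of characters.
  GlobPattern pattern("*.dds");
  return pattern.matches(basename);
}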
This is a base class for the GraphicsStateGuardian class, which is itself a base class for the variou...
static GraphicsStateGuardianBase * get_default_gsg()
Returns a pointer to the "default" GSG.
Encodes a string name in a hash table, mapping it to a pointer.
Definition: internalName.h:38
get_name
Returns the complete name represented by the InternalName and all of its parents.
Definition: internalName.h:61
Specifies parameters that may be passed to the loader.
Definition: loaderOptions.h:23
set_auto_texture_scale
Set this flag to ATS_none, ATS_up, ATS_down, or ATS_pad to control how a texture is scaled from disk ...
Definition: loaderOptions.h:69
get_auto_texture_scale
See set_auto_texture_scale().
Definition: loaderOptions.h:69
get_texture_num_views
See set_texture_num_views().
Definition: loaderOptions.h:64
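LoaderOptions carries per-load texture settings such as the auto-texture-scale mode above; a sketch that disables power-of-2 rescaling for one load, assuming the AutoTextureScale enum is visible through loaderOptions.h (path illustrative):

#include "loaderOptions.h"
#include "texturePool.h"

Texture *load_unscaled(const Filename &path) {
  LoaderOptions options;
  options.set_auto_texture_scale(ATS_none);   // keep the original image size
  return TexturePool::load_texture(path, 0, false, options);
}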
void unlock()
Alias for release() to match C++11 semantics.
Definition: mutexDirect.I:39
void lock()
Alias for acquire() to match C++11 semantics.
Definition: mutexDirect.I:19
A lightweight C++ object whose constructor calls acquire() and whose destructor calls release() on a ...
Definition: mutexHolder.h:25
A base class for all things which can have a name.
Definition: namable.h:26
bool has_name() const
Returns true if the Namable has a nonempty name set, false if the name is empty.
Definition: namable.I:44
static PNMFileTypeRegistry * get_global_ptr()
Returns a pointer to the global PNMFileTypeRegistry object.
PNMFileType * get_type_from_extension(const std::string &filename) const
Tries to determine what the PNMFileType is likely to be for a particular image file based on its exte...
This is the base class of a family of classes that represent particular image file types that PNMImag...
Definition: pnmFileType.h:32
get_maxval
Returns the maximum channel value allowable for any pixel in this image; for instance,...
int get_x_size() const
Returns the number of pixels in the X direction.
PNMReader * make_reader(const Filename &filename, PNMFileType *type=nullptr, bool report_unknown_type=true) const
Returns a newly-allocated PNMReader of the suitable type for reading from the indicated image filenam...
static bool is_grayscale(ColorType color_type)
This static variant of is_grayscale() returns true if the indicated image type represents a grayscale...
get_num_channels
Returns the number of channels in the image.
static bool has_alpha(ColorType color_type)
This static variant of has_alpha() returns true if the indicated image type includes an alpha channel...
int get_y_size() const
Returns the number of pixels in the Y direction.
get_type
If the file type is known (e.g.
The name of this class derives from the fact that we originally implemented it as a layer on top of t...
Definition: pnmImage.h:58
void clear()
Frees all memory allocated for the image, and clears all its parameters (size, color,...
Definition: pnmImage.cxx:48
void set_read_size(int x_size, int y_size)
Specifies the size to we'd like to scale the image upon reading it.
Definition: pnmImage.I:288
xelval get_channel_val(int x, int y, int channel) const
Returns the nth component color at the indicated pixel.
Definition: pnmImage.cxx:837
void set_blue(int x, int y, float b)
Sets the blue component color only at the indicated pixel.
Definition: pnmImage.I:836
void alpha_fill(float alpha=0.0)
Sets the entire alpha channel to the given level.
Definition: pnmImage.I:272
xelval get_green_val(int x, int y) const
Returns the green component color at the indicated pixel.
Definition: pnmImage.I:462
void set_green(int x, int y, float g)
Sets the green component color only at the indicated pixel.
Definition: pnmImage.I:827
float get_alpha(int x, int y) const
Returns the alpha component color at the indicated pixel.
Definition: pnmImage.I:809
float get_gray(int x, int y) const
Returns the gray component color at the indicated pixel.
Definition: pnmImage.I:799
void quick_filter_from(const PNMImage &copy, int xborder=0, int yborder=0)
Resizes from the given image, with a fixed radius of 0.5.
void fill(float red, float green, float blue)
Sets the entire image (except the alpha channel) to the given color.
Definition: pnmImage.I:246
void set_num_channels(int num_channels)
Changes the number of channels associated with the image.
Definition: pnmImage.I:353
xelval get_alpha_val(int x, int y) const
Returns the alpha component color at the indicated pixel.
Definition: pnmImage.I:494
void set_red(int x, int y, float r)
Sets the red component color only at the indicated pixel.
Definition: pnmImage.I:818
void copy_header_from(const PNMImageHeader &header)
Copies just the header information into this image.
Definition: pnmImage.cxx:200
void take_from(PNMImage &orig)
Move the contents of the other image into this one, and empty the other image.
Definition: pnmImage.cxx:224
bool is_valid() const
Returns true if the image has been read in or correctly initialized with a height and width.
Definition: pnmImage.I:342
xelval get_blue_val(int x, int y) const
Returns the blue component color at the indicated pixel.
Definition: pnmImage.I:472
bool read(const Filename &filename, PNMFileType *type=nullptr, bool report_unknown_type=true)
Reads the indicated image filename.
Definition: pnmImage.cxx:278
xel * get_array()
Directly access the underlying PNMImage array.
Definition: pnmImage.I:1098
xelval get_red_val(int x, int y) const
Returns the red component color at the indicated pixel.
Definition: pnmImage.I:452
int get_read_y_size() const
Returns the requested y_size of the image if set_read_size() has been called, or the image y_size oth...
Definition: pnmImage.I:324
xelval get_gray_val(int x, int y) const
Returns the gray component color at the indicated pixel.
Definition: pnmImage.I:484
void set_alpha(int x, int y, float a)
Sets the alpha component color only at the indicated pixel.
Definition: pnmImage.I:859
ColorSpace get_color_space() const
Returns the color space in which the image is encoded.
Definition: pnmImage.I:332
void add_alpha()
Adds an alpha channel to the image, if it does not already have one.
Definition: pnmImage.I:363
xelval * get_alpha_array()
Directly access the underlying PNMImage array of alpha values.
Definition: pnmImage.I:1115
bool write(const Filename &filename, PNMFileType *type=nullptr) const
Writes the image to the indicated filename.
Definition: pnmImage.cxx:385
int get_read_x_size() const
Returns the requested x_size of the image if set_read_size() has been called, or the image x_size oth...
Definition: pnmImage.I:315
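A short sketch exercising the PNMImage calls above: read an image, guarantee an alpha channel, touch one pixel, and write it back (path illustrative):

#include "pnmImage.h"

bool darken_corner(const Filename &path) {
  PNMImage image;
  if (!image.read(path)) {      // determines the file type automatically
    return false;
  }
  image.add_alpha();            // no-op if an alpha channel already exists
  image.alpha_fill(1.0f);       // fully opaque
  // Per-pixel setters take (x, y) and a float in the 0-1 range.
  image.set_red(0, 0, 0.0f);
  image.set_green(0, 0, 0.0f);
  image.set_blue(0, 0, 0.0f);
  return image.write(path);
}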
This is an abstract base class that defines the interface for reading image files of various types.
Definition: pnmReader.h:27
virtual bool is_floating_point()
Returns true if this PNMFileType represents a floating-point image type, false if it is a normal,...
Definition: pnmReader.cxx:71
A lightweight class that represents a single element that may be timed and/or counted via stats.
A lightweight class that can be used to automatically start and stop a PStatCollector around a sectio...
Definition: pStatTimer.h:30
Defines a pfm file, a 2-d table of floating-point numbers, either 3-component or 1-component,...
Definition: pfmFile.h:31
bool read(const Filename &fullpath)
Reads the PFM data from the indicated file, returning true on success, false on failure.
Definition: pfmFile.cxx:121
bool write(const Filename &fullpath)
Writes the PFM data to the indicated file, returning true on success, false on failure.
Definition: pfmFile.cxx:204
bool store(PNMImage &pnmimage) const
Copies the data to the indicated PNMImage, converting to RGB values.
Definition: pfmFile.cxx:360
void set_channel(int x, int y, int c, PN_float32 value)
Replaces the cth channel of the point value at the indicated point.
Definition: pfmFile.I:63
bool load(const PNMImage &pnmimage)
Fills the PfmFile with the data from the indicated PNMImage, converted to floating-point values.
Definition: pfmFile.cxx:287
PN_float32 get_channel(int x, int y, int c) const
Returns the cth channel of the point value at the indicated point.
Definition: pfmFile.I:52
void clear()
Eliminates all data in the file.
Definition: pfmFile.cxx:77
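A sketch of moving data between PNMImage and PfmFile with the calls above (paths illustrative):

#include "pfmFile.h"
#include "pnmImage.h"

bool image_to_pfm(const Filename &image_path, const Filename &pfm_path) {
  PNMImage image;
  if (!image.read(image_path)) {
    return false;
  }
  PfmFile pfm;
  pfm.load(image);                 // integer channels become floating point
  pfm.set_channel(0, 0, 0, 0.5f);  // overwrite one channel of one texel
  return pfm.write(pfm_path);
}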
A table of objects that are saved within the graphics context for reference by handle later.
bool is_texture_queued(const Texture *tex) const
Returns true if the texture has been queued on this GSG, false otherwise.
TextureContext * prepare_texture_now(Texture *tex, int view, GraphicsStateGuardianBase *gsg)
Immediately creates a new TextureContext for the indicated texture and returns it.
bool dequeue_texture(Texture *tex)
Removes a texture from the queued list of textures to be prepared.
void release_texture(TextureContext *tc)
Indicates that a texture context, created by a previous call to prepare_texture(),...
void ref() const
Explicitly increments the reference count.
void local_object()
This function should be called, once, immediately after creating a new instance of some ReferenceCoun...
get_ref_count
Returns the current reference count.
virtual bool unref() const
Explicitly decrements the reference count.
Represents a set of settings that indicate how a texture is sampled.
Definition: samplerState.h:36
get_minfilter
Returns the filter mode of the texture for minification.
Definition: samplerState.h:115
get_wrap_v
Returns the wrap mode of the texture in the V direction.
Definition: samplerState.h:113
get_anisotropic_degree
Returns the degree of anisotropic filtering that should be applied to the texture.
Definition: samplerState.h:119
get_magfilter
Returns the filter mode of the texture for magnification.
Definition: samplerState.h:116
get_wrap_w
Returns the wrap mode of the texture in the W direction.
Definition: samplerState.h:114
get_wrap_u
Returns the wrap mode of the texture in the U direction.
Definition: samplerState.h:112
get_border_color
Returns the solid color of the texture's border.
Definition: samplerState.h:121
A class to read sequential binary data directly from an istream.
Definition: streamReader.h:28
This is a special class object that holds all the information returned by a particular GSG to indicat...
bool was_image_modified() const
Returns true if the texture image has been modified since the last time mark_loaded() was called.
An instance of this object is returned by Texture::peek().
Definition: texturePeeker.h:27
static Texture * load_texture(const Filename &filename, int primary_file_num_channels=0, bool read_mipmaps=false, const LoaderOptions &options=LoaderOptions())
Loads the given filename up into a texture, if it has not already been loaded, and returns the new te...
Definition: texturePool.I:70
static Texture * get_texture(const Filename &filename, int primary_file_num_channels=0, bool read_mipmaps=false)
Returns the texture that has already been previously loaded, or NULL otherwise.
Definition: texturePool.I:41
static Texture * load_2d_texture_array(const Filename &filename_pattern, bool read_mipmaps=false, const LoaderOptions &options=LoaderOptions())
Loads a 2-D texture array that is specified with a series of n pages, all numbered in sequence,...
Definition: texturePool.I:124
static void add_texture(Texture *texture)
Adds the indicated already-loaded texture to the pool.
Definition: texturePool.I:177
static Texture * load_cube_map(const Filename &filename_pattern, bool read_mipmaps=false, const LoaderOptions &options=LoaderOptions())
Loads a cube map texture that is specified with a series of 6 pages, numbered 0 through 5.
Definition: texturePool.I:141
static Texture * load_3d_texture(const Filename &filename_pattern, bool read_mipmaps=false, const LoaderOptions &options=LoaderOptions())
Loads a 3-D texture that is specified with a series of n pages, all numbered in sequence,...
Definition: texturePool.I:107
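The TexturePool statics above are the usual entry points for loading; a brief sketch (file names illustrative):

#include "texturePool.h"

void load_some_textures() {
  // Loads the image, or returns the cached Texture if it was loaded before.
  Texture *diffuse = TexturePool::load_texture("maps/brick.png");

  // Cube map assembled from six numbered pages: cube_0.png .. cube_5.png.
  Texture *env = TexturePool::load_cube_map("maps/cube_#.png");

  // Returns the previously loaded texture only; never touches the disk.
  Texture *cached = TexturePool::get_texture("maps/brick.png");

  (void)diffuse; (void)env; (void)cached;
}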
Represents a texture object, which is typically a single 2-d image but may also represent a 1-d or 3-...
Definition: texture.h:71
CPTA_uchar get_ram_image_as(const std::string &requested_format)
Returns the uncompressed system-RAM image data associated with the texture.
Definition: texture.cxx:7398
static TextureType string_texture_type(const std::string &str)
Returns the TextureType corresponding to the indicated string word.
Definition: texture.cxx:2104
virtual void ensure_loader_type(const Filename &filename)
May be called prior to calling read_txo() or any bam-related Texture- creating callback,...
Definition: texture.cxx:2837
static PT(Texture) make_from_txo(std::istream &in, const std::string &filename="")
Constructs a new Texture object from the txo file.
Definition: texture.cxx:860
bool write_txo(std::ostream &out, const std::string &filename="") const
Writes the texture to a Panda texture object.
Definition: texture.cxx:929
TextureContext * prepare_now(int view, PreparedGraphicsObjects *prepared_objects, GraphicsStateGuardianBase *gsg)
Creates a context for the texture on the particular GSG, if it does not already exist.
Definition: texture.cxx:1982
static std::string format_component_type(ComponentType ct)
Returns the indicated ComponentType converted to a string word.
Definition: texture.cxx:2130
Texture(const std::string &name=std::string())
Constructs an empty texture.
Definition: texture.cxx:375
bool get_resident(PreparedGraphicsObjects *prepared_objects) const
Returns true if this Texture is reported to be resident within graphics memory for the indicated GSG.
Definition: texture.cxx:1547
Texture * load_related(const InternalName *suffix) const
Loads a texture whose filename is derived by concatenating a suffix to the filename of this texture.
Definition: texture.cxx:974
static CompressionMode string_compression_mode(const std::string &str)
Returns the CompressionMode value associated with the given string representation.
Definition: texture.cxx:2463
PTA_uchar new_simple_ram_image(int x_size, int y_size)
Creates an empty array for the simple ram image of the indicated size, and returns a modifiable point...
Definition: texture.cxx:1304
static bool is_specific(CompressionMode compression)
Returns true if the indicated compression mode is one of the specific compression types,...
Definition: texture.cxx:2617
bool has_ram_image() const
Returns true if the Texture has its image contents available in main RAM, false if it exists only in ...
Definition: texture.I:1242
static std::string format_quality_level(QualityLevel tql)
Returns the indicated QualityLevel converted to a string word.
Definition: texture.cxx:2506
size_t estimate_texture_memory() const
Estimates the amount of texture memory that will be consumed by loading this texture.
Definition: texture.cxx:676
bool read(const Filename &fullpath, const LoaderOptions &options=LoaderOptions())
Reads the named filename into the texture.
Definition: texture.cxx:552
void consider_rescale(PNMImage &pnmimage)
Asks the PNMImage to change its scale when it reads the image, according to the whims of the Config....
Definition: texture.cxx:2040
get_texture_type
Returns the overall interpretation of the texture.
Definition: texture.h:365
bool write(const Filename &fullpath)
Writes the texture to the named filename.
Definition: texture.I:298
static bool has_binary_alpha(Format format)
Returns true if the indicated format includes a binary alpha only, false otherwise.
Definition: texture.cxx:2664
void * get_ram_mipmap_pointer(int n) const
Similar to get_ram_mipmap_image(); however, in this case the void pointer for the given ram image is...
Definition: texture.cxx:1229
static std::string format_compression_mode(CompressionMode cm)
Returns the indicated CompressionMode converted to a string word.
Definition: texture.cxx:2421
get_aux_data
Returns a record previously recorded via set_aux_data().
Definition: texture.h:552
static bool is_srgb(Format format)
Returns true if the indicated format is in the sRGB color space, false otherwise.
Definition: texture.cxx:2679
void set_orig_file_size(int x, int y, int z=1)
Specifies the size of the texture as it exists in its original disk file, before any Panda scaling.
Definition: texture.cxx:1962
bool get_active(PreparedGraphicsObjects *prepared_objects) const
Returns true if this Texture was rendered in the most recent frame within the indicated GSG.
Definition: texture.cxx:1520
get_keep_ram_image
Returns the flag that indicates whether this Texture is eligible to have its main RAM copy of the tex...
Definition: texture.h:472
bool read_dds(std::istream &in, const std::string &filename="", bool header_only=false)
Reads the texture from a DDS file object.
Definition: texture.cxx:944
void generate_normalization_cube_map(int size)
Generates a special cube map image in the texture that can be used to apply bump mapping effects: for...
Definition: texture.cxx:425
bool has_compression() const
Returns true if the texture indicates it wants to be compressed, either with CM_on or higher,...
Definition: texture.I:1102
static QualityLevel string_quality_level(const std::string &str)
Returns the QualityLevel value associated with the given string representation.
Definition: texture.cxx:2526
void generate_alpha_scale_map()
Generates a special 256x1 1-d texture that can be used to apply an arbitrary alpha scale to objects b...
Definition: texture.cxx:527
bool read_txo(std::istream &in, const std::string &filename="")
Reads the texture from a Panda texture object.
Definition: texture.cxx:846
static ComponentType string_component_type(const std::string &str)
Returns the ComponentType corresponding to the indicated string word.
Definition: texture.cxx:2159
static void register_with_read_factory()
Factory method to generate a Texture object.
Definition: texture.cxx:10000
static bool adjust_size(int &x_size, int &y_size, const std::string &name, bool for_padding, AutoTextureScale auto_texture_scale=ATS_unspecified)
Computes the proper size of the texture, based on the original size, the filename,...
Definition: texture.cxx:2727
virtual void finalize(BamReader *manager)
Called by the BamReader to perform any final actions needed for setting up the object after all objec...
Definition: texture.cxx:10028
static int up_to_power_2(int value)
Returns the smallest power of 2 greater than or equal to value.
Definition: texture.cxx:2009
static AutoTextureScale get_textures_power_2()
This flag returns ATS_none, ATS_up, or ATS_down and controls the scaling of textures in general.
Definition: texture.I:1863
get_auto_texture_scale
Returns the power-of-2 texture-scaling mode that will be applied to this particular texture when it i...
Definition: texture.h:532
void set_ram_mipmap_pointer_from_int(long long pointer, int n, int page_size)
Accepts a raw pointer cast as an int, which is then passed to set_ram_mipmap_pointer(); see the docum...
Definition: texture.cxx:1270
virtual void write_datagram(BamWriter *manager, Datagram &me)
Function to write the important information in the particular object to a Datagram.
Definition: texture.cxx:10009
static int down_to_power_2(int value)
Returns the largest power of 2 less than or equal to value.
Definition: texture.cxx:2021
bool release(PreparedGraphicsObjects *prepared_objects)
Frees the texture context only on the indicated object, if it exists there.
Definition: texture.cxx:1574
virtual bool has_cull_callback() const
Should be overridden by derived classes to return true if cull_callback() has been defined.
Definition: texture.cxx:2575
bool uses_mipmaps() const
Returns true if the minfilter settings on this texture indicate the use of mipmapping,...
Definition: texture.I:1127
static std::string format_texture_type(TextureType tt)
Returns the indicated TextureType converted to a string word.
Definition: texture.cxx:2078
has_simple_ram_image
Returns true if the Texture has a "simple" image available in main RAM.
Definition: texture.h:517
static bool is_integer(Format format)
Returns true if the indicated format is an integer format, false otherwise.
Definition: texture.cxx:2696
PTA_uchar modify_simple_ram_image()
Returns a modifiable pointer to the internal "simple" texture image.
Definition: texture.cxx:1293
void clear_ram_mipmap_image(int n)
Discards the current system-RAM image for the nth mipmap level.
Definition: texture.cxx:1278
bool was_image_modified(PreparedGraphicsObjects *prepared_objects) const
Returns true if the texture needs to be re-loaded onto the indicated GSG, either because its image da...
Definition: texture.cxx:1461
bool read_ktx(std::istream &in, const std::string &filename="", bool header_only=false)
Reads the texture from a KTX file object.
Definition: texture.cxx:961
size_t get_data_size_bytes(PreparedGraphicsObjects *prepared_objects) const
Returns the number of bytes which the texture is reported to consume within graphics memory,...
Definition: texture.cxx:1493
get_expected_ram_page_size
Returns the number of bytes that should be used per each Z page of the 3-d texture.
Definition: texture.h:449
virtual bool cull_callback(CullTraverser *trav, const CullTraverserData &data) const
If has_cull_callback() returns true, this function will be called during the cull traversal to perfor...
Definition: texture.cxx:2589
bool is_prepared(PreparedGraphicsObjects *prepared_objects) const
Returns true if the texture has already been prepared or enqueued for preparation on the indicated GS...
Definition: texture.cxx:1445
void set_ram_image_as(CPTA_uchar image, const std::string &provided_format)
Replaces the current system-RAM image with the new data, converting it first if necessary from the in...
Definition: texture.cxx:1026
void set_ram_mipmap_pointer(int n, void *image, size_t page_size=0)
Sets an explicit void pointer as the texture's mipmap image for the indicated level.
Definition: texture.cxx:1248
set_aux_data
Records an arbitrary object in the Texture, associated with a specified key.
Definition: texture.h:552
void texture_uploaded()
This method is called by the GraphicsEngine at the beginning of the frame *after* a texture has been ...
Definition: texture.cxx:2552
void set_size_padded(int x=1, int y=1, int z=1)
Changes the size of the texture, padding if necessary, and setting the pad region as well.
Definition: texture.cxx:1933
static bool has_alpha(Format format)
Returns true if the indicated format includes alpha, false otherwise.
Definition: texture.cxx:2633
get_num_loadable_ram_mipmap_images
Returns the number of contiguous mipmap levels that exist in RAM, up until the first gap in the seque...
Definition: texture.h:502
void generate_simple_ram_image()
Computes the "simple" ram image by loading the main RAM image, if it is not already available,...
Definition: texture.cxx:1325
static Format string_format(const std::string &str)
Returns the Format corresponding to the indicated string word.
Definition: texture.cxx:2303
clear_aux_data
Removes a record previously recorded via set_aux_data().
Definition: texture.h:552
int release_all()
Frees the context allocated on all objects for which the texture has been declared.
Definition: texture.cxx:1600
CPTA_uchar get_ram_mipmap_image(int n) const
Returns the system-RAM image data associated with the nth mipmap level, if present.
Definition: texture.cxx:1215
static std::string format_format(Format f)
Returns the indicated Format converted to a string word.
Definition: texture.cxx:2189
is_cacheable
Returns true if there is enough information in this Texture object to write it to the bam cache succe...
Definition: texture.h:473
static bool is_unsigned(ComponentType ctype)
Returns true if the indicated component type is unsigned, false otherwise.
Definition: texture.cxx:2605
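Tying several of the Texture methods above together: a sketch that reads an image into a Texture, fetches its RAM image, and writes the result out as a .txo through the VirtualFileSystem (paths illustrative):

#include "texture.h"
#include "virtualFileSystem.h"

bool save_as_txo(const Filename &image_path, const Filename &txo_path) {
  PT(Texture) tex = new Texture("example");
  if (!tex->read(image_path)) {      // loads the image data into system RAM
    return false;
  }
  if (!tex->has_ram_image()) {       // expected to be present right after read()
    return false;
  }
  // Uncompressed RAM image, reordered into the requested component layout.
  CPTA_uchar rgba = tex->get_ram_image_as("RGBA");
  (void)rgba;

  VirtualFileSystem *vfs = VirtualFileSystem::get_global_ptr();
  std::ostream *out = vfs->open_write_file(txo_path, true, true);
  if (out == nullptr) {
    return false;
  }
  bool okay = tex->write_txo(*out, txo_path.get_basename());
  VirtualFileSystem::close_write_file(out);
  return okay;
}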
A thread; that is, a lightweight process.
Definition: thread.h:46
static void consider_yield()
Possibly suspends the current thread for the rest of the current epoch, if it has run for enough this...
Definition: thread.I:212
get_current_thread
Returns a pointer to the currently-executing Thread object.
Definition: thread.h:109
TypeHandle is the identifier used to differentiate C++ class types.
Definition: typeHandle.h:81
bool is_exact_type(TypeHandle handle) const
Returns true if the current object is the indicated type exactly.
Definition: typedObject.I:38
bool is_of_type(TypeHandle handle) const
Returns true if the current object is or derives from the indicated type.
Definition: typedObject.I:28
A base class for things which need to inherit from both TypedObject and from ReferenceCount.
Base class for objects that can be written to and read from Bam files.
Definition: typedWritable.h:35
A hierarchy of directories and files that appears to be one continuous file system,...
static void close_write_file(std::ostream *stream)
Closes a file opened by a previous call to open_write_file().
Filename get_cwd() const
Returns the current directory name.
bool exists(const Filename &filename) const
Convenience function; returns true if the named file exists.
bool resolve_filename(Filename &filename, const DSearchPath &searchpath, const std::string &default_extension=std::string()) const
Searches the given search path for the filename.
std::ostream * open_write_file(const Filename &filename, bool auto_wrap, bool truncate)
Convenience function; returns a newly allocated ostream if the file exists and can be written,...
static void close_read_file(std::istream *stream)
Closes a file opened by a previous call to open_read_file().
PointerTo< VirtualFile > get_file(const Filename &filename, bool status_only=false) const
Looks up the file by the indicated name in the file system.
static VirtualFileSystem * get_global_ptr()
Returns the default global VirtualFileSystem.
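A sketch of the VirtualFileSystem calls above, resolving a texture name against a search path before opening it; DSearchPath::append_directory() and the open_read_file() overload used here are assumptions not shown in this listing:

#include "dSearchPath.h"
#include "virtualFileSystem.h"

std::istream *open_resolved(Filename name) {
  VirtualFileSystem *vfs = VirtualFileSystem::get_global_ptr();

  DSearchPath search;
  search.append_directory("maps");
  search.append_directory(vfs->get_cwd());

  // On success, 'name' is rewritten in place to the first match found.
  if (!vfs->resolve_filename(name, search, "png")) {
    return nullptr;
  }
  // The caller should return the stream via VirtualFileSystem::close_read_file().
  return vfs->open_read_file(name, true);   // true: transparently unwrap .pz files
}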
The abstract base class for a file or directory within the VirtualFileSystem.
Definition: virtualFile.h:35
This is our own Panda specialization on the default STL map.
Definition: pmap.h:49
This is our own Panda specialization on the default STL vector.
Definition: pvector.h:42
EXPCL_PANDA_PNMIMAGE float decode_sRGB_float(unsigned char val)
Decodes the sRGB-encoded unsigned char value to a linearized float in the range 0-1.
Definition: convert_srgb.I:18
EXPCL_PANDA_PNMIMAGE unsigned char encode_sRGB_uchar(unsigned char val)
Encodes the linearized unsigned char value to an sRGB-encoded unsigned char value.
Definition: convert_srgb.I:80
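The two sRGB helpers above convert between sRGB-encoded bytes and linearized values; a quick sketch (the numeric comments are approximate):

#include <iostream>
#include "convert_srgb.h"

void srgb_example() {
  unsigned char linear_byte = 55;                            // a linearized 8-bit value
  unsigned char srgb_byte = encode_sRGB_uchar(linear_byte);  // roughly 128 once sRGB-encoded
  float linear = decode_sRGB_float(srgb_byte);               // back to roughly 0.22 in linear 0-1
  std::cout << int(srgb_byte) << " " << linear << std::endl;
}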
std::ostream & indent(std::ostream &out, int indent_level)
A handy function for doing text formatting.
Definition: dcindent.cxx:22
int get_lowest_on_bit(unsigned short x)
Returns the index of the lowest 1 bit in the word.
Definition: pbitops.I:175
int get_next_higher_bit(unsigned short x)
Returns the smallest power of 2 greater than x.
Definition: pbitops.I:328
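The bit helpers above are low-level counterparts of the power-of-2 rounding applied to texture sizes; a small sketch pairing get_lowest_on_bit() with the Texture statics listed earlier:

#include <iostream>
#include "pbitops.h"
#include "texture.h"

void power_of_2_example() {
  // 48 is 0b110000, so its lowest 1 bit sits at index 4.
  std::cout << get_lowest_on_bit((unsigned short)48) << "\n";  // prints 4

  // Rounding an arbitrary texture dimension to a power of 2.
  std::cout << Texture::up_to_power_2(200) << "\n";    // prints 256
  std::cout << Texture::down_to_power_2(200) << "\n";  // prints 128
}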
string upcase(const string &s)
Returns the input string with all lowercase letters converted to uppercase.