1/**
2 * PANDA 3D SOFTWARE
3 * Copyright (c) Carnegie Mellon University. All rights reserved.
4 *
5 * All use of this software is subject to the terms of the revised BSD
6 * license. You should have received a copy of this license along
7 * with this source code in a file named "LICENSE."
8 *
9 * @file texture.cxx
10 * @author mike
11 * @date 1997-01-09
12 * @author fperazzi, PandaSE
13 * @date 2010-04-29
14 */
15
16#include "pandabase.h"
17#include "texture.h"
18#include "config_gobj.h"
19#include "config_putil.h"
20#include "texturePool.h"
21#include "textureContext.h"
22#include "bamCache.h"
23#include "bamCacheRecord.h"
24#include "datagram.h"
25#include "datagramIterator.h"
26#include "bamReader.h"
27#include "bamWriter.h"
28#include "string_utils.h"
30#include "pnmImage.h"
31#include "pnmReader.h"
32#include "pfmFile.h"
33#include "pnmFileTypeRegistry.h"
34#include "virtualFileSystem.h"
35#include "datagramInputFile.h"
36#include "datagramOutputFile.h"
37#include "bam.h"
38#include "zStream.h"
39#include "indent.h"
40#include "cmath.h"
41#include "pStatTimer.h"
42#include "pbitops.h"
43#include "streamReader.h"
44#include "texturePeeker.h"
45#include "convert_srgb.h"
46
47#ifdef HAVE_SQUISH
48#include <squish.h>
49#endif // HAVE_SQUISH
50
51#include <stddef.h>
52
53using std::endl;
54using std::istream;
55using std::max;
56using std::min;
57using std::ostream;
58using std::string;
59using std::swap;
60
62("texture-quality-level", Texture::QL_normal,
63 PRC_DESC("This specifies a global quality level for all textures. You "
64 "may specify either fastest, normal, or best. This actually "
65 "affects the meaning of Texture::set_quality_level(QL_default), "
66 "so it may be overridden on a per-texture basis. This generally "
67 "only has an effect when using the tinydisplay software renderer; "
68 "it has little or no effect on normal, hardware-accelerated "
69 "renderers. See Texture::set_quality_level()."));
70
71PStatCollector Texture::_texture_read_pcollector("*:Texture:Read");
72TypeHandle Texture::_type_handle;
73TypeHandle Texture::CData::_type_handle;
74AutoTextureScale Texture::_textures_power_2 = ATS_unspecified;
75
76// Stuff to read and write DDS files.
77
78// little-endian, of course
79#define DDS_MAGIC 0x20534444
80
81
82// DDS_header.dwFlags
83#define DDSD_CAPS 0x00000001
84#define DDSD_HEIGHT 0x00000002
85#define DDSD_WIDTH 0x00000004
86#define DDSD_PITCH 0x00000008
87#define DDSD_PIXELFORMAT 0x00001000
88#define DDSD_MIPMAPCOUNT 0x00020000
89#define DDSD_LINEARSIZE 0x00080000
90#define DDSD_DEPTH 0x00800000
91
92// DDS_header.sPixelFormat.dwFlags
93#define DDPF_ALPHAPIXELS 0x00000001
94#define DDPF_FOURCC 0x00000004
95#define DDPF_INDEXED 0x00000020
96#define DDPF_RGB 0x00000040
97
98// DDS_header.sCaps.dwCaps1
99#define DDSCAPS_COMPLEX 0x00000008
100#define DDSCAPS_TEXTURE 0x00001000
101#define DDSCAPS_MIPMAP 0x00400000
102
103// DDS_header.sCaps.dwCaps2
104#define DDSCAPS2_CUBEMAP 0x00000200
105#define DDSCAPS2_CUBEMAP_POSITIVEX 0x00000400
106#define DDSCAPS2_CUBEMAP_NEGATIVEX 0x00000800
107#define DDSCAPS2_CUBEMAP_POSITIVEY 0x00001000
108#define DDSCAPS2_CUBEMAP_NEGATIVEY 0x00002000
109#define DDSCAPS2_CUBEMAP_POSITIVEZ 0x00004000
110#define DDSCAPS2_CUBEMAP_NEGATIVEZ 0x00008000
111#define DDSCAPS2_VOLUME 0x00200000
112
113struct DDSPixelFormat {
114 unsigned int pf_size;
115 unsigned int pf_flags;
116 unsigned int four_cc;
117 unsigned int rgb_bitcount;
118 unsigned int r_mask;
119 unsigned int g_mask;
120 unsigned int b_mask;
121 unsigned int a_mask;
122};
123
124struct DDSCaps2 {
125 unsigned int caps1;
126 unsigned int caps2;
127 unsigned int ddsx;
128};
129
130struct DDSHeader {
131 unsigned int dds_magic;
132 unsigned int dds_size;
133 unsigned int dds_flags;
134 unsigned int height;
135 unsigned int width;
136 unsigned int pitch;
137 unsigned int depth;
138 unsigned int num_levels;
139
140 DDSPixelFormat pf;
141 DDSCaps2 caps;
142};
143
144// Stuff to read KTX files.
145enum KTXType {
146 KTX_BYTE = 0x1400,
147 KTX_UNSIGNED_BYTE = 0x1401,
148 KTX_SHORT = 0x1402,
149 KTX_UNSIGNED_SHORT = 0x1403,
150 KTX_INT = 0x1404,
151 KTX_UNSIGNED_INT = 0x1405,
152 KTX_FLOAT = 0x1406,
153 KTX_HALF_FLOAT = 0x140B,
154 KTX_UNSIGNED_BYTE_3_3_2 = 0x8032,
155 KTX_UNSIGNED_SHORT_4_4_4_4 = 0x8033,
156 KTX_UNSIGNED_SHORT_5_5_5_1 = 0x8034,
157 KTX_UNSIGNED_INT_8_8_8_8 = 0x8035,
158 KTX_UNSIGNED_INT_10_10_10_2 = 0x8036,
159 KTX_UNSIGNED_BYTE_2_3_3_REV = 0x8362,
160 KTX_UNSIGNED_SHORT_5_6_5 = 0x8363,
161 KTX_UNSIGNED_SHORT_5_6_5_REV = 0x8364,
162 KTX_UNSIGNED_SHORT_4_4_4_4_REV = 0x8365,
163 KTX_UNSIGNED_SHORT_1_5_5_5_REV = 0x8366,
164 KTX_UNSIGNED_INT_8_8_8_8_REV = 0x8367,
165 KTX_UNSIGNED_INT_2_10_10_10_REV = 0x8368,
166 KTX_UNSIGNED_INT_24_8 = 0x84FA,
167 KTX_UNSIGNED_INT_10F_11F_11F_REV = 0x8C3B,
168 KTX_UNSIGNED_INT_5_9_9_9_REV = 0x8C3E,
169 KTX_FLOAT_32_UNSIGNED_INT_24_8_REV = 0x8DAD,
170};
171
172enum KTXFormat {
173 KTX_ALPHA = 0x1906,
174 KTX_ALPHA12 = 0x803D,
175 KTX_ALPHA16 = 0x803E,
176 KTX_ALPHA16_SNORM = 0x9018,
177 KTX_ALPHA4 = 0x803B,
178 KTX_ALPHA8 = 0x803C,
179 KTX_ALPHA8_SNORM = 0x9014,
180 KTX_ALPHA_SNORM = 0x9010,
181 KTX_BGR = 0x80E0,
182 KTX_BGR_INTEGER = 0x8D9A,
183 KTX_BGRA = 0x80E1,
184 KTX_BGRA_INTEGER = 0x8D9B,
185 KTX_BLUE = 0x1905,
186 KTX_BLUE_INTEGER = 0x8D96,
187 KTX_COLOR_INDEX = 0x1900,
188 KTX_DEPTH24_STENCIL8 = 0x88F0,
189 KTX_DEPTH32F_STENCIL8 = 0x8CAD,
190 KTX_DEPTH_COMPONENT = 0x1902,
191 KTX_DEPTH_COMPONENT16 = 0x81A5,
192 KTX_DEPTH_COMPONENT24 = 0x81A6,
193 KTX_DEPTH_COMPONENT32 = 0x81A7,
194 KTX_DEPTH_COMPONENT32F = 0x8CAC,
195 KTX_DEPTH_STENCIL = 0x84F9,
196 KTX_GREEN = 0x1904,
197 KTX_GREEN_INTEGER = 0x8D95,
198 KTX_INTENSITY = 0x8049,
199 KTX_INTENSITY12 = 0x804C,
200 KTX_INTENSITY16 = 0x804D,
201 KTX_INTENSITY16_SNORM = 0x901B,
202 KTX_INTENSITY4 = 0x804A,
203 KTX_INTENSITY8 = 0x804B,
204 KTX_INTENSITY8_SNORM = 0x9017,
205 KTX_INTENSITY_SNORM = 0x9013,
206 KTX_LUMINANCE = 0x1909,
207 KTX_LUMINANCE12 = 0x8041,
208 KTX_LUMINANCE12_ALPHA12 = 0x8047,
209 KTX_LUMINANCE12_ALPHA4 = 0x8046,
210 KTX_LUMINANCE16 = 0x8042,
211 KTX_LUMINANCE16_ALPHA16 = 0x8048,
212 KTX_LUMINANCE16_ALPHA16_SNORM = 0x901A,
213 KTX_LUMINANCE16_SNORM = 0x9019,
214 KTX_LUMINANCE4 = 0x803F,
215 KTX_LUMINANCE4_ALPHA4 = 0x8043,
216 KTX_LUMINANCE6_ALPHA2 = 0x8044,
217 KTX_LUMINANCE8 = 0x8040,
218 KTX_LUMINANCE8_ALPHA8 = 0x8045,
219 KTX_LUMINANCE8_ALPHA8_SNORM = 0x9016,
220 KTX_LUMINANCE8_SNORM = 0x9015,
221 KTX_LUMINANCE_ALPHA = 0x190A,
222 KTX_LUMINANCE_ALPHA_SNORM = 0x9012,
223 KTX_LUMINANCE_SNORM = 0x9011,
224 KTX_R11F_G11F_B10F = 0x8C3A,
225 KTX_R16 = 0x822A,
226 KTX_R16_SNORM = 0x8F98,
227 KTX_R16F = 0x822D,
228 KTX_R16I = 0x8233,
229 KTX_R16UI = 0x8234,
230 KTX_R32F = 0x822E,
231 KTX_R32I = 0x8235,
232 KTX_R32UI = 0x8236,
233 KTX_R3_G3_B2 = 0x2A10,
234 KTX_R8 = 0x8229,
235 KTX_R8_SNORM = 0x8F94,
236 KTX_R8I = 0x8231,
237 KTX_R8UI = 0x8232,
238 KTX_RED = 0x1903,
239 KTX_RED_INTEGER = 0x8D94,
240 KTX_RED_SNORM = 0x8F90,
241 KTX_RG = 0x8227,
242 KTX_RG16 = 0x822C,
243 KTX_RG16_SNORM = 0x8F99,
244 KTX_RG16F = 0x822F,
245 KTX_RG16I = 0x8239,
246 KTX_RG16UI = 0x823A,
247 KTX_RG32F = 0x8230,
248 KTX_RG32I = 0x823B,
249 KTX_RG32UI = 0x823C,
250 KTX_RG8 = 0x822B,
251 KTX_RG8_SNORM = 0x8F95,
252 KTX_RG8I = 0x8237,
253 KTX_RG8UI = 0x8238,
254 KTX_RG_INTEGER = 0x8228,
255 KTX_RG_SNORM = 0x8F91,
256 KTX_RGB = 0x1907,
257 KTX_RGB10 = 0x8052,
258 KTX_RGB10_A2 = 0x8059,
259 KTX_RGB12 = 0x8053,
260 KTX_RGB16 = 0x8054,
261 KTX_RGB16_SNORM = 0x8F9A,
262 KTX_RGB16F = 0x881B,
263 KTX_RGB16I = 0x8D89,
264 KTX_RGB16UI = 0x8D77,
265 KTX_RGB2 = 0x804E,
266 KTX_RGB32F = 0x8815,
267 KTX_RGB32I = 0x8D83,
268 KTX_RGB32UI = 0x8D71,
269 KTX_RGB4 = 0x804F,
270 KTX_RGB5 = 0x8050,
271 KTX_RGB5_A1 = 0x8057,
272 KTX_RGB8 = 0x8051,
273 KTX_RGB8_SNORM = 0x8F96,
274 KTX_RGB8I = 0x8D8F,
275 KTX_RGB8UI = 0x8D7D,
276 KTX_RGB9_E5 = 0x8C3D,
277 KTX_RGB_INTEGER = 0x8D98,
278 KTX_RGB_SNORM = 0x8F92,
279 KTX_RGBA = 0x1908,
280 KTX_RGBA12 = 0x805A,
281 KTX_RGBA16 = 0x805B,
282 KTX_RGBA16_SNORM = 0x8F9B,
283 KTX_RGBA16F = 0x881A,
284 KTX_RGBA16I = 0x8D88,
285 KTX_RGBA16UI = 0x8D76,
286 KTX_RGBA2 = 0x8055,
287 KTX_RGBA32F = 0x8814,
288 KTX_RGBA32I = 0x8D82,
289 KTX_RGBA32UI = 0x8D70,
290 KTX_RGBA4 = 0x8056,
291 KTX_RGBA8 = 0x8058,
292 KTX_RGBA8_SNORM = 0x8F97,
293 KTX_RGBA8I = 0x8D8E,
294 KTX_RGBA8UI = 0x8D7C,
295 KTX_RGBA_INTEGER = 0x8D99,
296 KTX_RGBA_SNORM = 0x8F93,
297 KTX_SLUMINANCE = 0x8C46,
298 KTX_SLUMINANCE8 = 0x8C47,
299 KTX_SLUMINANCE8_ALPHA8 = 0x8C45,
300 KTX_SLUMINANCE_ALPHA = 0x8C44,
301 KTX_SRGB = 0x8C40,
302 KTX_SRGB8 = 0x8C41,
303 KTX_SRGB8_ALPHA8 = 0x8C43,
304 KTX_SRGB_ALPHA = 0x8C42,
305 KTX_STENCIL_INDEX = 0x1901,
306 KTX_STENCIL_INDEX1 = 0x8D46,
307 KTX_STENCIL_INDEX16 = 0x8D49,
308 KTX_STENCIL_INDEX4 = 0x8D47,
309 KTX_STENCIL_INDEX8 = 0x8D48,
310};
311
312enum KTXCompressedFormat {
313 KTX_COMPRESSED_LUMINANCE_ALPHA_LATC2 = 0x8C72,
314 KTX_COMPRESSED_LUMINANCE_LATC1 = 0x8C70,
315 KTX_COMPRESSED_R11_EAC = 0x9270,
316 KTX_COMPRESSED_RED = 0x8225,
317 KTX_COMPRESSED_RED_RGTC1 = 0x8DBB,
318 KTX_COMPRESSED_RG = 0x8226,
319 KTX_COMPRESSED_RG11_EAC = 0x9272,
320 KTX_COMPRESSED_RG_RGTC2 = 0x8DBD,
321 KTX_COMPRESSED_RGB = 0x84ED,
322 KTX_COMPRESSED_RGB8_ETC2 = 0x9274,
323 KTX_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2 = 0x9276,
324 KTX_COMPRESSED_RGB_BPTC_SIGNED_FLOAT = 0x8E8E,
325 KTX_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT = 0x8E8F,
326 KTX_COMPRESSED_RGB_FXT1_3DFX = 0x86B0,
327 KTX_COMPRESSED_RGB_PVRTC_2BPPV1_IMG = 0x8C01,
328 KTX_COMPRESSED_RGB_PVRTC_4BPPV1_IMG = 0x8C00,
329 KTX_COMPRESSED_RGB_S3TC_DXT1 = 0x83F0,
330 KTX_COMPRESSED_RGBA = 0x84EE,
331 KTX_COMPRESSED_RGBA8_ETC2_EAC = 0x9278,
332 KTX_COMPRESSED_RGBA_BPTC_UNORM = 0x8E8C,
333 KTX_COMPRESSED_RGBA_FXT1_3DFX = 0x86B1,
334 KTX_COMPRESSED_RGBA_PVRTC_2BPPV1_IMG = 0x8C03,
335 KTX_COMPRESSED_RGBA_PVRTC_2BPPV2_IMG = 0x9137,
336 KTX_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG = 0x8C02,
337 KTX_COMPRESSED_RGBA_PVRTC_4BPPV2_IMG = 0x9138,
338 KTX_COMPRESSED_RGBA_S3TC_DXT1 = 0x83F1,
339 KTX_COMPRESSED_RGBA_S3TC_DXT3 = 0x83F2,
340 KTX_COMPRESSED_RGBA_S3TC_DXT5 = 0x83F3,
341 KTX_COMPRESSED_SIGNED_LUMINANCE_ALPHA_LATC2 = 0x8C73,
342 KTX_COMPRESSED_SIGNED_LUMINANCE_LATC1 = 0x8C71,
343 KTX_COMPRESSED_SIGNED_R11_EAC = 0x9271,
344 KTX_COMPRESSED_SIGNED_RED_RGTC1 = 0x8DBC,
345 KTX_COMPRESSED_SIGNED_RG11_EAC = 0x9273,
346 KTX_COMPRESSED_SIGNED_RG_RGTC2 = 0x8DBE,
347 KTX_COMPRESSED_SLUMINANCE = 0x8C4A,
348 KTX_COMPRESSED_SLUMINANCE_ALPHA = 0x8C4B,
349 KTX_COMPRESSED_SRGB = 0x8C48,
350 KTX_COMPRESSED_SRGB8_ALPHA8_ETC2_EAC = 0x9279,
351 KTX_COMPRESSED_SRGB8_ETC2 = 0x9275,
352 KTX_COMPRESSED_SRGB8_PUNCHTHROUGH_ALPHA1_ETC2 = 0x9277,
353 KTX_COMPRESSED_SRGB_ALPHA = 0x8C49,
354 KTX_COMPRESSED_SRGB_ALPHA_BPTC_UNORM = 0x8E8D,
355 KTX_COMPRESSED_SRGB_ALPHA_PVRTC_2BPPV1 = 0x8A56,
356 KTX_COMPRESSED_SRGB_ALPHA_PVRTC_2BPPV2 = 0x93F0,
357 KTX_COMPRESSED_SRGB_ALPHA_PVRTC_4BPPV1 = 0x8A57,
358 KTX_COMPRESSED_SRGB_ALPHA_PVRTC_4BPPV2 = 0x93F1,
359 KTX_COMPRESSED_SRGB_ALPHA_S3TC_DXT1 = 0x8C4D,
360 KTX_COMPRESSED_SRGB_ALPHA_S3TC_DXT3 = 0x8C4E,
361 KTX_COMPRESSED_SRGB_ALPHA_S3TC_DXT5 = 0x8C4F,
362 KTX_COMPRESSED_SRGB_PVRTC_2BPPV1 = 0x8A54,
363 KTX_COMPRESSED_SRGB_PVRTC_4BPPV1 = 0x8A55,
364 KTX_COMPRESSED_SRGB_S3TC_DXT1 = 0x8C4C,
365 KTX_ETC1_RGB8 = 0x8D64,
366 KTX_ETC1_SRGB8 = 0x88EE,
367};
368
369/**
370 * Constructs an empty texture. The default is to set up the texture as an
371 * empty 2-d texture; follow up with one of the variants of setup_texture() if
372 * this is not what you want.
373 */
374Texture::
375Texture(const string &name) :
376 Namable(name),
377 _lock(name),
378 _cvar(_lock)
379{
380 _reloading = false;
381
382 CDWriter cdata(_cycler, true);
383 do_set_format(cdata, F_rgb);
384 do_set_component_type(cdata, T_unsigned_byte);
385}
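
// Illustrative sketch (not from the original source): a texture constructed as
// above is an empty 2-d RGB texture; one of the setup_texture() variants
// re-defines it.  The name and sizes here are hypothetical.
//
//   PT(Texture) tex = new Texture("generated");
//   tex->setup_2d_texture(256, 256, Texture::T_unsigned_byte, Texture::F_rgba);
//   PTA_uchar image = tex->make_ram_image();  // RAM buffer to be filled by the caller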
386
387/**
388 * Use Texture::make_copy() to make a duplicate copy of an existing Texture.
389 */
390Texture::
391Texture(const Texture &copy) :
392 Namable(copy),
393 _cycler(copy._cycler),
394 _lock(copy.get_name()),
395 _cvar(_lock)
396{
397 _reloading = false;
398}
399
400/**
401 * Use Texture::make_copy() to make a duplicate copy of an existing Texture.
402 */
403void Texture::
404operator = (const Texture &copy) {
405 Namable::operator = (copy);
406 _cycler = copy._cycler;
407}
408
409/**
410 *
411 */
412Texture::
413~Texture() {
414 release_all();
415 nassertv(!_reloading);
416}
417
418/**
419 * Generates a special cube map image in the texture that can be used to apply
420 * bump mapping effects: for each texel in the cube map that is indexed by the
421 * 3-d texture coordinates (x, y, z), the resulting value is the normalized
422 * vector (x, y, z) (compressed from -1..1 into 0..1).
423 */
424void Texture::
425generate_normalization_cube_map(int size) {
426 CDWriter cdata(_cycler, true);
427 do_setup_texture(cdata, TT_cube_map, size, size, 6, T_unsigned_byte, F_rgb);
428 PTA_uchar image = do_make_ram_image(cdata);
429 cdata->_keep_ram_image = true;
430
431 cdata->inc_image_modified();
432 cdata->inc_properties_modified();
433
434 PN_stdfloat half_size = (PN_stdfloat)size * 0.5f;
435 PN_stdfloat center = half_size - 0.5f;
436
437 LMatrix4 scale
438 (127.5f, 0.0f, 0.0f, 0.0f,
439 0.0f, 127.5f, 0.0f, 0.0f,
440 0.0f, 0.0f, 127.5f, 0.0f,
441 127.5f, 127.5f, 127.5f, 1.0f);
442
443 unsigned char *p = image;
444 int xi, yi;
445
446 // Page 0: positive X.
447 for (yi = 0; yi < size; ++yi) {
448 for (xi = 0; xi < size; ++xi) {
449 LVector3 vec(half_size, center - yi, center - xi);
450 vec.normalize();
451 vec = scale.xform_point(vec);
452
453 *p++ = (unsigned char)vec[2];
454 *p++ = (unsigned char)vec[1];
455 *p++ = (unsigned char)vec[0];
456 }
457 }
458
459 // Page 1: negative X.
460 for (yi = 0; yi < size; ++yi) {
461 for (xi = 0; xi < size; ++xi) {
462 LVector3 vec(-half_size, center - yi, xi - center);
463 vec.normalize();
464 vec = scale.xform_point(vec);
465 *p++ = (unsigned char)vec[2];
466 *p++ = (unsigned char)vec[1];
467 *p++ = (unsigned char)vec[0];
468 }
469 }
470
471 // Page 2: positive Y.
472 for (yi = 0; yi < size; ++yi) {
473 for (xi = 0; xi < size; ++xi) {
474 LVector3 vec(xi - center, half_size, yi - center);
475 vec.normalize();
476 vec = scale.xform_point(vec);
477 *p++ = (unsigned char)vec[2];
478 *p++ = (unsigned char)vec[1];
479 *p++ = (unsigned char)vec[0];
480 }
481 }
482
483 // Page 3: negative Y.
484 for (yi = 0; yi < size; ++yi) {
485 for (xi = 0; xi < size; ++xi) {
486 LVector3 vec(xi - center, -half_size, center - yi);
487 vec.normalize();
488 vec = scale.xform_point(vec);
489 *p++ = (unsigned char)vec[2];
490 *p++ = (unsigned char)vec[1];
491 *p++ = (unsigned char)vec[0];
492 }
493 }
494
495 // Page 4: positive Z.
496 for (yi = 0; yi < size; ++yi) {
497 for (xi = 0; xi < size; ++xi) {
498 LVector3 vec(xi - center, center - yi, half_size);
499 vec.normalize();
500 vec = scale.xform_point(vec);
501 *p++ = (unsigned char)vec[2];
502 *p++ = (unsigned char)vec[1];
503 *p++ = (unsigned char)vec[0];
504 }
505 }
506
507 // Page 5: negative Z.
508 for (yi = 0; yi < size; ++yi) {
509 for (xi = 0; xi < size; ++xi) {
510 LVector3 vec(center - xi, center - yi, -half_size);
511 vec.normalize();
512 vec = scale.xform_point(vec);
513 *p++ = (unsigned char)vec[2];
514 *p++ = (unsigned char)vec[1];
515 *p++ = (unsigned char)vec[0];
516 }
517 }
518}
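
// Editorial note on the encoding used above: the scale matrix maps each
// normalized component n in [-1, 1] to n * 127.5 + 127.5 in [0, 255], and the
// three bytes of each texel are written in the blue-first order Panda uses for
// RAM images, so blue holds z, green holds y and red holds x.  A sketch of the
// inverse mapping (hypothetical helper, not part of this class):
//
//   LVector3 decode_normal(unsigned char b, unsigned char g, unsigned char r) {
//     return LVector3(r / 127.5f - 1.0f, g / 127.5f - 1.0f, b / 127.5f - 1.0f);
//   }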
519
520/**
521 * Generates a special 256x1 1-d texture that can be used to apply an
522 * arbitrary alpha scale to objects by judicious use of the texture matrix. The
523 * texture is a gradient, with an alpha of 0 on the left (U = 0), and 255 on
524 * the right (U = 1).
525 */
526void Texture::
527generate_alpha_scale_map() {
528 CDWriter cdata(_cycler, true);
529 do_setup_texture(cdata, TT_1d_texture, 256, 1, 1, T_unsigned_byte, F_alpha);
530 cdata->_default_sampler.set_wrap_u(SamplerState::WM_clamp);
531 cdata->_default_sampler.set_minfilter(SamplerState::FT_nearest);
532 cdata->_default_sampler.set_magfilter(SamplerState::FT_nearest);
533
534 cdata->_compression = CM_off;
535
536 cdata->inc_image_modified();
537 cdata->inc_properties_modified();
538
539 PTA_uchar image = do_make_ram_image(cdata);
540 cdata->_keep_ram_image = true;
541
542 unsigned char *p = image;
543 for (int xi = 0; xi < 256; ++xi) {
544 *p++ = xi;
545 }
546}
547
548/**
549 * Reads the named filename into the texture.
550 */
551bool Texture::
552read(const Filename &fullpath, const LoaderOptions &options) {
553 CDWriter cdata(_cycler, true);
554 do_clear(cdata);
555 cdata->inc_properties_modified();
556 cdata->inc_image_modified();
557 return do_read(cdata, fullpath, Filename(), 0, 0, 0, 0, false, false,
558 options, nullptr);
559}
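
// Usage sketch (illustrative; the path is hypothetical and the default
// LoaderOptions are assumed):
//
//   PT(Texture) tex = new Texture("grid");
//   if (!tex->read(Filename("maps/grid.png"))) {
//     gobj_cat.error() << "Could not read maps/grid.png\n";
//   }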
560
561/**
562 * Combines a 3-component image with a grayscale image to get a 4-component
563 * image.
564 *
565 * See the description of the full-parameter read() method for the meaning of
566 * the primary_file_num_channels and alpha_file_channel parameters.
567 */
568bool Texture::
569read(const Filename &fullpath, const Filename &alpha_fullpath,
570 int primary_file_num_channels, int alpha_file_channel,
571 const LoaderOptions &options) {
572 CDWriter cdata(_cycler, true);
573 do_clear(cdata);
574 cdata->inc_properties_modified();
575 cdata->inc_image_modified();
576 return do_read(cdata, fullpath, alpha_fullpath, primary_file_num_channels,
577 alpha_file_channel, 0, 0, false, false,
578 options, nullptr);
579}
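
// Usage sketch for the two-file form above (filenames hypothetical): combine an
// RGB color image with a separate grayscale opacity image, taking the grayscale
// combination of the alpha file (alpha_file_channel = 0):
//
//   PT(Texture) tex = new Texture("bark");
//   tex->read(Filename("bark_color.png"), Filename("bark_opacity.png"), 0, 0);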
580
581/**
582 * Reads a single file into a single page or mipmap level, or automatically
583 * reads a series of files into a series of pages and/or mipmap levels.
584 *
585 * See the description of the full-parameter read() method for the meaning of
586 * the various parameters.
587 */
588bool Texture::
589read(const Filename &fullpath, int z, int n,
590 bool read_pages, bool read_mipmaps,
591 const LoaderOptions &options) {
592 CDWriter cdata(_cycler, true);
593 cdata->inc_properties_modified();
594 cdata->inc_image_modified();
595 return do_read(cdata, fullpath, Filename(), 0, 0, z, n, read_pages, read_mipmaps,
596 options, nullptr);
597}
598
599/**
600 * Reads the texture from the indicated filename. If
601 * primary_file_num_channels is not 0, it specifies the number of components
602 * to downgrade the image to if it is greater than this number.
603 *
604 * If the filename has the extension .txo, this implicitly reads a texture
605 * object instead of a filename (which replaces all of the texture
606 * properties). In this case, all the rest of the parameters are ignored, and
607 * the filename should not contain any hash marks; just the one named file
608 * will be read, since a single .txo file can contain all pages and mipmaps
609 * necessary to define a texture.
610 *
611 * If alpha_fullpath is not empty, it specifies the name of a file from which
612 * to retrieve the alpha. In this case, alpha_file_channel represents the
613 * numeric channel of this image file to use as the resulting texture's alpha
614 * channel; usually, this is 0 to indicate the grayscale combination of r, g,
615 * b; or it may be a one-based channel number, e.g. 1 for the red channel, 2
616 * for the green channel, and so on.
617 *
618 * If read_pages is false, then z indicates the page number into which this
619 * image will be assigned. Normally this is 0 for the first (or only) page of
620 * the texture. 3-D textures have one page for each level of depth, and cube
621 * map textures always have six pages.
622 *
623 * If read_pages is true, multiple images will be read at once, one for each
624 * page of a cube map or a 3-D texture. In this case, the filename should
625 * contain a sequence of one or more hash marks ("#") which will be filled in
626 * with the z value of each page, zero-based. In this case, the z parameter
627 * indicates the maximum z value that will be loaded, or 0 to load all
628 * filenames that exist.
629 *
630 * If read_mipmaps is false, then n indicates the mipmap level to which this
631 * image will be assigned. Normally this is 0 for the base texture image, but
632 * it is possible to load custom mipmap levels into the later images. After
633 * the base texture image is loaded (thus defining the size of the texture),
634 * you can call get_expected_num_mipmap_levels() to determine the maximum
635 * sensible value for n.
636 *
637 * If read_mipmaps is true, multiple images will be read as above, but this
638 * time the images represent the different mipmap levels of the texture image.
639 * In this case, the n parameter indicates the maximum n value that will be
640 * loaded, or 0 to load all filenames that exist (up to the expected number of
641 * mipmap levels).
642 *
643 * If both read_pages and read_mipmaps are true, then both sequences will be
644 * read; the filename should contain two sequences of hash marks, separated by
645 * some character such as a hyphen, underscore, or dot. The first hash mark
646 * sequence will be filled in with the mipmap level, while the second hash
647 * mark sequence will be the page index.
648 *
649 * This method implicitly sets keep_ram_image to false.
650 */
651bool Texture::
652read(const Filename &fullpath, const Filename &alpha_fullpath,
653 int primary_file_num_channels, int alpha_file_channel,
654 int z, int n, bool read_pages, bool read_mipmaps,
655 BamCacheRecord *record,
656 const LoaderOptions &options) {
657 CDWriter cdata(_cycler, true);
658 cdata->inc_properties_modified();
659 cdata->inc_image_modified();
660 return do_read(cdata, fullpath, alpha_fullpath, primary_file_num_channels,
661 alpha_file_channel, z, n, read_pages, read_mipmaps,
662 options, record);
663}
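
// A sketch of the hash-mark convention described above (filenames are
// hypothetical).  With read_pages = true, each "#" is replaced by the
// zero-based page index, and z = 0 loads every page file that exists:
//
//   PT(Texture) env = new Texture("environment");
//   env->setup_cube_map();                        // so the pages become cube faces
//   env->read(Filename("env_#.png"), 0, 0, true, false);
//
// With read_mipmaps = true as well, the filename carries two hash sequences,
// e.g. "env_#_#.png": the first is filled with the mipmap level and the second
// with the page index.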
664
665/**
666 * Estimates the amount of texture memory that will be consumed by loading
667 * this texture. This returns a value that is not specific to any particular
668 * graphics card or driver; it tries to make a reasonable assumption about how
669 * a driver will load the texture. It does not account for texture
670 * compression or anything fancy. This is mainly useful for debugging and
671 * reporting purposes.
672 *
673 * Returns a value in bytes.
674 */
675size_t Texture::
676estimate_texture_memory() const {
677 CDReader cdata(_cycler);
678 size_t pixels = cdata->_x_size * cdata->_y_size * cdata->_z_size;
679
680 size_t bpp = 0;
681 switch (cdata->_format) {
682 case Texture::F_rgb332:
683 bpp = 1;
684 break;
685
686 case Texture::F_alpha:
687 case Texture::F_red:
688 case Texture::F_green:
689 case Texture::F_blue:
690 case Texture::F_luminance:
691 case Texture::F_sluminance:
692 case Texture::F_r8i:
693 bpp = 1;
694 break;
695
696 case Texture::F_luminance_alpha:
697 case Texture::F_luminance_alphamask:
698 case Texture::F_sluminance_alpha:
699 case Texture::F_rgba4:
700 case Texture::F_rgb5:
701 case Texture::F_rgba5:
702 case Texture::F_rg:
703 bpp = 2;
704 break;
705
706 case Texture::F_rgba:
707 case Texture::F_rgbm:
708 case Texture::F_rgb:
709 case Texture::F_srgb:
710 // Most of the above formats have only 3 bytes, but they are most likely
711 // to get padded by the driver
712 bpp = 4;
713 break;
714
715 case Texture::F_color_index:
716 case Texture::F_rgb8:
717 case Texture::F_rgba8:
718 case Texture::F_srgb_alpha:
719 case Texture::F_rgb8i:
720 case Texture::F_rgba8i:
721 bpp = 4;
722 break;
723
724 case Texture::F_depth_stencil:
725 bpp = 4;
726 break;
727
728 case Texture::F_depth_component:
729 case Texture::F_depth_component16:
730 bpp = 2;
731 break;
732
733 case Texture::F_depth_component24: // Gets padded
734 case Texture::F_depth_component32:
735 bpp = 4;
736 break;
737
738 case Texture::F_rgba12:
739 case Texture::F_rgb12:
740 bpp = 8;
741 break;
742
743 case Texture::F_rgba32:
744 case Texture::F_rgba32i:
745 bpp = 16;
746 break;
747
748 case Texture::F_r16:
749 case Texture::F_r16i:
750 case Texture::F_rg8i:
751 bpp = 2;
752 break;
753 case Texture::F_rg16:
754 case Texture::F_rg16i:
755 bpp = 4;
756 break;
757 case Texture::F_rgb16:
758 case Texture::F_rgb16i:
759 case Texture::F_rgba16:
760 case Texture::F_rgba16i:
761 bpp = 8;
762 break;
763
764 case Texture::F_r32i:
765 case Texture::F_r32:
766 bpp = 4;
767 break;
768
769 case Texture::F_rg32:
770 case Texture::F_rg32i:
771 bpp = 8;
772 break;
773
774 case Texture::F_rgb32:
775 case Texture::F_rgb32i:
776 bpp = 16;
777 break;
778
779 case Texture::F_r11_g11_b10:
780 case Texture::F_rgb9_e5:
781 case Texture::F_rgb10_a2:
782 bpp = 4;
783 break;
784 }
785
786 if (bpp == 0) {
787 bpp = 4;
788 gobj_cat.warning() << "Unhandled format in estimate_texture_memory(): "
789 << cdata->_format << "\n";
790 }
791
792 size_t bytes = pixels * bpp;
793 if (uses_mipmaps()) {
794 bytes = (bytes * 4) / 3;
795 }
796
797 return bytes;
798}
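
// Worked example of the estimate above (editorial): a 512 x 512 F_rgba,
// T_unsigned_byte texture counts as 4 bytes per pixel, so
// 512 * 512 * 4 = 1,048,576 bytes; with mipmaps in use the total is scaled by
// 4/3, giving 1,398,101 bytes.  Real driver usage may differ.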
799
800/**
801 * Records an arbitrary object in the Texture, associated with a specified
802 * key. The object may later be retrieved by calling get_aux_data() with the
803 * same key.
804 *
805 * These data objects are not recorded to a bam or txo file.
806 */
807void Texture::
808set_aux_data(const string &key, TypedReferenceCount *aux_data) {
809 MutexHolder holder(_lock);
810 _aux_data[key] = aux_data;
811}
812
813/**
814 * Removes a record previously recorded via set_aux_data().
815 */
816void Texture::
817clear_aux_data(const string &key) {
818 MutexHolder holder(_lock);
819 _aux_data.erase(key);
820}
821
822/**
823 * Returns a record previously recorded via set_aux_data(). Returns NULL if
824 * there was no record associated with the indicated key.
825 */
826TypedReferenceCount *Texture::
827get_aux_data(const string &key) const {
828 MutexHolder holder(_lock);
829 AuxData::const_iterator di;
830 di = _aux_data.find(key);
831 if (di != _aux_data.end()) {
832 return (*di).second;
833 }
834 return nullptr;
835}
836
837/**
838 * Reads the texture from a Panda texture object. This defines the complete
839 * Texture specification, including the image data as well as all texture
840 * properties. This only works if the txo file contains a static Texture
841 * image, as opposed to a subclass of Texture such as a movie texture.
842 *
843 * Pass a real filename if it is available, or empty string if it is not.
844 */
845bool Texture::
846read_txo(istream &in, const string &filename) {
847 CDWriter cdata(_cycler, true);
848 cdata->inc_properties_modified();
849 cdata->inc_image_modified();
850 return do_read_txo(cdata, in, filename);
851}
852
853/**
854 * Constructs a new Texture object from the txo file. This is similar to
855 * Texture::read_txo(), but it constructs and returns a new object, which
856 * allows it to return a subclass of Texture (for instance, a movie texture).
857 *
858 * Pass a real filename if it is available, or empty string if it is not.
859 */
860PT(Texture) Texture::
861make_from_txo(istream &in, const string &filename) {
862 DatagramInputFile din;
863
864 if (!din.open(in, filename)) {
865 gobj_cat.error()
866 << "Could not read texture object: " << filename << "\n";
867 return nullptr;
868 }
869
870 string head;
871 if (!din.read_header(head, _bam_header.size())) {
872 gobj_cat.error()
873 << filename << " is not a texture object file.\n";
874 return nullptr;
875 }
876
877 if (head != _bam_header) {
878 gobj_cat.error()
879 << filename << " is not a texture object file.\n";
880 return nullptr;
881 }
882
883 BamReader reader(&din);
884 if (!reader.init()) {
885 return nullptr;
886 }
887
888 TypedWritable *object = reader.read_object();
889
890 if (object != nullptr &&
891 object->is_exact_type(BamCacheRecord::get_class_type())) {
892 // Here's a special case: if the first object in the file is a
893 // BamCacheRecord, it's really a cache data file and not a true txo file;
894 // but skip over the cache data record and let the user treat it like an
895 // ordinary txo file.
896 object = reader.read_object();
897 }
898
899 if (object == nullptr) {
900 gobj_cat.error()
901 << "Texture object " << filename << " is empty.\n";
902 return nullptr;
903
904 } else if (!object->is_of_type(Texture::get_class_type())) {
905 gobj_cat.error()
906 << "Texture object " << filename << " contains a "
907 << object->get_type() << ", not a Texture.\n";
908 return nullptr;
909 }
910
911 PT(Texture) other = DCAST(Texture, object);
912 if (!reader.resolve()) {
913 gobj_cat.error()
914 << "Unable to fully resolve texture object file.\n";
915 return nullptr;
916 }
917
918 return other;
919}
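
// Usage sketch (illustrative; the path is hypothetical, and <fstream> is
// assumed).  Because this is a static factory it can return a Texture subclass
// stored in the .txo:
//
//   std::ifstream in("cache/face.txo", std::ios::binary);
//   PT(Texture) tex = Texture::make_from_txo(in, "cache/face.txo");
//   if (tex == nullptr) {
//     // the stream did not contain a readable texture object
//   }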
920
921/**
922 * Writes the texture to a Panda texture object. This defines the complete
923 * Texture specification, including the image data as well as all texture
924 * properties.
925 *
926 * The filename is just for reference.
927 */
928bool Texture::
929write_txo(ostream &out, const string &filename) const {
930 CDReader cdata(_cycler);
931 return do_write_txo(cdata, out, filename);
932}
933
934/**
935 * Reads the texture from a DDS file object. This is a Microsoft-defined file
936 * format; it is similar in principle to a txo object, in that it is designed
937 * to contain the texture image in a form as similar as possible to its
938 * runtime image, and it can contain mipmaps, pre-compressed textures, and so
939 * on.
940 *
941 * As with read_txo, the filename is just for reference.
942 */
943bool Texture::
944read_dds(istream &in, const string &filename, bool header_only) {
945 CDWriter cdata(_cycler, true);
946 cdata->inc_properties_modified();
947 cdata->inc_image_modified();
948 return do_read_dds(cdata, in, filename, header_only);
949}
950
951/**
952 * Reads the texture from a KTX file object. This is a Khronos-defined file
953 * format; it is similar in principle to a dds object, in that it is designed
954 * to contain the texture image in a form as similar as possible to its
955 * runtime image, and it can contain mipmaps, pre-compressed textures, and so
956 * on.
957 *
958 * As with read_dds, the filename is just for reference.
959 */
960bool Texture::
961read_ktx(istream &in, const string &filename, bool header_only) {
962 CDWriter cdata(_cycler, true);
963 cdata->inc_properties_modified();
964 cdata->inc_image_modified();
965 return do_read_ktx(cdata, in, filename, header_only);
966}
967
968/**
969 * Loads a texture whose filename is derived by concatenating a suffix to the
970 * filename of this texture. May return NULL, for example, if this texture
971 * doesn't have a filename.
972 */
973Texture *Texture::
974load_related(const InternalName *suffix) const {
975 MutexHolder holder(_lock);
976 CDReader cdata(_cycler);
977
978 RelatedTextures::const_iterator ti;
979 ti = _related_textures.find(suffix);
980 if (ti != _related_textures.end()) {
981 return (*ti).second;
982 }
983 if (cdata->_fullpath.empty()) {
984 return nullptr;
985 }
986 Filename main = cdata->_fullpath;
987 main.set_basename_wo_extension(main.get_basename_wo_extension() +
988 suffix->get_name());
989 PT(Texture) res;
990 if (!cdata->_alpha_fullpath.empty()) {
991 Filename alph = cdata->_alpha_fullpath;
992    alph.set_basename_wo_extension(alph.get_basename_wo_extension() +
993                                   suffix->get_name());
994    VirtualFileSystem *vfs = VirtualFileSystem::get_global_ptr();
995 if (vfs->exists(alph)) {
996 // The alpha variant of the filename, with the suffix, exists. Use it
997 // to load the texture.
998 res = TexturePool::load_texture(main, alph,
999 cdata->_primary_file_num_channels,
1000 cdata->_alpha_file_channel, false);
1001 } else {
1002 // If the alpha variant of the filename doesn't exist, just go ahead and
1003 // load the related texture without alpha.
1004 res = TexturePool::load_texture(main);
1005 }
1006
1007 } else {
1008 // No alpha filename--just load the single file. It doesn't necessarily
1009 // have the same number of channels as this one.
1010 res = TexturePool::load_texture(main);
1011 }
1012
1013 // I'm casting away the const-ness of 'this' because this field is only a
1014 // cache.
1015 ((Texture *)this)->_related_textures.insert(RelatedTextures::value_type(suffix, res));
1016 return res;
1017}
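
// Usage sketch (illustrative): look up a companion map whose filename differs
// from this texture's only by a suffix, e.g. "wall.png" -> "wall_n.png".  The
// "_n" suffix is a hypothetical naming convention, not something this class
// defines:
//
//   CPT(InternalName) suffix = InternalName::make("_n");
//   Texture *normal_map = base_tex->load_related(suffix);
//   if (normal_map == nullptr) {
//     // no such related file, or base_tex has no filename
//   }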
1018
1019/**
1020 * Replaces the current system-RAM image with the new data, converting it
1021 * first if necessary from the indicated component-order format. See
1022 * get_ram_image_as() for specifications about the format. This method cannot
1023 * support compressed image data or sub-pages; use set_ram_image() for that.
1024 */
1025void Texture::
1026set_ram_image_as(CPTA_uchar image, const string &supplied_format) {
1027 CDWriter cdata(_cycler, true);
1028
1029 string format = upcase(supplied_format);
1030
1031 // Make sure we can grab something that's uncompressed.
1032 size_t imgsize = (size_t)cdata->_x_size * (size_t)cdata->_y_size *
1033 (size_t)cdata->_z_size * (size_t)cdata->_num_views;
1034 nassertv(image.size() == (size_t)(cdata->_component_width * format.size() * imgsize));
1035
1036 // Check if the format is already what we have internally.
1037 if ((cdata->_num_components == 1 && format.size() == 1) ||
1038 (cdata->_num_components == 2 && format.size() == 2 && format.at(1) == 'A' && format.at(0) != 'A') ||
1039 (cdata->_num_components == 3 && format == "BGR") ||
1040 (cdata->_num_components == 4 && format == "BGRA")) {
1041 // The format string is already our format, so we just need to copy it.
1042 do_set_ram_image(cdata, image);
1043 return;
1044 }
1045
1046 // Create a new empty array that can hold our image.
1047 PTA_uchar newdata = PTA_uchar::empty_array(imgsize * cdata->_num_components * cdata->_component_width, get_class_type());
1048
1049 // These ifs are for optimization of commonly used image types.
1050 if (cdata->_component_width == 1) {
1051 if (format == "RGBA" && cdata->_num_components == 4) {
1052 imgsize *= 4;
1053 for (int p = 0; p < imgsize; p += 4) {
1054 newdata[p + 2] = image[p ];
1055 newdata[p + 1] = image[p + 1];
1056 newdata[p ] = image[p + 2];
1057 newdata[p + 3] = image[p + 3];
1058 }
1059 do_set_ram_image(cdata, newdata);
1060 return;
1061 }
1062 if (format == "RGB" && cdata->_num_components == 3) {
1063 imgsize *= 3;
1064 for (int p = 0; p < imgsize; p += 3) {
1065 newdata[p + 2] = image[p ];
1066 newdata[p + 1] = image[p + 1];
1067 newdata[p ] = image[p + 2];
1068 }
1069 do_set_ram_image(cdata, newdata);
1070 return;
1071 }
1072 if (format == "A" && cdata->_num_components != 3) {
1073 // We can generally rely on alpha to be the last component.
1074 int component = cdata->_num_components - 1;
1075 for (size_t p = 0; p < imgsize; ++p) {
1076        newdata[p * cdata->_num_components + component] = image[p];
1077 }
1078 do_set_ram_image(cdata, newdata);
1079 return;
1080 }
1081 for (size_t p = 0; p < imgsize; ++p) {
1082 for (uchar s = 0; s < format.size(); ++s) {
1083 signed char component = -1;
1084 if (format.at(s) == 'B' || (cdata->_num_components <= 2 && format.at(s) != 'A')) {
1085 component = 0;
1086 } else if (format.at(s) == 'G') {
1087 component = 1;
1088 } else if (format.at(s) == 'R') {
1089 component = 2;
1090 } else if (format.at(s) == 'A') {
1091 if (cdata->_num_components != 3) {
1092 component = cdata->_num_components - 1;
1093 } else {
1094 // Ignore.
1095 }
1096 } else if (format.at(s) == '0') {
1097 // Ignore.
1098 } else if (format.at(s) == '1') {
1099 // Ignore.
1100 } else {
1101 gobj_cat.error() << "Unexpected component character '"
1102 << format.at(s) << "', expected one of RGBA!\n";
1103 return;
1104 }
1105 if (component >= 0) {
1106 newdata[p * cdata->_num_components + component] = image[p * format.size() + s];
1107 }
1108 }
1109 }
1110 do_set_ram_image(cdata, newdata);
1111 return;
1112 }
1113 for (size_t p = 0; p < imgsize; ++p) {
1114 for (uchar s = 0; s < format.size(); ++s) {
1115 signed char component = -1;
1116 if (format.at(s) == 'B' || (cdata->_num_components <= 2 && format.at(s) != 'A')) {
1117 component = 0;
1118 } else if (format.at(s) == 'G') {
1119 component = 1;
1120 } else if (format.at(s) == 'R') {
1121 component = 2;
1122 } else if (format.at(s) == 'A') {
1123 if (cdata->_num_components != 3) {
1124 component = cdata->_num_components - 1;
1125 } else {
1126 // Ignore.
1127 }
1128 } else if (format.at(s) == '0') {
1129 // Ignore.
1130 } else if (format.at(s) == '1') {
1131 // Ignore.
1132 } else {
1133 gobj_cat.error() << "Unexpected component character '"
1134 << format.at(s) << "', expected one of RGBA!\n";
1135 return;
1136 }
1137 if (component >= 0) {
1138 memcpy((void*)(newdata + (p * cdata->_num_components + component) * cdata->_component_width),
1139 (void*)(image + (p * format.size() + s) * cdata->_component_width),
1140 cdata->_component_width);
1141 }
1142 }
1143 }
1144 do_set_ram_image(cdata, newdata);
1145 return;
1146}
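
// Usage sketch (illustrative): hand the texture a buffer laid out in RGBA byte
// order and let set_ram_image_as() convert it to the internal BGRA layout.  The
// size is hypothetical and must match the texture's current setup:
//
//   const int w = 64, h = 64;
//   tex->setup_2d_texture(w, h, Texture::T_unsigned_byte, Texture::F_rgba);
//   PTA_uchar rgba = PTA_uchar::empty_array((size_t)w * h * 4);
//   // ... fill rgba with w * h pixels, four bytes each in R, G, B, A order ...
//   tex->set_ram_image_as(rgba, "RGBA");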
1147
1148/**
1149 * Returns the flag that indicates whether this Texture is eligible to have
1150 * its main RAM copy of the texture memory dumped when the texture is prepared
1151 * for rendering. See set_keep_ram_image().
1152 */
1153bool Texture::
1154get_keep_ram_image() const {
1155 CDReader cdata(_cycler);
1156 return cdata->_keep_ram_image;
1157}
1158
1159/**
1160 * Returns true if there is enough information in this Texture object to write
1161 * it to the bam cache successfully, false otherwise. For most textures, this
1162 * is the same as has_ram_image().
1163 */
1164bool Texture::
1165is_cacheable() const {
1166 CDReader cdata(_cycler);
1167 return do_has_bam_rawdata(cdata);
1168}
1169
1170/**
1171 * Returns the number of contiguous mipmap levels that exist in RAM, up until
1172 * the first gap in the sequence. It is guaranteed that at least mipmap
1173 * levels [0, get_num_ram_mipmap_images()) exist.
1174 *
1175 * The number returned will never exceed the number of required mipmap images
1176 * based on the size of the texture and its filter mode.
1177 *
1178 * This method is different from get_num_ram_mipmap_images() in that it
1179 * returns only the number of mipmap levels that can actually be usefully
1180 * loaded, regardless of the actual number that may be stored.
1181 */
1182int Texture::
1183get_num_loadable_ram_mipmap_images() const {
1184 CDReader cdata(_cycler);
1185 if (cdata->_ram_images.empty() || cdata->_ram_images[0]._image.empty()) {
1186 // If we don't even have a base image, the answer is none.
1187 return 0;
1188 }
1189 if (!uses_mipmaps()) {
1190 // If we have a base image and don't require mipmapping, the answer is 1.
1191 return 1;
1192 }
1193
1194 // Check that we have enough mipmap levels to meet the size requirements.
1195 int size = max(cdata->_x_size, max(cdata->_y_size, cdata->_z_size));
1196 int n = 0;
1197 int x = 1;
1198 while (x < size) {
1199 x = (x << 1);
1200 ++n;
1201 if (n >= (int)cdata->_ram_images.size() || cdata->_ram_images[n]._image.empty()) {
1202 return n;
1203 }
1204 }
1205
1206 ++n;
1207 return n;
1208}
1209
1210/**
1211 * Returns the system-RAM image data associated with the nth mipmap level, if
1212 * present. Returns NULL if the nth mipmap level is not present.
1213 */
1214CPTA_uchar Texture::
1215get_ram_mipmap_image(int n) const {
1216 CDReader cdata(_cycler);
1217 if (n < (int)cdata->_ram_images.size() && !cdata->_ram_images[n]._image.empty()) {
1218 return cdata->_ram_images[n]._image;
1219 }
1220 return CPTA_uchar(get_class_type());
1221}
1222
1223/**
1224 * Similar to get_ram_mipmap_image(); however, in this case the void pointer
1225 * for the given ram image is returned. This will be NULL unless it has been
1226 * explicitly set.
1227 */
1228void *Texture::
1229get_ram_mipmap_pointer(int n) const {
1230 CDReader cdata(_cycler);
1231 if (n < (int)cdata->_ram_images.size()) {
1232 return cdata->_ram_images[n]._pointer_image;
1233 }
1234 return nullptr;
1235}
1236
1237/**
1238 * Sets an explicit void pointer as the texture's mipmap image for the
1239 * indicated level. This is a special call to direct a texture to reference
1240 * some external image location, for instance from a webcam input.
1241 *
1242 * The texture will henceforth reference this pointer directly, instead of its
1243 * own internal storage; the user is responsible for ensuring the data at this
1244 * address remains allocated and valid, and in the correct format, during the
1245 * lifetime of the texture.
1246 */
1247void Texture::
1248set_ram_mipmap_pointer(int n, void *image, size_t page_size) {
1249 CDWriter cdata(_cycler, true);
1250 nassertv(cdata->_ram_image_compression != CM_off || do_get_expected_ram_mipmap_image_size(cdata, n));
1251
1252 while (n >= (int)cdata->_ram_images.size()) {
1253 cdata->_ram_images.push_back(RamImage());
1254 }
1255
1256 cdata->_ram_images[n]._page_size = page_size;
1257 // _ram_images[n]._image.clear(); wtf is going on?!
1258 cdata->_ram_images[n]._pointer_image = image;
1259 cdata->inc_image_modified();
1260}
1261
1262/**
1263 * Accepts a raw pointer cast as an int, which is then passed to
1264 * set_ram_mipmap_pointer(); see the documentation for that method.
1265 *
1266 * This variant is particularly useful to set an external pointer from a
1267 * language like Python, which doesn't support void pointers directly.
1268 */
1269void Texture::
1270set_ram_mipmap_pointer_from_int(long long pointer, int n, int page_size) {
1271 set_ram_mipmap_pointer(n, (void*)pointer, (size_t)page_size);
1272}
1273
1274/**
1275 * Discards the current system-RAM image for the nth mipmap level.
1276 */
1277void Texture::
1278clear_ram_mipmap_image(int n) {
1279 CDWriter cdata(_cycler, true);
1280 if (n >= (int)cdata->_ram_images.size()) {
1281 return;
1282 }
1283 cdata->_ram_images[n]._page_size = 0;
1284 cdata->_ram_images[n]._image.clear();
1285 cdata->_ram_images[n]._pointer_image = nullptr;
1286}
1287
1288/**
1289 * Returns a modifiable pointer to the internal "simple" texture image. See
1290 * set_simple_ram_image().
1291 */
1292PTA_uchar Texture::
1293modify_simple_ram_image() {
1294 CDWriter cdata(_cycler, true);
1295 cdata->_simple_image_date_generated = (int32_t)time(nullptr);
1296 return cdata->_simple_ram_image._image;
1297}
1298
1299/**
1300 * Creates an empty array for the simple ram image of the indicated size, and
1301 * returns a modifiable pointer to the new array. See set_simple_ram_image().
1302 */
1303PTA_uchar Texture::
1304new_simple_ram_image(int x_size, int y_size) {
1305 CDWriter cdata(_cycler, true);
1306 nassertr(cdata->_texture_type == TT_2d_texture, PTA_uchar());
1307 size_t expected_page_size = (size_t)(x_size * y_size * 4);
1308
1309 cdata->_simple_x_size = x_size;
1310 cdata->_simple_y_size = y_size;
1311 cdata->_simple_ram_image._image = PTA_uchar::empty_array(expected_page_size);
1312 cdata->_simple_ram_image._page_size = expected_page_size;
1313 cdata->_simple_image_date_generated = (int32_t)time(nullptr);
1314 cdata->inc_simple_image_modified();
1315
1316 return cdata->_simple_ram_image._image;
1317}
1318
1319/**
1320 * Computes the "simple" ram image by loading the main RAM image, if it is not
1321 * already available, and reducing it to 16x16 or smaller. This may be an
1322 * expensive operation.
1323 */
1324void Texture::
1325generate_simple_ram_image() {
1326 CDWriter cdata(_cycler, true);
1327
1328 if (cdata->_texture_type != TT_2d_texture ||
1329 cdata->_ram_image_compression != CM_off) {
1330 return;
1331 }
1332
1333 PNMImage pnmimage;
1334 if (!do_store_one(cdata, pnmimage, 0, 0)) {
1335 return;
1336 }
1337
1338 // Start at the suggested size from the config file.
1339 int x_size = simple_image_size.get_word(0);
1340 int y_size = simple_image_size.get_word(1);
1341
1342 // Limit it to no larger than the source image, and also make it a power of
1343 // two.
1344 x_size = down_to_power_2(min(x_size, cdata->_x_size));
1345 y_size = down_to_power_2(min(y_size, cdata->_y_size));
1346
1347 // Generate a reduced image of that size.
1348 PNMImage scaled(x_size, y_size, pnmimage.get_num_channels());
1349 scaled.quick_filter_from(pnmimage);
1350
1351 // Make sure the reduced image has 4 components, by convention.
1352 if (!scaled.has_alpha()) {
1353 scaled.add_alpha();
1354 scaled.alpha_fill(1.0);
1355 }
1356 scaled.set_num_channels(4);
1357
1358 // Now see if we can go even smaller.
1359 bool did_anything;
1360 do {
1361 did_anything = false;
1362
1363 // Try to reduce X.
1364 if (x_size > 1) {
1365 int new_x_size = (x_size >> 1);
1366 PNMImage smaller(new_x_size, y_size, 4);
1367 smaller.quick_filter_from(scaled);
1368 PNMImage bigger(x_size, y_size, 4);
1369 bigger.quick_filter_from(smaller);
1370
1371 if (compare_images(scaled, bigger)) {
1372 scaled.take_from(smaller);
1373 x_size = new_x_size;
1374 did_anything = true;
1375 }
1376 }
1377
1378 // Try to reduce Y.
1379 if (y_size > 1) {
1380 int new_y_size = (y_size >> 1);
1381 PNMImage smaller(x_size, new_y_size, 4);
1382 smaller.quick_filter_from(scaled);
1383 PNMImage bigger(x_size, y_size, 4);
1384 bigger.quick_filter_from(smaller);
1385
1386 if (compare_images(scaled, bigger)) {
1387 scaled.take_from(smaller);
1388 y_size = new_y_size;
1389 did_anything = true;
1390 }
1391 }
1392 } while (did_anything);
1393
1394 size_t expected_page_size = (size_t)(x_size * y_size * 4);
1395 PTA_uchar image = PTA_uchar::empty_array(expected_page_size, get_class_type());
1396 convert_from_pnmimage(image, expected_page_size, x_size, 0, 0, 0, scaled, 4, 1);
1397
1398 do_set_simple_ram_image(cdata, image, x_size, y_size);
1399 cdata->_simple_image_date_generated = (int32_t)time(nullptr);
1400}
1401
1402/**
1403 * Returns a TexturePeeker object that can be used to examine the individual
1404 * texels stored within this Texture by (u, v) coordinate.
1405 *
1406 * If the texture has a ram image resident, that image is used. If it does
1407 * not have a full ram image but does have a simple_ram_image resident, that
1408 * image is used instead. If neither image is resident the full image is
1409 * reloaded.
1410 *
1411 * Returns NULL if the texture cannot find an image to load, or the texture
1412 * format is incompatible.
1413 */
1414PT(TexturePeeker) Texture::
1415peek() {
1416 CDWriter cdata(_cycler, unlocked_ensure_ram_image(true));
1417
1418 PT(TexturePeeker) peeker = new TexturePeeker(this, cdata);
1419 if (peeker->is_valid()) {
1420 return peeker;
1421 }
1422
1423 return nullptr;
1424}
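
// Usage sketch (illustrative): sample the texel nearest the center of the image.
//
//   PT(TexturePeeker) peeker = tex->peek();
//   if (peeker != nullptr) {
//     LColor color;
//     peeker->lookup(color, 0.5f, 0.5f);
//   }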
1425
1426/**
1427 * Indicates that the texture should be enqueued to be prepared in the
1428 * indicated prepared_objects at the beginning of the next frame. This will
1429 * ensure the texture is already loaded into texture memory if it is expected
1430 * to be rendered soon.
1431 *
1432 * Use this function instead of prepare_now() to preload textures from a user
1433 * interface standpoint.
1434 */
1435PT(AsyncFuture) Texture::
1436prepare(PreparedGraphicsObjects *prepared_objects) {
1437 return prepared_objects->enqueue_texture_future(this);
1438}
1439
1440/**
1441 * Returns true if the texture has already been prepared or enqueued for
1442 * preparation on the indicated GSG, false otherwise.
1443 */
1444bool Texture::
1445is_prepared(PreparedGraphicsObjects *prepared_objects) const {
1446 MutexHolder holder(_lock);
1447 PreparedViews::const_iterator pvi;
1448 pvi = _prepared_views.find(prepared_objects);
1449 if (pvi != _prepared_views.end()) {
1450 return true;
1451 }
1452 return prepared_objects->is_texture_queued(this);
1453}
1454
1455/**
1456 * Returns true if the texture needs to be re-loaded onto the indicated GSG,
1457 * either because its image data is out-of-date, or because it's not fully
1458 * prepared now.
1459 */
1460bool Texture::
1461was_image_modified(PreparedGraphicsObjects *prepared_objects) const {
1462 MutexHolder holder(_lock);
1463 CDReader cdata(_cycler);
1464
1465 PreparedViews::const_iterator pvi;
1466 pvi = _prepared_views.find(prepared_objects);
1467 if (pvi != _prepared_views.end()) {
1468 const Contexts &contexts = (*pvi).second;
1469 for (int view = 0; view < cdata->_num_views; ++view) {
1470 Contexts::const_iterator ci;
1471 ci = contexts.find(view);
1472 if (ci == contexts.end()) {
1473 return true;
1474 }
1475 TextureContext *tc = (*ci).second;
1476 if (tc->was_image_modified()) {
1477 return true;
1478 }
1479 }
1480 return false;
1481 }
1482 return true;
1483}
1484
1485/**
1486 * Returns the number of bytes which the texture is reported to consume within
1487 * graphics memory, for the indicated GSG. This may return a nonzero value
1488 * even if the texture is not currently resident; you should also check
1489 * get_resident() if you want to know how much space the texture is actually
1490 * consuming right now.
1491 */
1492size_t Texture::
1493get_data_size_bytes(PreparedGraphicsObjects *prepared_objects) const {
1494 MutexHolder holder(_lock);
1495 CDReader cdata(_cycler);
1496
1497 PreparedViews::const_iterator pvi;
1498 size_t total_size = 0;
1499 pvi = _prepared_views.find(prepared_objects);
1500 if (pvi != _prepared_views.end()) {
1501 const Contexts &contexts = (*pvi).second;
1502 for (int view = 0; view < cdata->_num_views; ++view) {
1503 Contexts::const_iterator ci;
1504 ci = contexts.find(view);
1505 if (ci != contexts.end()) {
1506 TextureContext *tc = (*ci).second;
1507 total_size += tc->get_data_size_bytes();
1508 }
1509 }
1510 }
1511
1512 return total_size;
1513}
1514
1515/**
1516 * Returns true if this Texture was rendered in the most recent frame within
1517 * the indicated GSG.
1518 */
1519bool Texture::
1520get_active(PreparedGraphicsObjects *prepared_objects) const {
1521 MutexHolder holder(_lock);
1522 CDReader cdata(_cycler);
1523
1524 PreparedViews::const_iterator pvi;
1525 pvi = _prepared_views.find(prepared_objects);
1526 if (pvi != _prepared_views.end()) {
1527 const Contexts &contexts = (*pvi).second;
1528 for (int view = 0; view < cdata->_num_views; ++view) {
1529 Contexts::const_iterator ci;
1530 ci = contexts.find(view);
1531 if (ci != contexts.end()) {
1532 TextureContext *tc = (*ci).second;
1533 if (tc->get_active()) {
1534 return true;
1535 }
1536 }
1537 }
1538 }
1539 return false;
1540}
1541
1542/**
1543 * Returns true if this Texture is reported to be resident within graphics
1544 * memory for the indicated GSG.
1545 */
1546bool Texture::
1547get_resident(PreparedGraphicsObjects *prepared_objects) const {
1548 MutexHolder holder(_lock);
1549 CDReader cdata(_cycler);
1550
1551 PreparedViews::const_iterator pvi;
1552 pvi = _prepared_views.find(prepared_objects);
1553 if (pvi != _prepared_views.end()) {
1554 const Contexts &contexts = (*pvi).second;
1555 for (int view = 0; view < cdata->_num_views; ++view) {
1556 Contexts::const_iterator ci;
1557 ci = contexts.find(view);
1558 if (ci != contexts.end()) {
1559 TextureContext *tc = (*ci).second;
1560 if (tc->get_resident()) {
1561 return true;
1562 }
1563 }
1564 }
1565 }
1566 return false;
1567}
1568
1569/**
1570 * Frees the texture context only on the indicated object, if it exists there.
1571 * Returns true if it was released, false if it had not been prepared.
1572 */
1573bool Texture::
1574release(PreparedGraphicsObjects *prepared_objects) {
1575 MutexHolder holder(_lock);
1576 PreparedViews::iterator pvi;
1577 pvi = _prepared_views.find(prepared_objects);
1578 if (pvi != _prepared_views.end()) {
1579 Contexts temp;
1580 temp.swap((*pvi).second);
1581 Contexts::iterator ci;
1582 for (ci = temp.begin(); ci != temp.end(); ++ci) {
1583 TextureContext *tc = (*ci).second;
1584 if (tc != nullptr) {
1585 prepared_objects->release_texture(tc);
1586 }
1587 }
1588 _prepared_views.erase(pvi);
1589 }
1590
1591 // Maybe it wasn't prepared yet, but it's about to be.
1592 return prepared_objects->dequeue_texture(this);
1593}
1594
1595/**
1596 * Frees the context allocated on all objects for which the texture has been
1597 * declared. Returns the number of contexts which have been freed.
1598 */
1599int Texture::
1600release_all() {
1601 MutexHolder holder(_lock);
1602
1603 // We have to traverse a copy of the _prepared_views list, because the
1604 // PreparedGraphicsObjects object will call clear_prepared() in response to
1605 // each release_texture(), and we don't want to be modifying the
1606 // _prepared_views list while we're traversing it.
1607 PreparedViews temp;
1608 temp.swap(_prepared_views);
1609 int num_freed = (int)temp.size();
1610
1611 PreparedViews::iterator pvi;
1612 for (pvi = temp.begin(); pvi != temp.end(); ++pvi) {
1613 PreparedGraphicsObjects *prepared_objects = (*pvi).first;
1614 Contexts temp;
1615 temp.swap((*pvi).second);
1616 Contexts::iterator ci;
1617 for (ci = temp.begin(); ci != temp.end(); ++ci) {
1618 TextureContext *tc = (*ci).second;
1619 if (tc != nullptr) {
1620 prepared_objects->release_texture(tc);
1621 }
1622 }
1623 }
1624
1625 return num_freed;
1626}
1627
1628/**
1629 * Not to be confused with write(Filename), this method simply describes the
1630 * texture properties.
1631 */
1632void Texture::
1633write(ostream &out, int indent_level) const {
1634 CDReader cdata(_cycler);
1635 indent(out, indent_level)
1636 << cdata->_texture_type << " " << get_name();
1637 if (!cdata->_filename.empty()) {
1638 out << " (from " << cdata->_filename << ")";
1639 }
1640 out << "\n";
1641
1642 indent(out, indent_level + 2);
1643
1644 switch (cdata->_texture_type) {
1645 case TT_1d_texture:
1646 out << "1-d, " << cdata->_x_size;
1647 break;
1648
1649 case TT_2d_texture:
1650 out << "2-d, " << cdata->_x_size << " x " << cdata->_y_size;
1651 break;
1652
1653 case TT_3d_texture:
1654 out << "3-d, " << cdata->_x_size << " x " << cdata->_y_size << " x " << cdata->_z_size;
1655 break;
1656
1657 case TT_2d_texture_array:
1658 out << "2-d array, " << cdata->_x_size << " x " << cdata->_y_size << " x " << cdata->_z_size;
1659 break;
1660
1661 case TT_cube_map:
1662 out << "cube map, " << cdata->_x_size << " x " << cdata->_y_size;
1663 break;
1664
1665 case TT_cube_map_array:
1666 out << "cube map array, " << cdata->_x_size << " x " << cdata->_y_size << " x " << cdata->_z_size;
1667 break;
1668
1669 case TT_buffer_texture:
1670 out << "buffer, " << cdata->_x_size;
1671 break;
1672
1673 case TT_1d_texture_array:
1674 out << "1-d array, " << cdata->_x_size << " x " << cdata->_y_size;
1675 break;
1676 }
1677
1678 if (cdata->_num_views > 1) {
1679 out << " (x " << cdata->_num_views << " views)";
1680 }
1681
1682 out << " pixels, each " << cdata->_num_components;
1683
1684 switch (cdata->_component_type) {
1685 case T_unsigned_byte:
1686 case T_byte:
1687 out << " bytes";
1688 break;
1689
1690 case T_unsigned_short:
1691 case T_short:
1692 out << " shorts";
1693 break;
1694
1695 case T_half_float:
1696 out << " half";
1697 case T_float:
1698 out << " floats";
1699 break;
1700
1701 case T_unsigned_int_24_8:
1702 case T_int:
1703 case T_unsigned_int:
1704 out << " ints";
1705 break;
1706
1707 default:
1708 break;
1709 }
1710
1711 out << ", ";
1712 switch (cdata->_format) {
1713 case F_color_index:
1714 out << "color_index";
1715 break;
1716 case F_depth_stencil:
1717 out << "depth_stencil";
1718 break;
1719 case F_depth_component:
1720 out << "depth_component";
1721 break;
1722 case F_depth_component16:
1723 out << "depth_component16";
1724 break;
1725 case F_depth_component24:
1726 out << "depth_component24";
1727 break;
1728 case F_depth_component32:
1729 out << "depth_component32";
1730 break;
1731
1732 case F_rgba:
1733 out << "rgba";
1734 break;
1735 case F_rgbm:
1736 out << "rgbm";
1737 break;
1738 case F_rgba32:
1739 out << "rgba32";
1740 break;
1741 case F_rgba16:
1742 out << "rgba16";
1743 break;
1744 case F_rgba12:
1745 out << "rgba12";
1746 break;
1747 case F_rgba8:
1748 out << "rgba8";
1749 break;
1750 case F_rgba4:
1751 out << "rgba4";
1752 break;
1753
1754 case F_rgb:
1755 out << "rgb";
1756 break;
1757 case F_rgb12:
1758 out << "rgb12";
1759 break;
1760 case F_rgb8:
1761 out << "rgb8";
1762 break;
1763 case F_rgb5:
1764 out << "rgb5";
1765 break;
1766 case F_rgba5:
1767 out << "rgba5";
1768 break;
1769 case F_rgb332:
1770 out << "rgb332";
1771 break;
1772
1773 case F_red:
1774 out << "red";
1775 break;
1776 case F_green:
1777 out << "green";
1778 break;
1779 case F_blue:
1780 out << "blue";
1781 break;
1782 case F_alpha:
1783 out << "alpha";
1784 break;
1785 case F_luminance:
1786 out << "luminance";
1787 break;
1788 case F_luminance_alpha:
1789 out << "luminance_alpha";
1790 break;
1791 case F_luminance_alphamask:
1792 out << "luminance_alphamask";
1793 break;
1794
1795 case F_r16:
1796 out << "r16";
1797 break;
1798 case F_rg16:
1799 out << "rg16";
1800 break;
1801 case F_rgb16:
1802 out << "rgb16";
1803 break;
1804
1805 case F_srgb:
1806 out << "srgb";
1807 break;
1808 case F_srgb_alpha:
1809 out << "srgb_alpha";
1810 break;
1811 case F_sluminance:
1812 out << "sluminance";
1813 break;
1814 case F_sluminance_alpha:
1815 out << "sluminance_alpha";
1816 break;
1817
1818 case F_r32i:
1819 out << "r32i";
1820 break;
1821
1822 case F_r32:
1823 out << "r32";
1824 break;
1825 case F_rg32:
1826 out << "rg32";
1827 break;
1828 case F_rgb32:
1829 out << "rgb32";
1830 break;
1831
1832 case F_r8i:
1833 out << "r8i";
1834 break;
1835 case F_rg8i:
1836 out << "rg8i";
1837 break;
1838 case F_rgb8i:
1839 out << "rgb8i";
1840 break;
1841 case F_rgba8i:
1842 out << "rgba8i";
1843 break;
1844 case F_r11_g11_b10:
1845 out << "r11_g11_b10";
1846 break;
1847 case F_rgb9_e5:
1848 out << "rgb9_e5";
1849 break;
1850 case F_rgb10_a2:
1851 out << "rgb10_a2";
1852 break;
1853
1854 case F_rg:
1855 out << "rg";
1856 break;
1857
1858 case F_r16i:
1859 out << "r16i";
1860 break;
1861 case F_rg16i:
1862 out << "rg16i";
1863 break;
1864 case F_rgb16i:
1865 out << "rgb16i";
1866 break;
1867 case F_rgba16i:
1868 out << "rgba16i";
1869 break;
1870
1871 case F_rg32i:
1872 out << "rg32i";
1873 break;
1874 case F_rgb32i:
1875 out << "rgb32i";
1876 break;
1877 case F_rgba32i:
1878 out << "rgba32i";
1879 break;
1880 }
1881
1882 if (cdata->_compression != CM_default) {
1883 out << ", compression " << cdata->_compression;
1884 }
1885 out << "\n";
1886
1887 indent(out, indent_level + 2);
1888
1889 cdata->_default_sampler.output(out);
1890
1891 if (do_has_ram_image(cdata)) {
1892 indent(out, indent_level + 2)
1893 << do_get_ram_image_size(cdata) << " bytes in ram, compression "
1894 << cdata->_ram_image_compression << "\n";
1895
1896 if (cdata->_ram_images.size() > 1) {
1897 int count = 0;
1898 size_t total_size = 0;
1899 for (size_t n = 1; n < cdata->_ram_images.size(); ++n) {
1900 if (!cdata->_ram_images[n]._image.empty()) {
1901 ++count;
1902 total_size += cdata->_ram_images[n]._image.size();
1903 } else {
1904 // Stop at the first gap.
1905 break;
1906 }
1907 }
1908 indent(out, indent_level + 2)
1909 << count
1910 << " mipmap levels also present in ram (" << total_size
1911 << " bytes).\n";
1912 }
1913
1914 } else {
1915 indent(out, indent_level + 2)
1916 << "no ram image\n";
1917 }
1918
1919 if (!cdata->_simple_ram_image._image.empty()) {
1920 indent(out, indent_level + 2)
1921 << "simple image: " << cdata->_simple_x_size << " x "
1922 << cdata->_simple_y_size << ", "
1923 << cdata->_simple_ram_image._image.size() << " bytes\n";
1924 }
1925}
1926
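Illustrative usage sketch (not part of texture.cxx): the switch statements above supply the human-readable keywords for the multi-line report this function produces, which can be obtained with something like the following; the helper name is hypothetical.

    #include "texture.h"
    #include <iostream>

    void describe_texture(Texture *tex) {
      // Prints the texture type, format, compression mode, sampler state and
      // RAM-image statistics, using the keywords chosen by the code above.
      tex->write(std::cout, 0);
    }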
1927
1928/**
1929 * Changes the size of the texture, padding if necessary, and setting the pad
1930 * region as well.
1931 */
1932void Texture::
1933set_size_padded(int x, int y, int z) {
1934 CDWriter cdata(_cycler, true);
1935 if (do_get_auto_texture_scale(cdata) != ATS_none) {
1936 do_set_x_size(cdata, up_to_power_2(x));
1937 do_set_y_size(cdata, up_to_power_2(y));
1938
1939 if (cdata->_texture_type == TT_3d_texture) {
1940 // Only pad 3D textures. It does not make sense to do so for cube maps
1941 // or 2D texture arrays.
1942 do_set_z_size(cdata, up_to_power_2(z));
1943 } else {
1944 do_set_z_size(cdata, z);
1945 }
1946 } else {
1947 do_set_x_size(cdata, x);
1948 do_set_y_size(cdata, y);
1949 do_set_z_size(cdata, z);
1950 }
1951 do_set_pad_size(cdata,
1952 cdata->_x_size - x,
1953 cdata->_y_size - y,
1954 cdata->_z_size - z);
1955}
1956
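Illustrative usage sketch (not part of texture.cxx): when the effective auto-texture-scale setting is not ATS_none, set_size_padded() rounds each dimension up to a power of 2 and records the leftover border as the pad region. The texture name and sizes below are hypothetical.

    PT(Texture) tex = new Texture("offscreen-color");
    tex->setup_2d_texture(640, 480, Texture::T_unsigned_byte, Texture::F_rgba8);
    tex->set_size_padded(640, 480);
    // With power-of-2 scaling in effect this becomes a 1024 x 512 texture
    // whose pad region is 384 x 32 (query it with get_pad_x_size() etc.).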
1957/**
1958 * Specifies the size of the texture as it exists in its original disk file,
1959 * before any Panda scaling.
1960 */
1961void Texture::
1962set_orig_file_size(int x, int y, int z) {
1963 CDWriter cdata(_cycler, true);
1964 cdata->_orig_file_x_size = x;
1965 cdata->_orig_file_y_size = y;
1966
1967 nassertv(z == cdata->_z_size);
1968}
1969
1970/**
1971 * Creates a context for the texture on the particular GSG, if it does not
1972 * already exist. Returns the new (or old) TextureContext. This assumes that
1973 * the GraphicsStateGuardian is the currently active rendering context and
1974 * that it is ready to accept new textures. If this is not necessarily the
1975 * case, you should use prepare() instead.
1976 *
1977 * Normally, this is not called directly except by the GraphicsStateGuardian;
1978 * a texture does not need to be explicitly prepared by the user before it may
1979 * be rendered.
1980 */
1981TextureContext *Texture::
1982prepare_now(int view,
1983 PreparedGraphicsObjects *prepared_objects,
1984 GraphicsStateGuardianBase *gsg) {
1985 MutexHolder holder(_lock);
1986 CDReader cdata(_cycler);
1987
1988 // Don't exceed the actual number of views.
1989 view = max(min(view, cdata->_num_views - 1), 0);
1990
1991 // Get the list of PreparedGraphicsObjects for this view.
1992 Contexts &contexts = _prepared_views[prepared_objects];
1993 Contexts::const_iterator pvi;
1994 pvi = contexts.find(view);
1995 if (pvi != contexts.end()) {
1996 return (*pvi).second;
1997 }
1998
1999 TextureContext *tc = prepared_objects->prepare_texture_now(this, view, gsg);
2000 contexts[view] = tc;
2001
2002 return tc;
2003}
2004
2005/**
2006 * Returns the smallest power of 2 greater than or equal to value.
2007 */
2008int Texture::
2009up_to_power_2(int value) {
2010 if (value <= 1) {
2011 return 1;
2012 }
2013 int bit = get_next_higher_bit(((unsigned int)value) - 1);
2014 return (1 << bit);
2015}
2016
2017/**
2018 * Returns the largest power of 2 less than or equal to value.
2019 */
2020int Texture::
2021down_to_power_2(int value) {
2022 if (value <= 1) {
2023 return 1;
2024 }
2025 int bit = get_next_higher_bit(((unsigned int)value) >> 1);
2026 return (1 << bit);
2027}
2028
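Illustrative sketch (not part of texture.cxx): both helpers are static methods on Texture, so their behavior can be checked directly; the values below follow from the bit arithmetic above.

    #include "texture.h"
    #include <cassert>

    void check_power_2_helpers() {
      assert(Texture::up_to_power_2(500) == 512);    // rounds up
      assert(Texture::up_to_power_2(512) == 512);    // exact powers of 2 are unchanged
      assert(Texture::down_to_power_2(500) == 256);  // rounds down
      assert(Texture::down_to_power_2(1) == 1);      // lower bound for both helpers
    }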
2029/**
2030 * Asks the PNMImage to change its scale when it reads the image, according to
2031 * the whims of the Config.prc file.
2032 *
2033 * For most efficient results, this method should be called after
2034 * pnmimage.read_header() has been called, but before pnmimage.read(). This
2035 * method may also be called after pnmimage.read(), i.e. when the pnmimage is
2036 * already loaded; in this case it will rescale the image on the spot. Also
2037 * see rescale_texture().
2038 */
2039void Texture::
2040consider_rescale(PNMImage &pnmimage) {
2041 consider_rescale(pnmimage, get_name(), get_auto_texture_scale());
2042}
2043
2044/**
2045 * Asks the PNMImage to change its scale when it reads the image, according to
2046 * the whims of the Config.prc file.
2047 *
2048 * For most efficient results, this method should be called after
2049 * pnmimage.read_header() has been called, but before pnmimage.read(). This
2050 * method may also be called after pnmimage.read(), i.e. when the pnmimage is
2051 * already loaded; in this case it will rescale the image on the spot. Also
2052 * see rescale_texture().
2053 */
2054void Texture::
2055consider_rescale(PNMImage &pnmimage, const string &name, AutoTextureScale auto_texture_scale) {
2056 int new_x_size = pnmimage.get_x_size();
2057 int new_y_size = pnmimage.get_y_size();
2058 if (adjust_size(new_x_size, new_y_size, name, false, auto_texture_scale)) {
2059 if (pnmimage.is_valid()) {
2060 // The image is already loaded. Rescale on the spot.
2061 PNMImage new_image(new_x_size, new_y_size, pnmimage.get_num_channels(),
2062 pnmimage.get_maxval(), pnmimage.get_type(),
2063 pnmimage.get_color_space());
2064 new_image.quick_filter_from(pnmimage);
2065 pnmimage.take_from(new_image);
2066 } else {
2067 // Rescale while reading. Some image types (e.g. jpeg) can take
2068 // advantage of this.
2069 pnmimage.set_read_size(new_x_size, new_y_size);
2070 }
2071 }
2072}
2073
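Illustrative usage sketch (not part of texture.cxx), following the calling order described in the comments above; the filename is hypothetical, and the member overload is used so that the texture's own auto-texture-scale setting applies.

    PNMImage img;
    if (img.read_header("maps/grid.jpg")) {   // header only: establishes the size
      tex->consider_rescale(img);             // applies the texture-scale / power-of-2 policy
      img.read("maps/grid.jpg");              // formats such as JPEG can decode at the reduced size
    }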
2074/**
2075 * Returns the indicated TextureType converted to a string word.
2076 */
2077string Texture::
2078format_texture_type(TextureType tt) {
2079 switch (tt) {
2080 case TT_1d_texture:
2081 return "1d_texture";
2082 case TT_2d_texture:
2083 return "2d_texture";
2084 case TT_3d_texture:
2085 return "3d_texture";
2086 case TT_2d_texture_array:
2087 return "2d_texture_array";
2088 case TT_cube_map:
2089 return "cube_map";
2090 case TT_cube_map_array:
2091 return "cube_map_array";
2092 case TT_buffer_texture:
2093 return "buffer_texture";
2094 case TT_1d_texture_array:
2095 return "1d_texture_array";
2096 }
2097 return "**invalid**";
2098}
2099
2100/**
2101 * Returns the TextureType corresponding to the indicated string word.
2102 */
2103Texture::TextureType Texture::
2104string_texture_type(const string &str) {
2105 if (cmp_nocase(str, "1d_texture") == 0) {
2106 return TT_1d_texture;
2107 } else if (cmp_nocase(str, "2d_texture") == 0) {
2108 return TT_2d_texture;
2109 } else if (cmp_nocase(str, "3d_texture") == 0) {
2110 return TT_3d_texture;
2111 } else if (cmp_nocase(str, "2d_texture_array") == 0) {
2112 return TT_2d_texture_array;
2113 } else if (cmp_nocase(str, "cube_map") == 0) {
2114 return TT_cube_map;
2115 } else if (cmp_nocase(str, "cube_map_array") == 0) {
2116 return TT_cube_map_array;
2117 } else if (cmp_nocase(str, "buffer_texture") == 0) {
2118 return TT_buffer_texture;
2119 }
2120
2121 gobj_cat->error()
2122 << "Invalid Texture::TextureType value: " << str << "\n";
2123 return TT_2d_texture;
2124}
2125
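Illustrative sketch (not part of texture.cxx): format_texture_type() and string_texture_type() are intended to round-trip, which is how texture metadata is written out as text and parsed back in.

    std::string word = Texture::format_texture_type(Texture::TT_cube_map);  // "cube_map"
    Texture::TextureType tt = Texture::string_texture_type(word);           // TT_cube_map again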
2126/**
2127 * Returns the indicated ComponentType converted to a string word.
2128 */
2129string Texture::
2130format_component_type(ComponentType ct) {
2131 switch (ct) {
2132 case T_unsigned_byte:
2133 return "unsigned_byte";
2134 case T_unsigned_short:
2135 return "unsigned_short";
2136 case T_float:
2137 return "float";
2138 case T_unsigned_int_24_8:
2139 return "unsigned_int_24_8";
2140 case T_int:
2141 return "int";
2142 case T_byte:
2143 return "unsigned_byte";
2144 case T_short:
2145 return "short";
2146 case T_half_float:
2147 return "half_float";
2148 case T_unsigned_int:
2149 return "unsigned_int";
2150 }
2151
2152 return "**invalid**";
2153}
2154
2155/**
2156 * Returns the ComponentType corresponding to the indicated string word.
2157 */
2158Texture::ComponentType Texture::
2159string_component_type(const string &str) {
2160 if (cmp_nocase(str, "unsigned_byte") == 0) {
2161 return T_unsigned_byte;
2162 } else if (cmp_nocase(str, "unsigned_short") == 0) {
2163 return T_unsigned_short;
2164 } else if (cmp_nocase(str, "float") == 0) {
2165 return T_float;
2166 } else if (cmp_nocase(str, "unsigned_int_24_8") == 0) {
2167 return T_unsigned_int_24_8;
2168 } else if (cmp_nocase(str, "int") == 0) {
2169 return T_int;
2170 } else if (cmp_nocase(str, "byte") == 0) {
2171 return T_byte;
2172 } else if (cmp_nocase(str, "short") == 0) {
2173 return T_short;
2174 } else if (cmp_nocase(str, "half_float") == 0) {
2175 return T_half_float;
2176 } else if (cmp_nocase(str, "unsigned_int") == 0) {
2177 return T_unsigned_int;
2178 }
2179
2180 gobj_cat->error()
2181 << "Invalid Texture::ComponentType value: " << str << "\n";
2182 return T_unsigned_byte;
2183}
2184
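Illustrative sketch (not part of texture.cxx, assuming <cassert> is available): the round-trip property is what requires format_component_type() above to return "byte" rather than "unsigned_byte" for T_byte, since string_component_type() maps "byte" back to T_byte.

    assert(Texture::string_component_type(
             Texture::format_component_type(Texture::T_byte)) == Texture::T_byte);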
2185/**
2186 * Returns the indicated Format converted to a string word.
2187 */
2188string Texture::
2189format_format(Format format) {
2190 switch (format) {
2191 case F_depth_stencil:
2192 return "depth_stencil";
2193 case F_depth_component:
2194 return "depth_component";
2195 case F_depth_component16:
2196 return "depth_component16";
2197 case F_depth_component24:
2198 return "depth_component24";
2199 case F_depth_component32:
2200 return "depth_component32";
2201 case F_color_index:
2202 return "color_index";
2203 case F_red:
2204 return "red";
2205 case F_green:
2206 return "green";
2207 case F_blue:
2208 return "blue";
2209 case F_alpha:
2210 return "alpha";
2211 case F_rgb:
2212 return "rgb";
2213 case F_rgb5:
2214 return "rgb5";
2215 case F_rgb8:
2216 return "rgb8";
2217 case F_rgb12:
2218 return "rgb12";
2219 case F_rgb332:
2220 return "rgb332";
2221 case F_rgba:
2222 return "rgba";
2223 case F_rgbm:
2224 return "rgbm";
2225 case F_rgba4:
2226 return "rgba4";
2227 case F_rgba5:
2228 return "rgba5";
2229 case F_rgba8:
2230 return "rgba8";
2231 case F_rgba12:
2232 return "rgba12";
2233 case F_luminance:
2234 return "luminance";
2235 case F_luminance_alpha:
2236 return "luminance_alpha";
2237 case F_luminance_alphamask:
2238 return "luminance_alphamask";
2239 case F_rgba16:
2240 return "rgba16";
2241 case F_rgba32:
2242 return "rgba32";
2243 case F_r16:
2244 return "r16";
2245 case F_rg16:
2246 return "rg16";
2247 case F_rgb16:
2248 return "rgb16";
2249 case F_srgb:
2250 return "srgb";
2251 case F_srgb_alpha:
2252 return "srgb_alpha";
2253 case F_sluminance:
2254 return "sluminance";
2255 case F_sluminance_alpha:
2256 return "sluminance_alpha";
2257 case F_r32i:
2258 return "r32i";
2259 case F_r32:
2260 return "r32";
2261 case F_rg32:
2262 return "rg32";
2263 case F_rgb32:
2264 return "rgb32";
2265 case F_r8i:
2266 return "r8i";
2267 case F_rg8i:
2268 return "rg8i";
2269 case F_rgb8i:
2270 return "rgb8i";
2271 case F_rgba8i:
2272 return "rgba8i";
2273 case F_r11_g11_b10:
2274 return "r11g11b10";
2275 case F_rgb9_e5:
2276 return "rgb9_e5";
2277 case F_rgb10_a2:
2278 return "rgb10_a2";
2279 case F_rg:
2280 return "rg";
2281 case F_r16i:
2282 return "r16i";
2283 case F_rg16i:
2284 return "rg16i";
2285 case F_rgb16i:
2286 return "rgb16i";
2287 case F_rgba16i:
2288 return "rgba16i";
2289 case F_rg32i:
2290 return "rg32i";
2291 case F_rgb32i:
2292 return "rgb32i";
2293 case F_rgba32i:
2294 return "rgba32i";
2295 }
2296 return "**invalid**";
2297}
2298
2299/**
2300 * Returns the Format corresponding to the indicated string word.
2301 */
2302Texture::Format Texture::
2303string_format(const string &str) {
2304 if (cmp_nocase(str, "depth_stencil") == 0) {
2305 return F_depth_stencil;
2306 } else if (cmp_nocase(str, "depth_component") == 0) {
2307 return F_depth_component;
2308 } else if (cmp_nocase(str, "depth_component16") == 0 || cmp_nocase(str, "d16") == 0) {
2309 return F_depth_component16;
2310 } else if (cmp_nocase(str, "depth_component24") == 0 || cmp_nocase(str, "d24") == 0) {
2311 return F_depth_component24;
2312 } else if (cmp_nocase(str, "depth_component32") == 0 || cmp_nocase(str, "d32") == 0) {
2313 return F_depth_component32;
2314 } else if (cmp_nocase(str, "color_index") == 0) {
2315 return F_color_index;
2316 } else if (cmp_nocase(str, "red") == 0) {
2317 return F_red;
2318 } else if (cmp_nocase(str, "green") == 0) {
2319 return F_green;
2320 } else if (cmp_nocase(str, "blue") == 0) {
2321 return F_blue;
2322 } else if (cmp_nocase(str, "alpha") == 0) {
2323 return F_alpha;
2324 } else if (cmp_nocase(str, "rgb") == 0) {
2325 return F_rgb;
2326 } else if (cmp_nocase(str, "rgb5") == 0) {
2327 return F_rgb5;
2328 } else if (cmp_nocase(str, "rgb8") == 0 || cmp_nocase(str, "r8g8b8") == 0) {
2329 return F_rgb8;
2330 } else if (cmp_nocase(str, "rgb12") == 0) {
2331 return F_rgb12;
2332 } else if (cmp_nocase(str, "rgb332") == 0 || cmp_nocase(str, "r3g3b2") == 0) {
2333 return F_rgb332;
2334 } else if (cmp_nocase(str, "rgba") == 0) {
2335 return F_rgba;
2336 } else if (cmp_nocase(str, "rgbm") == 0) {
2337 return F_rgbm;
2338 } else if (cmp_nocase(str, "rgba4") == 0) {
2339 return F_rgba4;
2340 } else if (cmp_nocase(str, "rgba5") == 0) {
2341 return F_rgba5;
2342 } else if (cmp_nocase(str, "rgba8") == 0 || cmp_nocase(str, "r8g8b8a8") == 0) {
2343 return F_rgba8;
2344 } else if (cmp_nocase(str, "rgba12") == 0) {
2345 return F_rgba12;
2346 } else if (cmp_nocase(str, "luminance") == 0) {
2347 return F_luminance;
2348 } else if (cmp_nocase(str, "luminance_alpha") == 0) {
2349 return F_luminance_alpha;
2350 } else if (cmp_nocase(str, "luminance_alphamask") == 0) {
2351 return F_luminance_alphamask;
2352 } else if (cmp_nocase(str, "rgba16") == 0 || cmp_nocase(str, "r16g16b16a16") == 0) {
2353 return F_rgba16;
2354 } else if (cmp_nocase(str, "rgba32") == 0 || cmp_nocase(str, "r32g32b32a32") == 0) {
2355 return F_rgba32;
2356 } else if (cmp_nocase(str, "r16") == 0 || cmp_nocase(str, "red16") == 0) {
2357 return F_r16;
2358 } else if (cmp_nocase(str, "r16i") == 0) {
2359 return F_r16i;
2360 } else if (cmp_nocase(str, "rg16") == 0 || cmp_nocase(str, "r16g16") == 0) {
2361 return F_rg16;
2362 } else if (cmp_nocase(str, "rgb16") == 0 || cmp_nocase(str, "r16g16b16") == 0) {
2363 return F_rgb16;
2364 } else if (cmp_nocase(str, "srgb") == 0) {
2365 return F_srgb;
2366 } else if (cmp_nocase(str, "srgb_alpha") == 0) {
2367 return F_srgb_alpha;
2368 } else if (cmp_nocase(str, "sluminance") == 0) {
2369 return F_sluminance;
2370 } else if (cmp_nocase(str, "sluminance_alpha") == 0) {
2371 return F_sluminance_alpha;
2372 } else if (cmp_nocase(str, "r32i") == 0) {
2373 return F_r32i;
2374 } else if (cmp_nocase(str, "r32") == 0 || cmp_nocase(str, "red32") == 0) {
2375 return F_r32;
2376 } else if (cmp_nocase(str, "rg32") == 0 || cmp_nocase(str, "r32g32") == 0) {
2377 return F_rg32;
2378 } else if (cmp_nocase(str, "rgb32") == 0 || cmp_nocase(str, "r32g32b32") == 0) {
2379 return F_rgb32;
2380 } else if (cmp_nocase_uh(str, "r8i") == 0) {
2381 return F_r8i;
2382 } else if (cmp_nocase_uh(str, "rg8i") == 0 || cmp_nocase_uh(str, "r8g8i") == 0) {
2383 return F_rg8i;
2384 } else if (cmp_nocase_uh(str, "rgb8i") == 0 || cmp_nocase_uh(str, "r8g8b8i") == 0) {
2385 return F_rgb8i;
2386 } else if (cmp_nocase_uh(str, "rgba8i") == 0 || cmp_nocase_uh(str, "r8g8b8a8i") == 0) {
2387 return F_rgba8i;
2388 } else if (cmp_nocase(str, "r11g11b10") == 0) {
2389 return F_r11_g11_b10;
2390 } else if (cmp_nocase(str, "rgb9_e5") == 0) {
2391 return F_rgb9_e5;
2392 } else if (cmp_nocase_uh(str, "rgb10_a2") == 0 || cmp_nocase(str, "r10g10b10a2") == 0) {
2393 return F_rgb10_a2;
2394 } else if (cmp_nocase_uh(str, "rg") == 0) {
2395 return F_rg;
2396 } else if (cmp_nocase_uh(str, "r16i") == 0) {
2397 return F_r16i;
2398 } else if (cmp_nocase_uh(str, "rg16i") == 0 || cmp_nocase_uh(str, "r16g16i") == 0) {
2399 return F_rg16i;
2400 } else if (cmp_nocase_uh(str, "rgb16i") == 0 || cmp_nocase_uh(str, "r16g16b16i") == 0) {
2401 return F_rgb16i;
2402 } else if (cmp_nocase_uh(str, "rgba16i") == 0 || cmp_nocase_uh(str, "r16g16b16a16i") == 0) {
2403 return F_rgba16i;
2404 } else if (cmp_nocase_uh(str, "rg32i") == 0 || cmp_nocase_uh(str, "r32g32i") == 0) {
2405 return F_rg32i;
2406 } else if (cmp_nocase_uh(str, "rgb32i") == 0 || cmp_nocase_uh(str, "r32g32b32i") == 0) {
2407 return F_rgb32i;
2408 } else if (cmp_nocase_uh(str, "rgba32i") == 0 || cmp_nocase_uh(str, "r32g32b32a32i") == 0) {
2409 return F_rgba32i;
2410 }
2411
2412 gobj_cat->error()
2413 << "Invalid Texture::Format value: " << str << "\n";
2414 return F_rgba;
2415}
2416
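Illustrative sketch (not part of texture.cxx): string_format() compares case-insensitively and accepts a few alternate spellings for the same format, as the branches above show.

    assert(Texture::string_format("rgba8") == Texture::F_rgba8);
    assert(Texture::string_format("r8g8b8a8") == Texture::F_rgba8);        // alternate spelling
    assert(Texture::string_format("D24") == Texture::F_depth_component24); // case-insensitive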
2417/**
2418 * Returns the indicated CompressionMode converted to a string word.
2419 */
2420string Texture::
2421format_compression_mode(CompressionMode cm) {
2422 switch (cm) {
2423 case CM_default:
2424 return "default";
2425 case CM_off:
2426 return "off";
2427 case CM_on:
2428 return "on";
2429 case CM_fxt1:
2430 return "fxt1";
2431 case CM_dxt1:
2432 return "dxt1";
2433 case CM_dxt2:
2434 return "dxt2";
2435 case CM_dxt3:
2436 return "dxt3";
2437 case CM_dxt4:
2438 return "dxt4";
2439 case CM_dxt5:
2440 return "dxt5";
2441 case CM_pvr1_2bpp:
2442 return "pvr1_2bpp";
2443 case CM_pvr1_4bpp:
2444 return "pvr1_4bpp";
2445 case CM_rgtc:
2446 return "rgtc";
2447 case CM_etc1:
2448 return "etc1";
2449 case CM_etc2:
2450 return "etc2";
2451 case CM_eac:
2452 return "eac";
2453 }
2454
2455 return "**invalid**";
2456}
2457
2458/**
2459 * Returns the CompressionMode value associated with the given string
2460 * representation.
2461 */
2462Texture::CompressionMode Texture::
2463string_compression_mode(const string &str) {
2464 if (cmp_nocase_uh(str, "default") == 0) {
2465 return CM_default;
2466 } else if (cmp_nocase_uh(str, "off") == 0) {
2467 return CM_off;
2468 } else if (cmp_nocase_uh(str, "on") == 0) {
2469 return CM_on;
2470 } else if (cmp_nocase_uh(str, "fxt1") == 0) {
2471 return CM_fxt1;
2472 } else if (cmp_nocase_uh(str, "dxt1") == 0) {
2473 return CM_dxt1;
2474 } else if (cmp_nocase_uh(str, "dxt2") == 0) {
2475 return CM_dxt2;
2476 } else if (cmp_nocase_uh(str, "dxt3") == 0) {
2477 return CM_dxt3;
2478 } else if (cmp_nocase_uh(str, "dxt4") == 0) {
2479 return CM_dxt4;
2480 } else if (cmp_nocase_uh(str, "dxt5") == 0) {
2481 return CM_dxt5;
2482 } else if (cmp_nocase_uh(str, "pvr1_2bpp") == 0) {
2483 return CM_pvr1_2bpp;
2484 } else if (cmp_nocase_uh(str, "pvr1_4bpp") == 0) {
2485 return CM_pvr1_4bpp;
2486 } else if (cmp_nocase_uh(str, "rgtc") == 0) {
2487 return CM_rgtc;
2488 } else if (cmp_nocase_uh(str, "etc1") == 0) {
2489 return CM_etc1;
2490 } else if (cmp_nocase_uh(str, "etc2") == 0) {
2491 return CM_etc2;
2492 } else if (cmp_nocase_uh(str, "eac") == 0) {
2493 return CM_eac;
2494 }
2495
2496 gobj_cat->error()
2497 << "Invalid Texture::CompressionMode value: " << str << "\n";
2498 return CM_default;
2499}
2500
2501
2502/**
2503 * Returns the indicated QualityLevel converted to a string word.
2504 */
2505string Texture::
2506format_quality_level(QualityLevel ql) {
2507 switch (ql) {
2508 case QL_default:
2509 return "default";
2510 case QL_fastest:
2511 return "fastest";
2512 case QL_normal:
2513 return "normal";
2514 case QL_best:
2515 return "best";
2516 }
2517
2518 return "**invalid**";
2519}
2520
2521/**
2522 * Returns the QualityLevel value associated with the given string
2523 * representation.
2524 */
2525Texture::QualityLevel Texture::
2526string_quality_level(const string &str) {
2527 if (cmp_nocase(str, "default") == 0) {
2528 return QL_default;
2529 } else if (cmp_nocase(str, "fastest") == 0) {
2530 return QL_fastest;
2531 } else if (cmp_nocase(str, "normal") == 0) {
2532 return QL_normal;
2533 } else if (cmp_nocase(str, "best") == 0) {
2534 return QL_best;
2535 }
2536
2537 gobj_cat->error()
2538 << "Invalid Texture::QualityLevel value: " << str << "\n";
2539 return QL_default;
2540}
2541
2542/**
2543 * This method is called by the GraphicsEngine at the beginning of the frame
2544 * *after* a texture has been successfully uploaded to graphics memory. It is
2545 * intended as a callback so the texture can release its RAM image, if
2546 * _keep_ram_image is false.
2547 *
2548 * This is called indirectly when the GSG calls
2549 * GraphicsEngine::texture_uploaded().
2550 */
2551void Texture::
2552texture_uploaded() {
2553 CDLockedReader cdata(_cycler);
2554
2555 if (!keep_texture_ram && !cdata->_keep_ram_image) {
2556 // Once we have prepared the texture, we can generally safely remove the
2557 // pixels from main RAM. The GSG is now responsible for remembering what
2558 // it looks like.
2559
2560 CDWriter cdataw(_cycler, cdata, false);
2561 if (gobj_cat.is_debug()) {
2562 gobj_cat.debug()
2563 << "Dumping RAM for texture " << get_name() << "\n";
2564 }
2565 do_clear_ram_image(cdataw);
2566 }
2567}
2568
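Illustrative sketch (not part of texture.cxx): to keep the pixels in system RAM even after texture_uploaded() runs, either set the keep-texture-ram prc variable globally or mark the individual texture.

    tex->set_keep_ram_image(true);   // this texture's RAM image now survives upload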
2569/**
2570 * Should be overridden by derived classes to return true if cull_callback()
2571 * has been defined. Otherwise, returns false to indicate cull_callback()
2572 * does not need to be called for this node during the cull traversal.
2573 */
2574bool Texture::
2575has_cull_callback() const {
2576 return false;
2577}
2578
2579/**
2580 * If has_cull_callback() returns true, this function will be called during
2581 * the cull traversal to perform any additional operations that should be
2582 * performed at cull time.
2583 *
2584 * This is called each time the Texture is discovered applied to a Geom in the
2585 * traversal. It should return true if the Geom is visible, false if it
2586 * should be omitted.
2587 */
2588bool Texture::
2589cull_callback(CullTraverser *, const CullTraverserData &) const {
2590 return true;
2591}
2592
2593/**
2594 * A factory function to make a new Texture, used to pass to the TexturePool.
2595 */
2596PT(Texture) Texture::
2597make_texture() {
2598 return new Texture;
2599}
2600
2601/**
2602 * Returns true if the indicated component type is unsigned, false otherwise.
2603 */
2604bool Texture::
2605is_unsigned(Texture::ComponentType ctype) {
2606 return (ctype == T_unsigned_byte ||
2607 ctype == T_unsigned_short ||
2608 ctype == T_unsigned_int_24_8 ||
2609 ctype == T_unsigned_int);
2610}
2611
2612/**
2613 * Returns true if the indicated compression mode is one of the specific
2614 * compression types, false otherwise.
2615 */
2616bool Texture::
2617is_specific(Texture::CompressionMode compression) {
2618 switch (compression) {
2619 case CM_default:
2620 case CM_off:
2621 case CM_on:
2622 return false;
2623
2624 default:
2625 return true;
2626 }
2627}
2628
2629/**
2630 * Returns true if the indicated format includes alpha, false otherwise.
2631 */
2632bool Texture::
2633has_alpha(Format format) {
2634 switch (format) {
2635 case F_alpha:
2636 case F_rgba:
2637 case F_rgbm:
2638 case F_rgba4:
2639 case F_rgba5:
2640 case F_rgba8:
2641 case F_rgba12:
2642 case F_rgba16:
2643 case F_rgba32:
2644 case F_luminance_alpha:
2645 case F_luminance_alphamask:
2646 case F_srgb_alpha:
2647 case F_sluminance_alpha:
2648 case F_rgba8i:
2649 case F_rgb10_a2:
2650 case F_rgba16i:
2651 case F_rgba32i:
2652 return true;
2653
2654 default:
2655 return false;
2656 }
2657}
2658
2659/**
2660 * Returns true if the indicated format includes a binary alpha only, false
2661 * otherwise.
2662 */
2663bool Texture::
2664has_binary_alpha(Format format) {
2665 switch (format) {
2666 case F_rgbm:
2667 return true;
2668
2669 default:
2670 return false;
2671 }
2672}
2673
2674/**
2675 * Returns true if the indicated format is in the sRGB color space, false
2676 * otherwise.
2677 */
2678bool Texture::
2679is_srgb(Format format) {
2680 switch (format) {
2681 case F_srgb:
2682 case F_srgb_alpha:
2683 case F_sluminance:
2684 case F_sluminance_alpha:
2685 return true;
2686
2687 default:
2688 return false;
2689 }
2690}
2691
2692/**
2693 * Returns true if the indicated format is an integer format, false otherwise.
2694 */
2695bool Texture::
2696is_integer(Format format) {
2697 switch (format) {
2698 case F_r32i:
2699 case F_r8i:
2700 case F_rg8i:
2701 case F_rgb8i:
2702 case F_rgba8i:
2703 case F_r16i:
2704 case F_rg16i:
2705 case F_rgb16i:
2706 case F_rgba16i:
2707 case F_rg32i:
2708 case F_rgb32i:
2709 case F_rgba32i:
2710 return true;
2711
2712 default:
2713 return false;
2714 }
2715}
2716
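Illustrative sketch (not part of texture.cxx): the predicates above take a Format value directly, so they can be applied without consulting any other texture state.

    Texture::Format fmt = tex->get_format();
    if (Texture::is_srgb(fmt) && !Texture::has_alpha(fmt)) {
      // e.g. F_srgb: gamma-encoded color with no alpha channel
    }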
2717/**
2718 * Computes the proper size of the texture, based on the original size, the
2719 * filename, and the resizing whims of the config file.
2720 *
2721 * x_size and y_size should be loaded with the texture image's original size
2722 * on disk. On return, they will be loaded with the texture's in-memory
2723 * target size. The return value is true if the size has been adjusted, or
2724 * false if it is the same.
2725 */
2726bool Texture::
2727adjust_size(int &x_size, int &y_size, const string &name,
2728 bool for_padding, AutoTextureScale auto_texture_scale) {
2729 bool exclude = false;
2730 int num_excludes = exclude_texture_scale.get_num_unique_values();
2731 for (int i = 0; i < num_excludes && !exclude; ++i) {
2732 GlobPattern pat(exclude_texture_scale.get_unique_value(i));
2733 if (pat.matches(name)) {
2734 exclude = true;
2735 }
2736 }
2737
2738 int new_x_size = x_size;
2739 int new_y_size = y_size;
2740
2741 if (!exclude) {
2742 new_x_size = (int)cfloor(new_x_size * texture_scale + 0.5);
2743 new_y_size = (int)cfloor(new_y_size * texture_scale + 0.5);
2744
2745 // Don't auto-scale below 4 in either dimension. This causes problems for
2746 // DirectX and texture compression.
2747 new_x_size = min(max(new_x_size, (int)texture_scale_limit), x_size);
2748 new_y_size = min(max(new_y_size, (int)texture_scale_limit), y_size);
2749 }
2750
2751 AutoTextureScale ats = auto_texture_scale;
2752 if (ats == ATS_unspecified) {
2753 ats = get_textures_power_2();
2754 }
2755 if (!for_padding && ats == ATS_pad) {
2756 // If we're not calculating the padding size--that is, we're calculating
2757 // the initial scaling size instead--then ignore ATS_pad, and treat it the
2758 // same as ATS_none.
2759 ats = ATS_none;
2760 }
2761
2762 switch (ats) {
2763 case ATS_down:
2764 new_x_size = down_to_power_2(new_x_size);
2765 new_y_size = down_to_power_2(new_y_size);
2766 break;
2767
2768 case ATS_up:
2769 case ATS_pad:
2770 new_x_size = up_to_power_2(new_x_size);
2771 new_y_size = up_to_power_2(new_y_size);
2772 break;
2773
2774 case ATS_none:
2775 case ATS_unspecified:
2776 break;
2777 }
2778
2779 ats = textures_square.get_value();
2780 if (!for_padding && ats == ATS_pad) {
2781 ats = ATS_none;
2782 }
2783 switch (ats) {
2784 case ATS_down:
2785 new_x_size = new_y_size = min(new_x_size, new_y_size);
2786 break;
2787
2788 case ATS_up:
2789 case ATS_pad:
2790 new_x_size = new_y_size = max(new_x_size, new_y_size);
2791 break;
2792
2793 case ATS_none:
2794 case ATS_unspecified:
2795 break;
2796 }
2797
2798 if (!exclude) {
2799 int max_dimension = max_texture_dimension;
2800
2801 if (max_dimension < 0) {
2802 GraphicsStateGuardianBase *gsg = GraphicsStateGuardianBase::get_default_gsg();
2803 if (gsg != nullptr) {
2804 max_dimension = gsg->get_max_texture_dimension();
2805 }
2806 }
2807
2808 if (max_dimension > 0) {
2809 new_x_size = min(new_x_size, (int)max_dimension);
2810 new_y_size = min(new_y_size, (int)max_dimension);
2811 }
2812 }
2813
2814 if (x_size != new_x_size || y_size != new_y_size) {
2815 x_size = new_x_size;
2816 y_size = new_y_size;
2817 return true;
2818 }
2819
2820 return false;
2821}
2822
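Worked example (illustrative, not part of texture.cxx), assuming texture-scale is 1.0, textures-power-2 is "down", textures-square is "none", and the name does not match exclude-texture-scale:

    //   on entry:   x_size = 640, y_size = 480
    //   scaling:    640 x 480 (texture-scale 1.0 leaves the size unchanged)
    //   power of 2: down_to_power_2(640) = 512, down_to_power_2(480) = 256
    //   result:     adjust_size() returns true with x_size = 512, y_size = 256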
2823/**
2824 * May be called prior to calling read_txo() or any bam-related Texture-
2825 * creating callback, to ensure that the proper dynamic libraries for a
2826 * Texture of the current class type, and the indicated filename, have been
2827 * already loaded.
2828 *
2829 * This is a low-level function that should not normally need to be called
2830 * directly by the user.
2831 *
2832 * Note that for best results you must first create a Texture object of the
2833 * appropriate class type for your filename, for instance with
2834 * TexturePool::make_texture().
2835 */
2836void Texture::
2837ensure_loader_type(const Filename &filename) {
2838 // For a plain Texture type, this doesn't need to do anything.
2839}
2840
2841/**
2842 * Called by TextureContext to give the Texture a chance to mark itself dirty
2843 * before rendering, if necessary.
2844 */
2845void Texture::
2846reconsider_dirty() {
2847}
2848
2849/**
2850 * Works like adjust_size, but also considers the texture class. Movie
2851 * textures, for instance, always pad outwards, regardless of textures-
2852 * power-2.
2853 */
2854bool Texture::
2855do_adjust_this_size(const CData *cdata, int &x_size, int &y_size, const string &name,
2856 bool for_padding) const {
2857 return adjust_size(x_size, y_size, name, for_padding, cdata->_auto_texture_scale);
2858}
2859
2860/**
2861 * The internal implementation of the various read() methods.
2862 */
2863bool Texture::
2864do_read(CData *cdata, const Filename &fullpath, const Filename &alpha_fullpath,
2865 int primary_file_num_channels, int alpha_file_channel,
2866 int z, int n, bool read_pages, bool read_mipmaps,
2867 const LoaderOptions &options, BamCacheRecord *record) {
2868 PStatTimer timer(_texture_read_pcollector);
2869
2870 if (options.get_auto_texture_scale() != ATS_unspecified) {
2871 cdata->_auto_texture_scale = options.get_auto_texture_scale();
2872 }
2873
2874 bool header_only = ((options.get_texture_flags() & (LoaderOptions::TF_preload | LoaderOptions::TF_preload_simple)) == 0);
2875 if (record != nullptr) {
2876 header_only = false;
2877 }
2878
2879 if ((z == 0 || read_pages) && (n == 0 || read_mipmaps)) {
2880 // When we re-read the page 0 of the base image, we clear everything and
2881 // start over.
2882 do_clear_ram_image(cdata);
2883 }
2884
2885 if (is_txo_filename(fullpath)) {
2886 if (record != nullptr) {
2887 record->add_dependent_file(fullpath);
2888 }
2889 return do_read_txo_file(cdata, fullpath);
2890 }
2891
2892 if (is_dds_filename(fullpath)) {
2893 if (record != nullptr) {
2894 record->add_dependent_file(fullpath);
2895 }
2896 return do_read_dds_file(cdata, fullpath, header_only);
2897 }
2898
2899 if (is_ktx_filename(fullpath)) {
2900 if (record != nullptr) {
2901 record->add_dependent_file(fullpath);
2902 }
2903 return do_read_ktx_file(cdata, fullpath, header_only);
2904 }
2905
2906 // If read_pages or read_mipmaps is specified, then z and n actually
2907 // indicate z_size and n_size, respectively--the numerical limits on which
2908 // to search for filenames.
2909 int z_size = z;
2910 int n_size = n;
2911
2912 // Certain texture types have an implicit z_size. If z_size is omitted,
2913 // choose an appropriate default based on the texture type.
2914 if (z_size == 0) {
2915 switch (cdata->_texture_type) {
2916 case TT_1d_texture:
2917 case TT_2d_texture:
2918 case TT_buffer_texture:
2919 z_size = 1;
2920 break;
2921
2922 case TT_cube_map:
2923 z_size = 6;
2924 break;
2925
2926 default:
2927 break;
2928 }
2929 }
2930
2931 int num_views = 0;
2932 if (options.get_texture_flags() & LoaderOptions::TF_multiview) {
2933 // We'll be loading a multiview texture.
2934 read_pages = true;
2935 if (options.get_texture_num_views() != 0) {
2936 num_views = options.get_texture_num_views();
2937 do_set_num_views(cdata, num_views);
2938 }
2939 }
2940
2941 VirtualFileSystem *vfs = VirtualFileSystem::get_global_ptr();
2942
2943 if (read_pages && read_mipmaps) {
2944 // Read a sequence of pages * mipmap levels.
2945 Filename fullpath_pattern = Filename::pattern_filename(fullpath);
2946 Filename alpha_fullpath_pattern = Filename::pattern_filename(alpha_fullpath);
2947 do_set_z_size(cdata, z_size);
2948
2949 n = 0;
2950 while (true) {
2951 // For mipmap level 0, the total number of pages might be determined by
2952 // the number of files we find. After mipmap level 0, though, the
2953 // number of pages is predetermined.
2954 if (n != 0) {
2955 z_size = do_get_expected_mipmap_z_size(cdata, n);
2956 }
2957
2958 z = 0;
2959
2960 Filename n_pattern = Filename::pattern_filename(fullpath_pattern.get_filename_index(z));
2961 Filename alpha_n_pattern = Filename::pattern_filename(alpha_fullpath_pattern.get_filename_index(z));
2962
2963 if (!n_pattern.has_hash()) {
2964 gobj_cat.error()
2965 << "Filename requires two different hash sequences: " << fullpath
2966 << "\n";
2967 return false;
2968 }
2969
2970 Filename file = n_pattern.get_filename_index(n);
2971 Filename alpha_file = alpha_n_pattern.get_filename_index(n);
2972
2973 if ((n_size == 0 && (vfs->exists(file) || n == 0)) ||
2974 (n_size != 0 && n < n_size)) {
2975 // Continue through the loop.
2976 } else {
2977 // We've reached the end of the mipmap sequence.
2978 break;
2979 }
2980
2981 int num_pages = z_size * num_views;
2982 while ((num_pages == 0 && (vfs->exists(file) || z == 0)) ||
2983 (num_pages != 0 && z < num_pages)) {
2984 if (!do_read_one(cdata, file, alpha_file, z, n, primary_file_num_channels,
2985 alpha_file_channel, options, header_only, record)) {
2986 return false;
2987 }
2988 ++z;
2989
2990 n_pattern = Filename::pattern_filename(fullpath_pattern.get_filename_index(z));
2991 file = n_pattern.get_filename_index(n);
2992 alpha_file = alpha_n_pattern.get_filename_index(n);
2993 }
2994
2995 if (n == 0 && n_size == 0) {
2996 // If n_size is not specified, it gets implicitly set after we read
2997 // the base texture image (which determines the size of the texture).
2998 n_size = do_get_expected_num_mipmap_levels(cdata);
2999 }
3000 ++n;
3001 }
3002 cdata->_fullpath = fullpath_pattern;
3003 cdata->_alpha_fullpath = alpha_fullpath_pattern;
3004
3005 } else if (read_pages) {
3006 // Read a sequence of cube map or 3-D texture pages.
3007 Filename fullpath_pattern = Filename::pattern_filename(fullpath);
3008 Filename alpha_fullpath_pattern = Filename::pattern_filename(alpha_fullpath);
3009 if (!fullpath_pattern.has_hash()) {
3010 gobj_cat.error()
3011 << "Filename requires a hash mark: " << fullpath
3012 << "\n";
3013 return false;
3014 }
3015
3016 do_set_z_size(cdata, z_size);
3017 z = 0;
3018 Filename file = fullpath_pattern.get_filename_index(z);
3019 Filename alpha_file = alpha_fullpath_pattern.get_filename_index(z);
3020
3021 int num_pages = z_size * num_views;
3022 while ((num_pages == 0 && (vfs->exists(file) || z == 0)) ||
3023 (num_pages != 0 && z < num_pages)) {
3024 if (!do_read_one(cdata, file, alpha_file, z, 0, primary_file_num_channels,
3025 alpha_file_channel, options, header_only, record)) {
3026 return false;
3027 }
3028 ++z;
3029
3030 file = fullpath_pattern.get_filename_index(z);
3031 alpha_file = alpha_fullpath_pattern.get_filename_index(z);
3032 }
3033 cdata->_fullpath = fullpath_pattern;
3034 cdata->_alpha_fullpath = alpha_fullpath_pattern;
3035
3036 } else if (read_mipmaps) {
3037 // Read a sequence of mipmap levels.
3038 Filename fullpath_pattern = Filename::pattern_filename(fullpath);
3039 Filename alpha_fullpath_pattern = Filename::pattern_filename(alpha_fullpath);
3040 if (!fullpath_pattern.has_hash()) {
3041 gobj_cat.error()
3042 << "Filename requires a hash mark: " << fullpath
3043 << "\n";
3044 return false;
3045 }
3046
3047 n = 0;
3048 Filename file = fullpath_pattern.get_filename_index(n);
3049 Filename alpha_file = alpha_fullpath_pattern.get_filename_index(n);
3050
3051 while ((n_size == 0 && (vfs->exists(file) || n == 0)) ||
3052 (n_size != 0 && n < n_size)) {
3053 if (!do_read_one(cdata, file, alpha_file, z, n,
3054 primary_file_num_channels, alpha_file_channel,
3055 options, header_only, record)) {
3056 return false;
3057 }
3058 ++n;
3059
3060 if (n_size == 0 && n >= do_get_expected_num_mipmap_levels(cdata)) {
3061 // Don't try to read more than the requisite number of mipmap levels
3062 // (unless the user insisted on it for some reason).
3063 break;
3064 }
3065
3066 file = fullpath_pattern.get_filename_index(n);
3067 alpha_file = alpha_fullpath_pattern.get_filename_index(n);
3068 }
3069 cdata->_fullpath = fullpath_pattern;
3070 cdata->_alpha_fullpath = alpha_fullpath_pattern;
3071
3072 } else {
3073 // Just an ordinary read of one file.
3074 if (!do_read_one(cdata, fullpath, alpha_fullpath, z, n,
3075 primary_file_num_channels, alpha_file_channel,
3076 options, header_only, record)) {
3077 return false;
3078 }
3079 }
3080
3081 cdata->_has_read_pages = read_pages;
3082 cdata->_has_read_mipmaps = read_mipmaps;
3083 cdata->_num_mipmap_levels_read = cdata->_ram_images.size();
3084
3085 if (header_only) {
3086 // If we were only supposed to be checking the image header information,
3087 // don't let the Texture think that it's got the image now.
3088 do_clear_ram_image(cdata);
3089 } else {
3090 if ((options.get_texture_flags() & LoaderOptions::TF_preload) != 0) {
3091 // If we intend to keep the ram image around, consider compressing it
3092 // etc.
3093 bool generate_mipmaps = ((options.get_texture_flags() & LoaderOptions::TF_generate_mipmaps) != 0);
3094 bool allow_compression = ((options.get_texture_flags() & LoaderOptions::TF_allow_compression) != 0);
3095 do_consider_auto_process_ram_image(cdata, generate_mipmaps || uses_mipmaps(), allow_compression);
3096 }
3097 }
3098
3099 return true;
3100}
3101
3102/**
3103 * Called only from do_read(), this method reads a single image file, either
3104 * one page or one mipmap level.
3105 */
3106bool Texture::
3107do_read_one(CData *cdata, const Filename &fullpath, const Filename &alpha_fullpath,
3108 int z, int n, int primary_file_num_channels, int alpha_file_channel,
3109 const LoaderOptions &options, bool header_only, BamCacheRecord *record) {
3110 if (record != nullptr) {
3111 nassertr(!header_only, false);
3112 record->add_dependent_file(fullpath);
3113 }
3114
3115 PNMImage image;
3116 PfmFile pfm;
3117 PNMReader *image_reader = image.make_reader(fullpath, nullptr, false);
3118 if (image_reader == nullptr) {
3119 gobj_cat.error()
3120 << "Texture::read() - couldn't read: " << fullpath << endl;
3121 return false;
3122 }
3123 image.copy_header_from(*image_reader);
3124
3125 AutoTextureScale auto_texture_scale = do_get_auto_texture_scale(cdata);
3126
3127 // If it's a floating-point image file, read it by default into a floating-
3128 // point texture.
3129 bool read_floating_point;
3130 int texture_load_type = (options.get_texture_flags() & (LoaderOptions::TF_integer | LoaderOptions::TF_float));
3131 switch (texture_load_type) {
3132 case LoaderOptions::TF_integer:
3133 read_floating_point = false;
3134 break;
3135
3136 case LoaderOptions::TF_float:
3137 read_floating_point = true;
3138 break;
3139
3140 default:
3141 // Neither TF_integer nor TF_float was specified; determine which way the
3142 // texture wants to be loaded.
3143 read_floating_point = (image_reader->is_floating_point());
3144 if (!alpha_fullpath.empty()) {
3145 read_floating_point = false;
3146 }
3147 }
3148
3149 if (header_only || textures_header_only) {
3150 int x_size = image.get_x_size();
3151 int y_size = image.get_y_size();
3152 if (z == 0 && n == 0) {
3153 cdata->_orig_file_x_size = x_size;
3154 cdata->_orig_file_y_size = y_size;
3155 }
3156
3157 if (textures_header_only) {
3158 // In this mode, we never intend to load the actual texture image
3159 // anyway, so we don't even need to make the size right.
3160 x_size = 1;
3161 y_size = 1;
3162
3163 } else {
3164 adjust_size(x_size, y_size, fullpath.get_basename(), false, auto_texture_scale);
3165 }
3166
3167 if (read_floating_point) {
3168 pfm.clear(x_size, y_size, image.get_num_channels());
3169 } else {
3170 image = PNMImage(x_size, y_size, image.get_num_channels(),
3171 image.get_maxval(), image.get_type(),
3172 image.get_color_space());
3173 image.fill(0.2, 0.3, 1.0);
3174 if (image.has_alpha()) {
3175 image.alpha_fill(1.0);
3176 }
3177 }
3178 delete image_reader;
3179
3180 } else {
3181 if (z == 0 && n == 0) {
3182 int x_size = image.get_x_size();
3183 int y_size = image.get_y_size();
3184
3185 cdata->_orig_file_x_size = x_size;
3186 cdata->_orig_file_y_size = y_size;
3187
3188 if (adjust_size(x_size, y_size, fullpath.get_basename(), false, auto_texture_scale)) {
3189 image.set_read_size(x_size, y_size);
3190 }
3191 } else {
3192 image.set_read_size(do_get_expected_mipmap_x_size(cdata, n),
3193 do_get_expected_mipmap_y_size(cdata, n));
3194 }
3195
3196 if (image.get_x_size() != image.get_read_x_size() ||
3197 image.get_y_size() != image.get_read_y_size()) {
3198 gobj_cat.info()
3199 << "Implicitly rescaling " << fullpath.get_basename() << " from "
3200 << image.get_x_size() << " by " << image.get_y_size() << " to "
3201 << image.get_read_x_size() << " by " << image.get_read_y_size()
3202 << "\n";
3203 }
3204
3205 bool success;
3206 if (read_floating_point) {
3207 success = pfm.read(image_reader);
3208 } else {
3209 success = image.read(image_reader);
3210 }
3211
3212 if (!success) {
3213 gobj_cat.error()
3214 << "Texture::read() - couldn't read: " << fullpath << endl;
3215 return false;
3216 }
3217 Thread::consider_yield();
3218 }
3219
3220 PNMImage alpha_image;
3221 if (!alpha_fullpath.empty()) {
3222 PNMReader *alpha_image_reader = alpha_image.make_reader(alpha_fullpath, nullptr, false);
3223 if (alpha_image_reader == nullptr) {
3224 gobj_cat.error()
3225 << "Texture::read() - couldn't read: " << alpha_fullpath << endl;
3226 return false;
3227 }
3228 alpha_image.copy_header_from(*alpha_image_reader);
3229
3230 if (record != nullptr) {
3231 record->add_dependent_file(alpha_fullpath);
3232 }
3233
3234 if (header_only || textures_header_only) {
3235 int x_size = image.get_x_size();
3236 int y_size = image.get_y_size();
3237 alpha_image = PNMImage(x_size, y_size, alpha_image.get_num_channels(),
3238 alpha_image.get_maxval(), alpha_image.get_type(),
3239 alpha_image.get_color_space());
3240 alpha_image.fill(1.0);
3241 if (alpha_image.has_alpha()) {
3242 alpha_image.alpha_fill(1.0);
3243 }
3244 delete alpha_image_reader;
3245
3246 } else {
3247 if (image.get_x_size() != alpha_image.get_x_size() ||
3248 image.get_y_size() != alpha_image.get_y_size()) {
3249 gobj_cat.info()
3250 << "Implicitly rescaling " << alpha_fullpath.get_basename()
3251 << " from " << alpha_image.get_x_size() << " by "
3252 << alpha_image.get_y_size() << " to " << image.get_x_size()
3253 << " by " << image.get_y_size() << "\n";
3254 alpha_image.set_read_size(image.get_x_size(), image.get_y_size());
3255 }
3256
3257 if (!alpha_image.read(alpha_image_reader)) {
3258 gobj_cat.error()
3259 << "Texture::read() - couldn't read (alpha): " << alpha_fullpath << endl;
3260 return false;
3261 }
3262 Thread::consider_yield();
3263 }
3264 }
3265
3266 if (z == 0 && n == 0) {
3267 if (!has_name()) {
3268 set_name(fullpath.get_basename_wo_extension());
3269 }
3270 if (cdata->_filename.empty()) {
3271 cdata->_filename = fullpath;
3272 cdata->_alpha_filename = alpha_fullpath;
3273
3274 // The first time we set the filename via a read() operation, we clear
3275 // keep_ram_image. The user can always set it again later if he needs
3276 // to.
3277 cdata->_keep_ram_image = false;
3278 }
3279
3280 cdata->_fullpath = fullpath;
3281 cdata->_alpha_fullpath = alpha_fullpath;
3282 }
3283
3284 if (!alpha_fullpath.empty()) {
3285 // The grayscale (alpha channel) image must be the same size as the main
3286 // image. This should really have been already guaranteed by the above.
3287 if (image.get_x_size() != alpha_image.get_x_size() ||
3288 image.get_y_size() != alpha_image.get_y_size()) {
3289 gobj_cat.info()
3290 << "Automatically rescaling " << alpha_fullpath.get_basename()
3291 << " from " << alpha_image.get_x_size() << " by "
3292 << alpha_image.get_y_size() << " to " << image.get_x_size()
3293 << " by " << image.get_y_size() << "\n";
3294
3295 PNMImage scaled(image.get_x_size(), image.get_y_size(),
3296 alpha_image.get_num_channels(),
3297 alpha_image.get_maxval(), alpha_image.get_type(),
3298 alpha_image.get_color_space());
3299 scaled.quick_filter_from(alpha_image);
3300 Thread::consider_yield();
3301 alpha_image = scaled;
3302 }
3303 }
3304
3305 if (n == 0) {
3306 consider_downgrade(image, primary_file_num_channels, get_name());
3307 cdata->_primary_file_num_channels = image.get_num_channels();
3308 cdata->_alpha_file_channel = 0;
3309 }
3310
3311 if (!alpha_fullpath.empty()) {
3312 // Make the original image a 4-component image by taking the grayscale
3313 // value from the second image.
3314 image.add_alpha();
3315
3316 if (alpha_file_channel == 4 ||
3317 (alpha_file_channel == 2 && alpha_image.get_num_channels() == 2)) {
3318
3319 if (!alpha_image.has_alpha()) {
3320 gobj_cat.error()
3321 << alpha_fullpath.get_basename() << " has no channel " << alpha_file_channel << ".\n";
3322 } else {
3323 // Use the alpha channel.
3324 for (int x = 0; x < image.get_x_size(); x++) {
3325 for (int y = 0; y < image.get_y_size(); y++) {
3326 image.set_alpha(x, y, alpha_image.get_alpha(x, y));
3327 }
3328 }
3329 }
3330 cdata->_alpha_file_channel = alpha_image.get_num_channels();
3331
3332 } else if (alpha_file_channel >= 1 && alpha_file_channel <= 3 &&
3333 alpha_image.get_num_channels() >= 3) {
3334 // Use the appropriate red, green, or blue channel.
3335 for (int x = 0; x < image.get_x_size(); x++) {
3336 for (int y = 0; y < image.get_y_size(); y++) {
3337 image.set_alpha(x, y, alpha_image.get_channel_val(x, y, alpha_file_channel - 1));
3338 }
3339 }
3340 cdata->_alpha_file_channel = alpha_file_channel;
3341
3342 } else {
3343 // Use the grayscale channel.
3344 for (int x = 0; x < image.get_x_size(); x++) {
3345 for (int y = 0; y < image.get_y_size(); y++) {
3346 image.set_alpha(x, y, alpha_image.get_gray(x, y));
3347 }
3348 }
3349 cdata->_alpha_file_channel = 0;
3350 }
3351 }
3352
3353 if (read_floating_point) {
3354 if (!do_load_one(cdata, pfm, fullpath.get_basename(), z, n, options)) {
3355 return false;
3356 }
3357 } else {
3358 // Now see if we want to pad the image within a larger power-of-2 image.
3359 int pad_x_size = 0;
3360 int pad_y_size = 0;
3361 if (do_get_auto_texture_scale(cdata) == ATS_pad) {
3362 int new_x_size = image.get_x_size();
3363 int new_y_size = image.get_y_size();
3364 if (do_adjust_this_size(cdata, new_x_size, new_y_size, fullpath.get_basename(), true)) {
3365 pad_x_size = new_x_size - image.get_x_size();
3366 pad_y_size = new_y_size - image.get_y_size();
3367 PNMImage new_image(new_x_size, new_y_size, image.get_num_channels(),
3368 image.get_maxval(), image.get_type(),
3369 image.get_color_space());
3370 new_image.copy_sub_image(image, 0, new_y_size - image.get_y_size());
3371 image.take_from(new_image);
3372 }
3373 }
3374
3375 if (!do_load_one(cdata, image, fullpath.get_basename(), z, n, options)) {
3376 return false;
3377 }
3378
3379 do_set_pad_size(cdata, pad_x_size, pad_y_size, 0);
3380 }
3381 return true;
3382}
3383
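Illustrative usage sketch (not part of texture.cxx): the published read() overload that ultimately funnels into do_read_one() above, combining a color image with a separate grayscale alpha image; both paths are hypothetical.

    PT(Texture) tex = new Texture("brick");
    tex->read("maps/brick.jpg", "maps/brick-a.png",
              0 /* primary_file_num_channels: keep as stored */,
              0 /* alpha_file_channel: use the grayscale value */);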
3384/**
3385 * Internal method to load a single page or mipmap level.
3386 */
3387bool Texture::
3388do_load_one(CData *cdata, const PNMImage &pnmimage, const string &name, int z, int n,
3389 const LoaderOptions &options) {
3390 if (cdata->_ram_images.size() <= 1 && n == 0) {
3391 // A special case for mipmap level 0. When we load mipmap level 0, unless
3392 // we already have mipmap levels, it determines the image properties like
3393 // size and number of components.
3394 if (!do_reconsider_z_size(cdata, z, options)) {
3395 return false;
3396 }
3397 nassertr(z >= 0 && z < cdata->_z_size * cdata->_num_views, false);
3398
3399 if (z == 0) {
3400 ComponentType component_type = T_unsigned_byte;
3401 xelval maxval = pnmimage.get_maxval();
3402 if (maxval > 255) {
3403 component_type = T_unsigned_short;
3404 }
3405
3406 if (!do_reconsider_image_properties(cdata, pnmimage.get_x_size(), pnmimage.get_y_size(),
3407 pnmimage.get_num_channels(), component_type,
3408 z, options)) {
3409 return false;
3410 }
3411 }
3412
3413 do_modify_ram_image(cdata);
3414 cdata->_loaded_from_image = true;
3415 }
3416
3417 do_modify_ram_mipmap_image(cdata, n);
3418
3419 // Ensure the PNMImage is an appropriate size.
3420 int x_size = do_get_expected_mipmap_x_size(cdata, n);
3421 int y_size = do_get_expected_mipmap_y_size(cdata, n);
3422 if (pnmimage.get_x_size() != x_size ||
3423 pnmimage.get_y_size() != y_size) {
3424 gobj_cat.info()
3425 << "Automatically rescaling " << name;
3426 if (n != 0) {
3427 gobj_cat.info(false)
3428 << " mipmap level " << n;
3429 }
3430 gobj_cat.info(false)
3431 << " from " << pnmimage.get_x_size() << " by "
3432 << pnmimage.get_y_size() << " to " << x_size << " by "
3433 << y_size << "\n";
3434
3435 PNMImage scaled(x_size, y_size, pnmimage.get_num_channels(),
3436 pnmimage.get_maxval(), pnmimage.get_type(),
3437 pnmimage.get_color_space());
3438 scaled.quick_filter_from(pnmimage);
3439 Thread::consider_yield();
3440
3441 convert_from_pnmimage(cdata->_ram_images[n]._image,
3442 do_get_expected_ram_mipmap_page_size(cdata, n),
3443 x_size, 0, 0, z, scaled,
3444 cdata->_num_components, cdata->_component_width);
3445 } else {
3446 // Now copy the pixel data from the PNMImage into our internal
3447 // cdata->_image component.
3448 convert_from_pnmimage(cdata->_ram_images[n]._image,
3449 do_get_expected_ram_mipmap_page_size(cdata, n),
3450 x_size, 0, 0, z, pnmimage,
3451 cdata->_num_components, cdata->_component_width);
3452 }
3453 Thread::consider_yield();
3454
3455 return true;
3456}
3457
3458/**
3459 * Internal method to load a single page or mipmap level.
3460 */
3461bool Texture::
3462do_load_one(CData *cdata, const PfmFile &pfm, const string &name, int z, int n,
3463 const LoaderOptions &options) {
3464 if (cdata->_ram_images.size() <= 1 && n == 0) {
3465 // A special case for mipmap level 0. When we load mipmap level 0, unless
3466 // we already have mipmap levels, it determines the image properties like
3467 // size and number of components.
3468 if (!do_reconsider_z_size(cdata, z, options)) {
3469 return false;
3470 }
3471 nassertr(z >= 0 && z < cdata->_z_size * cdata->_num_views, false);
3472
3473 if (z == 0) {
3474 ComponentType component_type = T_float;
3475 if (!do_reconsider_image_properties(cdata, pfm.get_x_size(), pfm.get_y_size(),
3476 pfm.get_num_channels(), component_type,
3477 z, options)) {
3478 return false;
3479 }
3480 }
3481
3482 do_modify_ram_image(cdata);
3483 cdata->_loaded_from_image = true;
3484 }
3485
3486 do_modify_ram_mipmap_image(cdata, n);
3487
3488 // Ensure the PfmFile is an appropriate size.
3489 int x_size = do_get_expected_mipmap_x_size(cdata, n);
3490 int y_size = do_get_expected_mipmap_y_size(cdata, n);
3491 if (pfm.get_x_size() != x_size ||
3492 pfm.get_y_size() != y_size) {
3493 gobj_cat.info()
3494 << "Automatically rescaling " << name;
3495 if (n != 0) {
3496 gobj_cat.info(false)
3497 << " mipmap level " << n;
3498 }
3499 gobj_cat.info(false)
3500 << " from " << pfm.get_x_size() << " by "
3501 << pfm.get_y_size() << " to " << x_size << " by "
3502 << y_size << "\n";
3503
3504 PfmFile scaled(pfm);
3505 scaled.resize(x_size, y_size);
3506 Thread::consider_yield();
3507
3508 convert_from_pfm(cdata->_ram_images[n]._image,
3509 do_get_expected_ram_mipmap_page_size(cdata, n), z,
3510 scaled, cdata->_num_components, cdata->_component_width);
3511 } else {
3512 // Now copy the pixel data from the PfmFile into our internal
3513 // cdata->_image component.
3514 convert_from_pfm(cdata->_ram_images[n]._image,
3515 do_get_expected_ram_mipmap_page_size(cdata, n), z,
3516 pfm, cdata->_num_components, cdata->_component_width);
3517 }
3518 Thread::consider_yield();
3519
3520 return true;
3521}
3522
3523/**
3524 * Internal method to load an image into a section of a texture page or mipmap
3525 * level.
3526 */
3527bool Texture::
3528do_load_sub_image(CData *cdata, const PNMImage &image, int x, int y, int z, int n) {
3529 nassertr(n >= 0 && (size_t)n < cdata->_ram_images.size(), false);
3530
3531 int tex_x_size = do_get_expected_mipmap_x_size(cdata, n);
3532 int tex_y_size = do_get_expected_mipmap_y_size(cdata, n);
3533 int tex_z_size = do_get_expected_mipmap_z_size(cdata, n);
3534
3535 nassertr(x >= 0 && x < tex_x_size, false);
3536 nassertr(y >= 0 && y < tex_y_size, false);
3537 nassertr(z >= 0 && z < tex_z_size, false);
3538
3539 nassertr(image.get_x_size() + x <= tex_x_size, false);
3540 nassertr(image.get_y_size() + y <= tex_y_size, false);
3541
3542 // Flip y
3543 y = cdata->_y_size - (image.get_y_size() + y);
3544
3545 cdata->inc_image_modified();
3546 do_modify_ram_mipmap_image(cdata, n);
3547 convert_from_pnmimage(cdata->_ram_images[n]._image,
3548 do_get_expected_ram_mipmap_page_size(cdata, n),
3549 tex_x_size, x, y, z, image,
3550 cdata->_num_components, cdata->_component_width);
3551
3552 return true;
3553}
3554
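Illustrative usage sketch (not part of texture.cxx): the published load_sub_image() entry point for the helper above; (x, y) is measured from the top-left corner, and the y-flip to the bottom-up RAM-image layout happens in do_load_sub_image(). It assumes the texture already has a RAM image large enough to hold the patch.

    PNMImage patch(64, 64, 4);
    patch.fill(1.0, 0.0, 0.0);
    patch.alpha_fill(1.0);                 // opaque red square
    tex->load_sub_image(patch, 128, 32);   // overwrite a 64 x 64 region of mipmap level 0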
3555/**
3556 * Called internally when read() detects a txo file. Assumes the lock is
3557 * already held.
3558 */
3559bool Texture::
3560do_read_txo_file(CData *cdata, const Filename &fullpath) {
3561 VirtualFileSystem *vfs = VirtualFileSystem::get_global_ptr();
3562
3563 Filename filename = Filename::binary_filename(fullpath);
3564 PT(VirtualFile) file = vfs->get_file(filename);
3565 if (file == nullptr) {
3566 // No such file.
3567 gobj_cat.error()
3568 << "Could not find " << fullpath << "\n";
3569 return false;
3570 }
3571
3572 if (gobj_cat.is_debug()) {
3573 gobj_cat.debug()
3574 << "Reading texture object " << filename << "\n";
3575 }
3576
3577 istream *in = file->open_read_file(true);
3578 if (in == nullptr) {
3579 gobj_cat.error()
3580 << "Failed to open " << filename << " for reading.\n";
3581 return false;
3582 }
3583
3584 bool success = do_read_txo(cdata, *in, fullpath);
3585 vfs->close_read_file(in);
3586
3587 cdata->_fullpath = fullpath;
3588 cdata->_alpha_fullpath = Filename();
3589 cdata->_keep_ram_image = false;
3590
3591 return success;
3592}
3593
3594/**
3595 *
3596 */
3597bool Texture::
3598do_read_txo(CData *cdata, istream &in, const string &filename) {
3599 PT(Texture) other = make_from_txo(in, filename);
3600 if (other == nullptr) {
3601 return false;
3602 }
3603
3604 CDReader cdata_other(other->_cycler);
3605 Namable::operator = (*other);
3606 do_assign(cdata, other, cdata_other);
3607
3608 cdata->_loaded_from_image = true;
3609 cdata->_loaded_from_txo = true;
3610 cdata->_has_read_pages = false;
3611 cdata->_has_read_mipmaps = false;
3612 cdata->_num_mipmap_levels_read = 0;
3613 return true;
3614}
3615
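Illustrative sketch (not part of texture.cxx): .txo is Panda's native texture-object format, so writing one out and reading it back preserves the RAM image, including any pre-compressed mipmap levels; the path is hypothetical.

    tex->write(Filename("models/brick.txo"));   // the .txo extension selects the texture-object writer
    PT(Texture) copy = TexturePool::load_texture("models/brick.txo");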
3616/**
3617 * Called internally when read() detects a DDS file. Assumes the lock is
3618 * already held.
3619 */
3620bool Texture::
3621do_read_dds_file(CData *cdata, const Filename &fullpath, bool header_only) {
3622 VirtualFileSystem *vfs = VirtualFileSystem::get_global_ptr();
3623
3624 Filename filename = Filename::binary_filename(fullpath);
3625 PT(VirtualFile) file = vfs->get_file(filename);
3626 if (file == nullptr) {
3627 // No such file.
3628 gobj_cat.error()
3629 << "Could not find " << fullpath << "\n";
3630 return false;
3631 }
3632
3633 if (gobj_cat.is_debug()) {
3634 gobj_cat.debug()
3635 << "Reading DDS file " << filename << "\n";
3636 }
3637
3638 istream *in = file->open_read_file(true);
3639 if (in == nullptr) {
3640 gobj_cat.error()
3641 << "Failed to open " << filename << " for reading.\n";
3642 return false;
3643 }
3644
3645 bool success = do_read_dds(cdata, *in, fullpath, header_only);
3646 vfs->close_read_file(in);
3647
3648 if (!has_name()) {
3649 set_name(fullpath.get_basename_wo_extension());
3650 }
3651
3652 cdata->_fullpath = fullpath;
3653 cdata->_alpha_fullpath = Filename();
3654 cdata->_keep_ram_image = false;
3655
3656 return success;
3657}
3658
3659/**
3660 *
3661 */
3662bool Texture::
3663do_read_dds(CData *cdata, istream &in, const string &filename, bool header_only) {
3664 StreamReader dds(in);
3665
3666 // DDS header (19 words)
3667 DDSHeader header;
3668 header.dds_magic = dds.get_uint32();
3669 header.dds_size = dds.get_uint32();
3670 header.dds_flags = dds.get_uint32();
3671 header.height = dds.get_uint32();
3672 header.width = dds.get_uint32();
3673 header.pitch = dds.get_uint32();
3674 header.depth = dds.get_uint32();
3675 header.num_levels = dds.get_uint32();
3676 dds.skip_bytes(44);
3677
3678 // Pixelformat (8 words)
3679 header.pf.pf_size = dds.get_uint32();
3680 header.pf.pf_flags = dds.get_uint32();
3681 header.pf.four_cc = dds.get_uint32();
3682 header.pf.rgb_bitcount = dds.get_uint32();
3683 header.pf.r_mask = dds.get_uint32();
3684 header.pf.g_mask = dds.get_uint32();
3685 header.pf.b_mask = dds.get_uint32();
3686 header.pf.a_mask = dds.get_uint32();
3687
3688 // Caps (4 words)
3689 header.caps.caps1 = dds.get_uint32();
3690 header.caps.caps2 = dds.get_uint32();
3691 header.caps.ddsx = dds.get_uint32();
3692 dds.skip_bytes(4);
3693
3694 // Pad out to 32 words
3695 dds.skip_bytes(4);
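  // Editorial note (not part of the original source): the reads above consume
  // 32 little-endian words in all -- the 4-byte magic word plus the 124-byte
  // fixed DDS header described by Microsoft's DDS_HEADER structure.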
3696
3697 if (header.dds_magic != DDS_MAGIC || (in.fail() || in.eof())) {
3698 gobj_cat.error()
3699 << filename << " is not a DDS file.\n";
3700 return false;
3701 }
3702
3703 if ((header.dds_flags & DDSD_MIPMAPCOUNT) == 0) {
3704 // No bit set means only the base mipmap level.
3705 header.num_levels = 1;
3706
3707 } else if (header.num_levels == 0) {
3708 // Some files seem to have this set to 0 for some reason--existing readers
3709 // assume 0 means 1.
3710 header.num_levels = 1;
3711 }
3712
3713 TextureType texture_type;
3714 if (header.caps.caps2 & DDSCAPS2_CUBEMAP) {
3715 static const unsigned int all_faces =
3716 (DDSCAPS2_CUBEMAP_POSITIVEX |
3717 DDSCAPS2_CUBEMAP_POSITIVEY |
3718 DDSCAPS2_CUBEMAP_POSITIVEZ |
3719 DDSCAPS2_CUBEMAP_NEGATIVEX |
3720 DDSCAPS2_CUBEMAP_NEGATIVEY |
3721 DDSCAPS2_CUBEMAP_NEGATIVEZ);
3722 if ((header.caps.caps2 & all_faces) != all_faces) {
3723 gobj_cat.error()
3724 << filename << " is missing some cube map faces; cannot load.\n";
3725 return false;
3726 }
3727 header.depth = 6;
3728 texture_type = TT_cube_map;
3729
3730 } else if (header.caps.caps2 & DDSCAPS2_VOLUME) {
3731 texture_type = TT_3d_texture;
3732
3733 } else {
3734 texture_type = TT_2d_texture;
3735 header.depth = 1;
3736 }
3737
3738 // Determine the function to use to read the DDS image.
3739 typedef PTA_uchar (*ReadDDSLevelFunc)(Texture *tex, Texture::CData *cdata,
3740 const DDSHeader &header, int n, istream &in);
3741 ReadDDSLevelFunc func = nullptr;
3742
3743 Format format = F_rgb;
3744 ComponentType component_type = T_unsigned_byte;
3745
3746 do_clear_ram_image(cdata);
3747 CompressionMode compression = CM_off;
3748
3749 if ((header.pf.pf_flags & DDPF_FOURCC) != 0 &&
3750 header.pf.four_cc == 0x30315844) { // 'DX10'
3751 // A DirectX 10 style texture, which has an additional header.
3752 func = read_dds_level_generic_uncompressed;
3753 unsigned int dxgi_format = dds.get_uint32();
3754 unsigned int dimension = dds.get_uint32();
3755 unsigned int misc_flag = dds.get_uint32();
3756 unsigned int array_size = dds.get_uint32();
3757 /*unsigned int alpha_mode = */dds.get_uint32();
3758
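// Map the DXGI format onto Panda's Format and ComponentType. Formats without a
// dedicated reader below fall back to the generic uncompressed reader chosen above.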
3759 switch (dxgi_format) {
3760 case 2: // DXGI_FORMAT_R32G32B32A32_FLOAT
3761 format = F_rgba32;
3762 component_type = T_float;
3763 func = read_dds_level_abgr32;
3764 break;
3765 case 10: // DXGI_FORMAT_R16G16B16A16_FLOAT
3766 format = F_rgba16;
3767 component_type = T_half_float;
3768 func = read_dds_level_abgr16;
3769 break;
3770 case 11: // DXGI_FORMAT_R16G16B16A16_UNORM
3771 format = F_rgba16;
3772 component_type = T_unsigned_short;
3773 func = read_dds_level_abgr16;
3774 break;
3775 case 12: // DXGI_FORMAT_R16G16B16A16_UINT
3776 format = F_rgba16i;
3777 component_type = T_unsigned_short;
3778 func = read_dds_level_abgr16;
3779 break;
3780 case 14: // DXGI_FORMAT_R16G16B16A16_SINT
3781 format = F_rgba16i;
3782 component_type = T_short;
3783 func = read_dds_level_abgr16;
3784 break;
3785 case 16: // DXGI_FORMAT_R32G32_FLOAT
3786 format = F_rg32;
3787 component_type = T_float;
3788 func = read_dds_level_raw;
3789 break;
3790 case 17: // DXGI_FORMAT_R32G32_UINT
3791 format = F_rg32i;
3792 component_type = T_unsigned_int;
3793 func = read_dds_level_raw;
3794 break;
3795 case 18: // DXGI_FORMAT_R32G32_SINT
3796 format = F_rg32i;
3797 component_type = T_int;
3798 func = read_dds_level_raw;
3799 break;
3800 case 27: // DXGI_FORMAT_R8G8B8A8_TYPELESS
3801 case 28: // DXGI_FORMAT_R8G8B8A8_UNORM
3802 format = F_rgba8;
3803 func = read_dds_level_abgr8;
3804 break;
3805 case 29: // DXGI_FORMAT_R8G8B8A8_UNORM_SRGB
3806 format = F_srgb_alpha;
3807 func = read_dds_level_abgr8;
3808 break;
3809 case 30: // DXGI_FORMAT_R8G8B8A8_UINT
3810 format = F_rgba8i;
3811 func = read_dds_level_abgr8;
3812 break;
3813 case 31: // DXGI_FORMAT_R8G8B8A8_SNORM
3814 format = F_rgba8;
3815 component_type = T_byte;
3816 func = read_dds_level_abgr8;
3817 break;
3818 case 32: // DXGI_FORMAT_R8G8B8A8_SINT
3819 format = F_rgba8i;
3820 component_type = T_byte;
3821 func = read_dds_level_abgr8;
3822 break;
3823 case 34: // DXGI_FORMAT_R16G16_FLOAT:
3824 format = F_rg16;
3825 component_type = T_half_float;
3826 func = read_dds_level_raw;
3827 break;
3828 case 35: // DXGI_FORMAT_R16G16_UNORM:
3829 format = F_rg16;
3830 component_type = T_unsigned_short;
3831 func = read_dds_level_raw;
3832 break;
3833 case 36: // DXGI_FORMAT_R16G16_UINT:
3834 format = F_rg16i;
3835 component_type = T_unsigned_short;
3836 func = read_dds_level_raw;
3837 break;
3838 case 37: // DXGI_FORMAT_R16G16_SNORM:
3839 format = F_rg16;
3840 component_type = T_short;
3841 func = read_dds_level_raw;
3842 break;
3843 case 38: // DXGI_FORMAT_R16G16_SINT:
3844 format = F_rg16i;
3845 component_type = T_short;
3846 func = read_dds_level_raw;
3847 break;
3848 case 40: // DXGI_FORMAT_D32_FLOAT
3849 format = F_depth_component32;
3850 component_type = T_float;
3851 func = read_dds_level_raw;
3852 break;
3853 case 41: // DXGI_FORMAT_R32_FLOAT
3854 format = F_r32;
3855 component_type = T_float;
3856 func = read_dds_level_raw;
3857 break;
3858 case 42: // DXGI_FORMAT_R32_UINT
3859 format = F_r32i;
3860 component_type = T_unsigned_int;
3861 func = read_dds_level_raw;
3862 break;
3863 case 43: // DXGI_FORMAT_R32_SINT
3864 format = F_r32i;
3865 component_type = T_int;
3866 func = read_dds_level_raw;
3867 break;
3868 case 48: // DXGI_FORMAT_R8G8_TYPELESS
3869 case 49: // DXGI_FORMAT_R8G8_UNORM
3870 format = F_rg;
3871 break;
3872 case 50: // DXGI_FORMAT_R8G8_UINT
3873 format = F_rg8i;
3874 break;
3875 case 51: // DXGI_FORMAT_R8G8_SNORM
3876 format = F_rg;
3877 component_type = T_byte;
3878 break;
3879 case 52: // DXGI_FORMAT_R8G8_SINT
3880 format = F_rg8i;
3881 component_type = T_byte;
3882 break;
3883 case 54: // DXGI_FORMAT_R16_FLOAT:
3884 format = F_r16;
3885 component_type = T_half_float;
3886 func = read_dds_level_raw;
3887 break;
3888 case 55: // DXGI_FORMAT_D16_UNORM:
3889 format = F_depth_component16;
3890 component_type = T_unsigned_short;
3891 func = read_dds_level_raw;
3892 break;
3893 case 56: // DXGI_FORMAT_R16_UNORM:
3894 format = F_r16;
3895 component_type = T_unsigned_short;
3896 func = read_dds_level_raw;
3897 break;
3898 case 57: // DXGI_FORMAT_R16_UINT:
3899 format = F_r16i;
3900 component_type = T_unsigned_short;
3901 func = read_dds_level_raw;
3902 break;
3903 case 58: // DXGI_FORMAT_R16_SNORM:
3904 format = F_r16;
3905 component_type = T_short;
3906 func = read_dds_level_raw;
3907 break;
3908 case 59: // DXGI_FORMAT_R16_SINT:
3909 format = F_r16i;
3910 component_type = T_short;
3911 func = read_dds_level_raw;
3912 break;
3913 case 60: // DXGI_FORMAT_R8_TYPELESS
3914 case 61: // DXGI_FORMAT_R8_UNORM
3915 format = F_red;
3916 break;
3917 case 62: // DXGI_FORMAT_R8_UINT
3918 format = F_r8i;
3919 break;
3920 case 63: // DXGI_FORMAT_R8_SNORM
3921 format = F_red;
3922 component_type = T_byte;
3923 break;
3924 case 64: // DXGI_FORMAT_R8_SINT
3925 format = F_r8i;
3926 component_type = T_byte;
3927 break;
3928 case 65: // DXGI_FORMAT_A8_UNORM
3929 format = F_alpha;
3930 break;
3931 case 70: // DXGI_FORMAT_BC1_TYPELESS
3932 case 71: // DXGI_FORMAT_BC1_UNORM
3933 format = F_rgb;
3934 compression = CM_dxt1;
3935 func = read_dds_level_bc1;
3936 break;
3937 case 72: // DXGI_FORMAT_BC1_UNORM_SRGB
3938 format = F_srgb;
3939 compression = CM_dxt1;
3940 func = read_dds_level_bc1;
3941 break;
3942 case 73: // DXGI_FORMAT_BC2_TYPELESS
3943 case 74: // DXGI_FORMAT_BC2_UNORM
3944 format = F_rgba;
3945 compression = CM_dxt3;
3946 func = read_dds_level_bc2;
3947 break;
3948 case 75: // DXGI_FORMAT_BC2_UNORM_SRGB
3949 format = F_srgb_alpha;
3950 compression = CM_dxt3;
3951 func = read_dds_level_bc2;
3952 break;
3953 case 76: // DXGI_FORMAT_BC3_TYPELESS
3954 case 77: // DXGI_FORMAT_BC3_UNORM
3955 format = F_rgba;
3956 compression = CM_dxt5;
3957 func = read_dds_level_bc3;
3958 break;
3959 case 78: // DXGI_FORMAT_BC3_UNORM_SRGB
3960 format = F_srgb_alpha;
3961 compression = CM_dxt5;
3962 func = read_dds_level_bc3;
3963 break;
3964 case 79: // DXGI_FORMAT_BC4_TYPELESS
3965 case 80: // DXGI_FORMAT_BC4_UNORM
3966 format = F_red;
3967 compression = CM_rgtc;
3968 func = read_dds_level_bc4;
3969 break;
3970 case 82: // DXGI_FORMAT_BC5_TYPELESS
3971 case 83: // DXGI_FORMAT_BC5_UNORM
3972 format = F_rg;
3973 compression = CM_rgtc;
3974 func = read_dds_level_bc5;
3975 break;
3976 case 87: // DXGI_FORMAT_B8G8R8A8_UNORM
3977 case 90: // DXGI_FORMAT_B8G8R8A8_TYPELESS
3978 format = F_rgba8;
3979 break;
3980 case 88: // DXGI_FORMAT_B8G8R8X8_UNORM
3981 case 92: // DXGI_FORMAT_B8G8R8X8_TYPELESS
3982 format = F_rgb8;
3983 break;
3984 case 91: // DXGI_FORMAT_B8G8R8A8_UNORM_SRGB
3985 format = F_srgb_alpha;
3986 break;
3987 case 93: // DXGI_FORMAT_B8G8R8X8_UNORM_SRGB
3988 format = F_srgb;
3989 break;
3990 case 115: // DXGI_FORMAT_B4G4R4A4_UNORM
3991 format = F_rgba4;
3992 break;
3993 default:
3994 gobj_cat.error()
3995 << filename << ": unsupported DXGI format " << dxgi_format << ".\n";
3996 return false;
3997 }
3998
3999 switch (dimension) {
4000 case 2: // DDS_DIMENSION_TEXTURE1D
4001 texture_type = TT_1d_texture;
4002 header.depth = 1;
4003 break;
4004 case 3: // DDS_DIMENSION_TEXTURE2D
4005 if (misc_flag & 0x4) { // DDS_RESOURCE_MISC_TEXTURECUBE
4006 if (array_size > 1) {
4007 texture_type = TT_cube_map_array;
4008 header.depth = array_size * 6;
4009 } else {
4010 texture_type = TT_cube_map;
4011 header.depth = 6;
4012 }
4013 } else {
4014 if (array_size > 1) {
4015 texture_type = TT_2d_texture_array;
4016 header.depth = array_size;
4017 } else {
4018 texture_type = TT_2d_texture;
4019 header.depth = 1;
4020 }
4021 }
4022 break;
4023 case 4: // DDS_DIMENSION_TEXTURE3D
4024 texture_type = TT_3d_texture;
4025 break;
4026 default:
4027 gobj_cat.error()
4028 << filename << ": unsupported dimension.\n";
4029 return false;
4030 }
4031
4032 } else if (header.pf.pf_flags & DDPF_FOURCC) {
4033 // Some compressed texture format.
4034 if (texture_type == TT_3d_texture) {
4035 gobj_cat.error()
4036 << filename << ": unsupported compression on 3-d texture.\n";
4037 return false;
4038 }
4039
4040 // Most of the compressed formats support alpha.
4041 format = F_rgba;
4042 switch (header.pf.four_cc) {
4043 case 0x31545844: // 'DXT1', little-endian.
4044 compression = CM_dxt1;
4045 func = read_dds_level_bc1;
4046 format = F_rgbm;
4047 break;
4048 case 0x32545844: // 'DXT2'
4049 compression = CM_dxt2;
4050 func = read_dds_level_bc2;
4051 break;
4052 case 0x33545844: // 'DXT3'
4053 compression = CM_dxt3;
4054 func = read_dds_level_bc2;
4055 break;
4056 case 0x34545844: // 'DXT4'
4057 compression = CM_dxt4;
4058 func = read_dds_level_bc3;
4059 break;
4060 case 0x35545844: // 'DXT5'
4061 compression = CM_dxt5;
4062 func = read_dds_level_bc3;
4063 break;
4064 case 0x31495441: // 'ATI1'
4065 case 0x55344342: // 'BC4U'
4066 compression = CM_rgtc;
4067 func = read_dds_level_bc4;
4068 format = F_red;
4069 break;
4070 case 0x32495441: // 'ATI2'
4071 case 0x55354342: // 'BC5U'
4072 compression = CM_rgtc;
4073 func = read_dds_level_bc5;
4074 format = F_rg;
4075 break;
4076 case 36: // D3DFMT_A16B16G16R16
4077 func = read_dds_level_abgr16;
4078 format = F_rgba16;
4079 component_type = T_unsigned_short;
4080 break;
4081 case 110: // D3DFMT_Q16W16V16U16
4082 func = read_dds_level_abgr16;
4083 format = F_rgba16;
4084 component_type = T_short;
4085 break;
4086 case 113: // D3DFMT_A16B16G16R16F
4087 func = read_dds_level_abgr16;
4088 format = F_rgba16;
4089 component_type = T_half_float;
4090 break;
4091 case 116: // D3DFMT_A32B32G32R32F
4092 func = read_dds_level_abgr32;
4093 format = F_rgba32;
4094 component_type = T_float;
4095 break;
4096 default:
4097 gobj_cat.error()
4098 << filename << ": unsupported texture compression (FourCC: 0x"
4099 << std::hex << header.pf.four_cc << std::dec << ").\n";
4100 return false;
4101 }
4102
4103 } else {
4104 // An uncompressed texture format.
4105 func = read_dds_level_generic_uncompressed;
4106
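// Recognize the common BGRA/RGBA (and BGR/RGB) channel masks, which have dedicated
// fast readers; single-channel masks use the luminance reader, and any other mask
// combination goes through the generic mask-driven reader.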
4107 if (header.pf.pf_flags & DDPF_ALPHAPIXELS) {
4108 // An uncompressed format that involves alpha.
4109 format = F_rgba;
4110 if (header.pf.rgb_bitcount == 32 &&
4111 header.pf.r_mask == 0x000000ff &&
4112 header.pf.g_mask == 0x0000ff00 &&
4113 header.pf.b_mask == 0x00ff0000 &&
4114 header.pf.a_mask == 0xff000000U) {
4115 func = read_dds_level_abgr8;
4116 } else if (header.pf.rgb_bitcount == 32 &&
4117 header.pf.r_mask == 0x00ff0000 &&
4118 header.pf.g_mask == 0x0000ff00 &&
4119 header.pf.b_mask == 0x000000ff &&
4120 header.pf.a_mask == 0xff000000U) {
4121 func = read_dds_level_rgba8;
4122
4123 } else if (header.pf.r_mask != 0 &&
4124 header.pf.g_mask == 0 &&
4125 header.pf.b_mask == 0) {
4126 func = read_dds_level_luminance_uncompressed;
4127 format = F_luminance_alpha;
4128 }
4129 } else {
4130 // An uncompressed format that doesn't involve alpha.
4131 if (header.pf.rgb_bitcount == 24 &&
4132 header.pf.r_mask == 0x00ff0000 &&
4133 header.pf.g_mask == 0x0000ff00 &&
4134 header.pf.b_mask == 0x000000ff) {
4135 func = read_dds_level_bgr8;
4136 } else if (header.pf.rgb_bitcount == 24 &&
4137 header.pf.r_mask == 0x000000ff &&
4138 header.pf.g_mask == 0x0000ff00 &&
4139 header.pf.b_mask == 0x00ff0000) {
4140 func = read_dds_level_rgb8;
4141
4142 } else if (header.pf.r_mask != 0 &&
4143 header.pf.g_mask == 0 &&
4144 header.pf.b_mask == 0) {
4145 func = read_dds_level_luminance_uncompressed;
4146 format = F_luminance;
4147 }
4148 }
4149 }
4150
4151 do_setup_texture(cdata, texture_type, header.width, header.height, header.depth,
4152 component_type, format);
4153
4154 cdata->_orig_file_x_size = cdata->_x_size;
4155 cdata->_orig_file_y_size = cdata->_y_size;
4156 cdata->_compression = compression;
4157 cdata->_ram_image_compression = compression;
4158
4159 if (!header_only) {
4160 switch (texture_type) {
4161 case TT_3d_texture:
4162 {
4163 // 3-d textures store all the depth slices for mipmap level 0, then
4164 // all the depth slices for mipmap level 1, and so on.
4165 for (int n = 0; n < (int)header.num_levels; ++n) {
4166 int z_size = do_get_expected_mipmap_z_size(cdata, n);
4167 pvector<PTA_uchar> pages;
4168 size_t page_size = 0;
4169 int z;
4170 for (z = 0; z < z_size; ++z) {
4171 PTA_uchar page = func(this, cdata, header, n, in);
4172 if (page.is_null()) {
4173 return false;
4174 }
4175 nassertr(page_size == 0 || page_size == page.size(), false);
4176 page_size = page.size();
4177 pages.push_back(page);
4178 }
4179 // Now reassemble the pages into one big image. Because this is a
4180 // Microsoft format, the images are stacked in reverse order; re-
4181 // reverse them.
4182 PTA_uchar image = PTA_uchar::empty_array(page_size * z_size);
4183 unsigned char *imagep = (unsigned char *)image.p();
4184 for (z = 0; z < z_size; ++z) {
4185 int fz = z_size - 1 - z;
4186 memcpy(imagep + z * page_size, pages[fz].p(), page_size);
4187 }
4188
4189 do_set_ram_mipmap_image(cdata, n, image, page_size);
4190 }
4191 }
4192 break;
4193
4194 case TT_cube_map:
4195 {
4196 // Cube maps store all the mipmap levels for face 0, then all the
4197 // mipmap levels for face 1, and so on.
4198 pvector<pvector<PTA_uchar> > pages;
4199 pages.reserve(6);
4200 int z, n;
4201 for (z = 0; z < 6; ++z) {
4202 pages.push_back(pvector<PTA_uchar>());
4203 pvector<PTA_uchar> &levels = pages.back();
4204 levels.reserve(header.num_levels);
4205
4206 for (n = 0; n < (int)header.num_levels; ++n) {
4207 PTA_uchar image = func(this, cdata, header, n, in);
4208 if (image.is_null()) {
4209 return false;
4210 }
4211 levels.push_back(image);
4212 }
4213 }
4214
4215 // Now, for each level, reassemble the pages into one big image.
4216 // Because this is a Microsoft format, the faces are arranged in a
4217 // rotated order.
4218 static const int level_remap[6] = {
4219 0, 1, 5, 4, 2, 3
4220 };
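// level_remap[z] is the index of the DDS face that supplies Panda's face z.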
4221 for (n = 0; n < (int)header.num_levels; ++n) {
4222 size_t page_size = pages[0][n].size();
4223 PTA_uchar image = PTA_uchar::empty_array(page_size * 6);
4224 unsigned char *imagep = (unsigned char *)image.p();
4225 for (z = 0; z < 6; ++z) {
4226 int fz = level_remap[z];
4227 nassertr(pages[fz][n].size() == page_size, false);
4228 memcpy(imagep + z * page_size, pages[fz][n].p(), page_size);
4229 }
4230
4231 do_set_ram_mipmap_image(cdata, n, image, page_size);
4232 }
4233 }
4234 break;
4235
4236 case TT_2d_texture_array:
4237 case TT_cube_map_array: //TODO: rearrange cube map array faces?
4238 {
4239 // Texture arrays store all the mipmap levels for layer 0, then all
4240 // the mipmap levels for layer 1, and so on.
4241 pvector<pvector<PTA_uchar> > pages;
4242 pages.reserve(header.depth);
4243 int z, n;
4244 for (z = 0; z < (int)header.depth; ++z) {
4245 pages.push_back(pvector<PTA_uchar>());
4246 pvector<PTA_uchar> &levels = pages.back();
4247 levels.reserve(header.num_levels);
4248
4249 for (n = 0; n < (int)header.num_levels; ++n) {
4250 PTA_uchar image = func(this, cdata, header, n, in);
4251 if (image.is_null()) {
4252 return false;
4253 }
4254 levels.push_back(image);
4255 }
4256 }
4257
4258 // Now, for each level, reassemble the pages into one big image.
4259 for (n = 0; n < (int)header.num_levels; ++n) {
4260 size_t page_size = pages[0][n].size();
4261 PTA_uchar image = PTA_uchar::empty_array(page_size * header.depth);
4262 unsigned char *imagep = (unsigned char *)image.p();
4263 for (z = 0; z < (int)header.depth; ++z) {
4264 nassertr(pages[z][n].size() == page_size, false);
4265 memcpy(imagep + z * page_size, pages[z][n].p(), page_size);
4266 }
4267
4268 do_set_ram_mipmap_image(cdata, n, image, page_size);
4269 }
4270 }
4271 break;
4272
4273 default:
4274 // Normal 2-d textures simply store the mipmap levels.
4275 {
4276 for (int n = 0; n < (int)header.num_levels; ++n) {
4277 PTA_uchar image = func(this, cdata, header, n, in);
4278 if (image.is_null()) {
4279 return false;
4280 }
4281 do_set_ram_mipmap_image(cdata, n, image, 0);
4282 }
4283 }
4284 }
4285 cdata->_has_read_pages = true;
4286 cdata->_has_read_mipmaps = true;
4287 cdata->_num_mipmap_levels_read = cdata->_ram_images.size();
4288 }
4289
4290 if (in.fail()) {
4291 gobj_cat.error()
4292 << filename << ": truncated DDS file.\n";
4293 return false;
4294 }
4295
4296 cdata->_loaded_from_image = true;
4297 cdata->_loaded_from_txo = true;
4298
4299 return true;
4300}
4301
4302/**
4303 * Called internally when read() detects a KTX file. Assumes the lock is
4304 * already held.
4305 */
4306bool Texture::
4307 do_read_ktx_file(CData *cdata, const Filename &fullpath, bool header_only) {
4308 VirtualFileSystem *vfs = VirtualFileSystem::get_global_ptr();
4309
4310 Filename filename = Filename::binary_filename(fullpath);
4311 PT(VirtualFile) file = vfs->get_file(filename);
4312 if (file == nullptr) {
4313 // No such file.
4314 gobj_cat.error()
4315 << "Could not find " << fullpath << "\n";
4316 return false;
4317 }
4318
4319 if (gobj_cat.is_debug()) {
4320 gobj_cat.debug()
4321 << "Reading KTX file " << filename << "\n";
4322 }
4323
4324 istream *in = file->open_read_file(true);
4325 if (in == nullptr) {
4326 gobj_cat.error()
4327 << "Failed to open " << filename << " for reading.\n";
4328 return false;
4329 }
4330
4331 bool success = do_read_ktx(cdata, *in, fullpath, header_only);
4332 vfs->close_read_file(in);
4333
4334 if (!has_name()) {
4335 set_name(fullpath.get_basename_wo_extension());
4336 }
4337
4338 cdata->_fullpath = fullpath;
4339 cdata->_alpha_fullpath = Filename();
4340 cdata->_keep_ram_image = false;
4341
4342 return success;
4343}
4344
4345/**
4346 *
4347 */
4348bool Texture::
4349do_read_ktx(CData *cdata, istream &in, const string &filename, bool header_only) {
4350 StreamReader ktx(in);
4351
4352 unsigned char magic[12];
4353 if (ktx.extract_bytes(magic, 12) != 12 ||
4354 memcmp(magic, "\xABKTX 11\xBB\r\n\x1A\n", 12) != 0) {
4355 gobj_cat.error()
4356 << filename << " is not a KTX file.\n";
4357 return false;
4358 }
4359
4360 // See: https://www.khronos.org/opengles/sdk/tools/KTX/file_format_spec/
4361 uint32_t gl_type, /*type_size,*/ gl_format, internal_format, gl_base_format,
4362 width, height, depth, num_array_elements, num_faces, num_mipmap_levels,
4363 kvdata_size;
4364
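// The endianness marker reads back as 0x04030201 when the file is in the
// little-endian order that StreamReader::get_uint32() assumes; otherwise every
// header field (and each image size below) must be read byte-swapped.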
4365 bool big_endian;
4366 if (ktx.get_uint32() == 0x04030201) {
4367 big_endian = false;
4368 gl_type = ktx.get_uint32();
4369 /*type_size = */ktx.get_uint32();
4370 gl_format = ktx.get_uint32();
4371 internal_format = ktx.get_uint32();
4372 gl_base_format = ktx.get_uint32();
4373 width = ktx.get_uint32();
4374 height = ktx.get_uint32();
4375 depth = ktx.get_uint32();
4376 num_array_elements = ktx.get_uint32();
4377 num_faces = ktx.get_uint32();
4378 num_mipmap_levels = ktx.get_uint32();
4379 kvdata_size = ktx.get_uint32();
4380 } else {
4381 big_endian = true;
4382 gl_type = ktx.get_be_uint32();
4383 /*type_size = */ktx.get_be_uint32();
4384 gl_format = ktx.get_be_uint32();
4385 internal_format = ktx.get_be_uint32();
4386 gl_base_format = ktx.get_be_uint32();
4387 width = ktx.get_be_uint32();
4388 height = ktx.get_be_uint32();
4389 depth = ktx.get_be_uint32();
4390 num_array_elements = ktx.get_be_uint32();
4391 num_faces = ktx.get_be_uint32();
4392 num_mipmap_levels = ktx.get_be_uint32();
4393 kvdata_size = ktx.get_be_uint32();
4394 }
4395
4396 // Skip metadata section.
4397 ktx.skip_bytes(kvdata_size);
4398
4399 ComponentType type;
4400 CompressionMode compression;
4401 Format format;
4402 bool swap_bgr = false;
4403
4404 if (gl_type == 0 || gl_format == 0) {
4405 // Compressed texture.
4406 if (gl_type > 0 || gl_format > 0) {
4407 gobj_cat.error()
4408 << "Compressed textures must have both type and format set to 0.\n";
4409 return false;
4410 }
4411 type = T_unsigned_byte;
4412 compression = CM_on;
4413
4414 KTXFormat base_format;
4415 switch ((KTXCompressedFormat)internal_format) {
4416 case KTX_COMPRESSED_RED:
4417 format = F_red;
4418 base_format = KTX_RED;
4419 break;
4420 case KTX_COMPRESSED_RG:
4421 format = F_rg;
4422 base_format = KTX_RG;
4423 break;
4424 case KTX_COMPRESSED_RGB:
4425 format = F_rgb;
4426 base_format = KTX_RGB;
4427 break;
4428 case KTX_COMPRESSED_RGBA:
4429 format = F_rgba;
4430 base_format = KTX_RGBA;
4431 break;
4432 case KTX_COMPRESSED_SRGB:
4433 format = F_srgb;
4434 base_format = KTX_SRGB;
4435 break;
4436 case KTX_COMPRESSED_SRGB_ALPHA:
4437 format = F_srgb_alpha;
4438 base_format = KTX_SRGB_ALPHA;
4439 break;
4440 case KTX_COMPRESSED_RGB_FXT1_3DFX:
4441 format = F_rgb;
4442 base_format = KTX_RGB;
4443 compression = CM_fxt1;
4444 break;
4445 case KTX_COMPRESSED_RGBA_FXT1_3DFX:
4446 format = F_rgba;
4447 base_format = KTX_RGBA;
4448 compression = CM_fxt1;
4449 break;
4450 case KTX_COMPRESSED_RGB_S3TC_DXT1:
4451 format = F_rgb;
4452 base_format = KTX_RGB;
4453 compression = CM_dxt1;
4454 break;
4455 case KTX_COMPRESSED_RGBA_S3TC_DXT1:
4456 format = F_rgbm;
4457 base_format = KTX_RGB;
4458 compression = CM_dxt1;
4459 break;
4460 case KTX_COMPRESSED_RGBA_S3TC_DXT3:
4461 format = F_rgba;
4462 base_format = KTX_RGBA;
4463 compression = CM_dxt3;
4464 break;
4465 case KTX_COMPRESSED_RGBA_S3TC_DXT5:
4466 format = F_rgba;
4467 base_format = KTX_RGBA;
4468 compression = CM_dxt5;
4469 break;
4470 case KTX_COMPRESSED_SRGB_ALPHA_S3TC_DXT1:
4471 format = F_srgb_alpha;
4472 base_format = KTX_SRGB_ALPHA;
4473 compression = CM_dxt1;
4474 break;
4475 case KTX_COMPRESSED_SRGB_ALPHA_S3TC_DXT3:
4476 format = F_srgb_alpha;
4477 base_format = KTX_SRGB_ALPHA;
4478 compression = CM_dxt3;
4479 break;
4480 case KTX_COMPRESSED_SRGB_ALPHA_S3TC_DXT5:
4481 format = F_srgb_alpha;
4482 base_format = KTX_SRGB_ALPHA;
4483 compression = CM_dxt5;
4484 break;
4485 case KTX_COMPRESSED_SRGB_S3TC_DXT1:
4486 format = F_srgb;
4487 base_format = KTX_SRGB;
4488 compression = CM_dxt1;
4489 break;
4490 case KTX_COMPRESSED_RED_RGTC1:
4491 case KTX_COMPRESSED_SIGNED_RED_RGTC1:
4492 format = F_red;
4493 base_format = KTX_RED;
4494 compression = CM_rgtc;
4495 break;
4496 case KTX_COMPRESSED_RG_RGTC2:
4497 case KTX_COMPRESSED_SIGNED_RG_RGTC2:
4498 format = F_rg;
4499 base_format = KTX_RG;
4500 compression = CM_rgtc;
4501 break;
4502 case KTX_ETC1_RGB8:
4503 format = F_rgb;
4504 base_format = KTX_RGB;
4505 compression = CM_etc1;
4506 break;
4507 case KTX_ETC1_SRGB8:
4508 format = F_srgb;
4509 base_format = KTX_SRGB;
4510 compression = CM_etc1;
4511 break;
4512 case KTX_COMPRESSED_RGB8_ETC2:
4513 format = F_rgb;
4514 base_format = KTX_RGB;
4515 compression = CM_etc2;
4516 break;
4517 case KTX_COMPRESSED_SRGB8_ETC2:
4518 format = F_srgb;
4519 base_format = KTX_SRGB;
4520 compression = CM_etc2;
4521 break;
4522 case KTX_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2:
4523 format = F_rgbm;
4524 base_format = KTX_RGBA;
4525 compression = CM_etc2;
4526 break;
4527 case KTX_COMPRESSED_SRGB8_PUNCHTHROUGH_ALPHA1_ETC2:
4528 format = F_rgbm;
4529 base_format = KTX_SRGB8_ALPHA8;
4530 compression = CM_etc2;
4531 break;
4532 case KTX_COMPRESSED_RGBA8_ETC2_EAC:
4533 format = F_rgba;
4534 base_format = KTX_RGBA;
4535 compression = CM_etc2;
4536 break;
4537 case KTX_COMPRESSED_SRGB8_ALPHA8_ETC2_EAC:
4538 format = F_srgb_alpha;
4539 base_format = KTX_SRGB8_ALPHA8;
4540 compression = CM_etc2;
4541 break;
4542 case KTX_COMPRESSED_R11_EAC:
4543 case KTX_COMPRESSED_SIGNED_R11_EAC:
4544 format = F_red;
4545 base_format = KTX_RED;
4546 compression = CM_eac;
4547 break;
4548 case KTX_COMPRESSED_RG11_EAC:
4549 case KTX_COMPRESSED_SIGNED_RG11_EAC:
4550 format = F_rg;
4551 base_format = KTX_RG;
4552 compression = CM_eac;
4553 break;
4554 case KTX_COMPRESSED_SRGB_ALPHA_PVRTC_2BPPV1:
4555 format = F_srgb_alpha;
4556 base_format = KTX_SRGB_ALPHA;
4557 compression = CM_pvr1_2bpp;
4558 break;
4559 case KTX_COMPRESSED_SRGB_ALPHA_PVRTC_4BPPV1:
4560 format = F_srgb_alpha;
4561 base_format = KTX_SRGB_ALPHA;
4562 compression = CM_pvr1_4bpp;
4563 break;
4564 case KTX_COMPRESSED_RGBA_BPTC_UNORM:
4565 case KTX_COMPRESSED_SRGB_ALPHA_BPTC_UNORM:
4566 case KTX_COMPRESSED_RGB_BPTC_SIGNED_FLOAT:
4567 case KTX_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT:
4568 default:
4569 gobj_cat.error()
4570 << filename << " has unsupported compressed internal format " << internal_format << "\n";
4571 return false;
4572 }
4573
4574 if (base_format != gl_base_format) {
4575 gobj_cat.error()
4576 << filename << " has internal format that is incompatible with base "
4577 "format (0x" << std::hex << gl_base_format << ", expected 0x"
4578 << base_format << std::dec << ")\n";
4579 return false;
4580 }
4581
4582 } else {
4583 // Uncompressed texture.
4584 compression = CM_off;
4585 switch ((KTXType)gl_type) {
4586 case KTX_BYTE:
4587 type = T_byte;
4588 break;
4589 case KTX_UNSIGNED_BYTE:
4590 type = T_unsigned_byte;
4591 break;
4592 case KTX_SHORT:
4593 type = T_short;
4594 break;
4595 case KTX_UNSIGNED_SHORT:
4596 type = T_unsigned_short;
4597 break;
4598 case KTX_INT:
4599 type = T_int;
4600 break;
4601 case KTX_UNSIGNED_INT:
4602 type = T_unsigned_int;
4603 break;
4604 case KTX_FLOAT:
4605 type = T_float;
4606 break;
4607 case KTX_HALF_FLOAT:
4608 type = T_half_float;
4609 break;
4610 case KTX_UNSIGNED_INT_24_8:
4611 type = T_unsigned_int_24_8;
4612 break;
4613 default:
4614 gobj_cat.error()
4615 << filename << " has unsupported component type " << gl_type << "\n";
4616 return false;
4617 }
4618
4619 if (gl_format != gl_base_format) {
4620 gobj_cat.error()
4621 << filename << " has mismatched formats: " << gl_format << " != "
4622 << gl_base_format << "\n";
4623 }
4624
4625 switch (gl_format) {
4626 case KTX_DEPTH_COMPONENT:
4627 switch (internal_format) {
4628 case KTX_DEPTH_COMPONENT:
4629 format = F_depth_component;
4630 break;
4631 case KTX_DEPTH_COMPONENT16:
4632 format = F_depth_component16;
4633 break;
4634 case KTX_DEPTH_COMPONENT24:
4635 format = F_depth_component24;
4636 break;
4637 case KTX_DEPTH_COMPONENT32:
4638 case KTX_DEPTH_COMPONENT32F:
4639 format = F_depth_component32;
4640 break;
4641 default:
4642 format = F_depth_component;
4643 gobj_cat.warning()
4644 << filename << " has unsupported depth component format " << internal_format << "\n";
4645 }
4646 break;
4647
4648 case KTX_DEPTH_STENCIL:
4649 format = F_depth_stencil;
4650 if (internal_format != KTX_DEPTH_STENCIL &&
4651 internal_format != KTX_DEPTH24_STENCIL8) {
4652 gobj_cat.warning()
4653 << filename << " has unsupported depth stencil format " << internal_format << "\n";
4654 }
4655 break;
4656
4657 case KTX_RED:
4658 switch (internal_format) {
4659 case KTX_RED:
4660 case KTX_RED_SNORM:
4661 case KTX_R8:
4662 case KTX_R8_SNORM:
4663 format = F_red;
4664 break;
4665 case KTX_R16:
4666 case KTX_R16_SNORM:
4667 case KTX_R16F:
4668 format = F_r16;
4669 break;
4670 case KTX_R32F:
4671 format = F_r32;
4672 break;
4673 default:
4674 format = F_red;
4675 gobj_cat.warning()
4676 << filename << " has unsupported red format " << internal_format << "\n";
4677 }
4678 break;
4679
4680 case KTX_RED_INTEGER:
4681 switch (internal_format) {
4682 case KTX_R8I:
4683 case KTX_R8UI:
4684 format = F_r8i;
4685 break;
4686 case KTX_R16I:
4687 case KTX_R16UI:
4688 format = F_r16i;
4689 break;
4690 case KTX_R32I:
4691 case KTX_R32UI:
4692 format = F_r32i;
4693 break;
4694 default:
4695 gobj_cat.error()
4696 << filename << " has unsupported red integer format " << internal_format << "\n";
4697 return false;
4698 }
4699 break;
4700
4701 case KTX_GREEN:
4702 format = F_green;
4703 if (internal_format != KTX_GREEN) {
4704 gobj_cat.warning()
4705 << filename << " has unsupported green format " << internal_format << "\n";
4706 }
4707 break;
4708
4709 case KTX_BLUE:
4710 format = F_blue;
4711 if (internal_format != KTX_BLUE) {
4712 gobj_cat.warning()
4713 << filename << " has unsupported blue format " << internal_format << "\n";
4714 }
4715 break;
4716
4717 case KTX_RG:
4718 switch (internal_format) {
4719 case KTX_RG:
4720 case KTX_RG_SNORM:
4721 case KTX_RG8:
4722 case KTX_RG8_SNORM:
4723 format = F_rg;
4724 break;
4725 case KTX_RG16:
4726 case KTX_RG16_SNORM:
4727 case KTX_RG16F:
4728 format = F_rg16;
4729 break;
4730 case KTX_RG32F:
4731 format = F_rg32;
4732 break;
4733 default:
4734 format = F_rg;
4735 gobj_cat.warning()
4736 << filename << " has unsupported RG format " << internal_format << "\n";
4737 }
4738 break;
4739
4740 case KTX_RG_INTEGER:
4741 switch (internal_format) {
4742 case KTX_RG8I:
4743 case KTX_RG8UI:
4744 format = F_rg8i;
4745 break;
4746 case KTX_RG16I:
4747 case KTX_RG16UI:
4748 format = F_rg16i;
4749 break;
4750 case KTX_RG32I:
4751 case KTX_RG32UI:
4752 format = F_rg32i;
4753 break;
4754 default:
4755 gobj_cat.error()
4756 << filename << " has unsupported RG integer format " << internal_format << "\n";
4757 return false;
4758 }
4759 break;
4760
4761 case KTX_RGB:
4762 swap_bgr = true;
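// Deliberate fall-through: RGB data is handled exactly like BGR, except that
// the red and blue channels are swapped after reading.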
4763 case KTX_BGR:
4764 switch (internal_format) {
4765 case KTX_RGB:
4766 case KTX_RGB_SNORM:
4767 format = F_rgb;
4768 break;
4769 case KTX_RGB5:
4770 format = F_rgb5;
4771 break;
4772 case KTX_RGB12:
4773 format = F_rgb12;
4774 break;
4775 case KTX_R3_G3_B2:
4776 format = F_rgb332;
4777 break;
4778 case KTX_RGB9_E5:
4779 format = F_rgb9_e5;
4780 break;
4781 case KTX_R11F_G11F_B10F:
4782 format = F_r11_g11_b10;
4783 break;
4784 case KTX_RGB8:
4785 case KTX_RGB8_SNORM:
4786 format = F_rgb8;
4787 break;
4788 case KTX_RGB16:
4789 case KTX_RGB16_SNORM:
4790 case KTX_RGB16F:
4791 format = F_rgb16;
4792 break;
4793 case KTX_RGB32F:
4794 format = F_rgb32;
4795 break;
4796 case KTX_SRGB:
4797 case KTX_SRGB8:
4798 format = F_srgb;
4799 break;
4800 default:
4801 format = F_rgb;
4802 gobj_cat.warning()
4803 << filename << " has unsupported RGB format " << internal_format << "\n";
4804 }
4805 break;
4806
4807 case KTX_RGB_INTEGER:
4808 swap_bgr = true;
4809 case KTX_BGR_INTEGER:
4810 switch (internal_format) {
4811 case KTX_RGB8I:
4812 case KTX_RGB8UI:
4813 format = F_rgb8i;
4814 break;
4815 case KTX_RGB16I:
4816 case KTX_RGB16UI:
4817 format = F_rgb16i;
4818 break;
4819 case KTX_RGB32I:
4820 case KTX_RGB32UI:
4821 format = F_rgb32i;
4822 break;
4823 default:
4824 gobj_cat.error()
4825 << filename << " has unsupported RGB integer format " << internal_format << "\n";
4826 return false;
4827 }
4828 break;
4829
4830 case KTX_RGBA:
4831 swap_bgr = true;
4832 case KTX_BGRA:
4833 switch (internal_format) {
4834 case KTX_RGBA:
4835 case KTX_RGBA_SNORM:
4836 format = F_rgba;
4837 break;
4838 case KTX_RGBA4:
4839 format = F_rgba4;
4840 break;
4841 case KTX_RGB5_A1:
4842 format = F_rgba5;
4843 break;
4844 case KTX_RGBA12:
4845 format = F_rgba12;
4846 break;
4847 case KTX_RGB10_A2:
4848 format = F_rgb10_a2;
4849 break;
4850 case KTX_RGBA8:
4851 case KTX_RGBA8_SNORM:
4852 format = F_rgba8;
4853 break;
4854 case KTX_RGBA16:
4855 case KTX_RGBA16_SNORM:
4856 case KTX_RGBA16F:
4857 format = F_rgba16;
4858 break;
4859 case KTX_RGBA32F:
4860 format = F_rgba32;
4861 break;
4862 case KTX_SRGB_ALPHA:
4863 case KTX_SRGB8_ALPHA8:
4864 format = F_srgb_alpha;
4865 break;
4866 default:
4867 format = F_rgba;
4868 gobj_cat.warning()
4869 << filename << " has unsupported RGBA format " << internal_format << "\n";
4870 }
4871 break;
4872 break;
4873
4874 case KTX_RGBA_INTEGER:
4875 swap_bgr = true;
4876 case KTX_BGRA_INTEGER:
4877 switch (internal_format) {
4878 case KTX_RGBA8I:
4879 case KTX_RGBA8UI:
4880 format = F_rgba8i;
4881 break;
4882 case KTX_RGBA16I:
4883 case KTX_RGBA16UI:
4884 format = F_rgba16i;
4885 break;
4886 case KTX_RGBA32I:
4887 case KTX_RGBA32UI:
4888 format = F_rgba32i;
4889 break;
4890 default:
4891 gobj_cat.error()
4892 << filename << " has unsupported RGBA integer format " << internal_format << "\n";
4893 return false;
4894 }
4895 break;
4896
4897 case KTX_LUMINANCE:
4898 format = F_luminance;
4899 break;
4900
4901 case KTX_LUMINANCE_ALPHA:
4902 format = F_luminance_alpha;
4903 break;
4904
4905 case KTX_ALPHA:
4906 format = F_alpha;
4907 break;
4908
4909 case KTX_STENCIL_INDEX:
4910 default:
4911 gobj_cat.error()
4912 << filename << " has unsupported format " << gl_format << "\n";
4913 return false;
4914 }
4915 }
4916
4917 TextureType texture_type;
4918 if (depth > 0) {
4919 texture_type = TT_3d_texture;
4920
4921 } else if (num_faces > 1) {
4922 if (num_faces != 6) {
4923 gobj_cat.error()
4924 << filename << " has " << num_faces << " cube map faces, expected 6\n";
4925 return false;
4926 }
4927 if (width != height) {
4928 gobj_cat.error()
4929 << filename << " is cube map, but does not have square dimensions\n";
4930 return false;
4931 }
4932 if (num_array_elements > 0) {
4933 depth = num_array_elements * 6;
4934 texture_type = TT_cube_map_array;
4935 } else {
4936 depth = 6;
4937 texture_type = TT_cube_map;
4938 }
4939
4940 } else if (height > 0) {
4941 if (num_array_elements > 0) {
4942 depth = num_array_elements;
4943 texture_type = TT_2d_texture_array;
4944 } else {
4945 depth = 1;
4946 texture_type = TT_2d_texture;
4947 }
4948
4949 } else if (width > 0) {
4950 depth = 1;
4951 if (num_array_elements > 0) {
4952 height = num_array_elements;
4953 texture_type = TT_1d_texture_array;
4954 } else {
4955 height = 1;
4956 texture_type = TT_1d_texture;
4957 }
4958
4959 } else {
4960 gobj_cat.error()
4961 << filename << " has zero size\n";
4962 return false;
4963 }
4964
4965 do_setup_texture(cdata, texture_type, width, height, depth, type, format);
4966
4967 cdata->_orig_file_x_size = cdata->_x_size;
4968 cdata->_orig_file_y_size = cdata->_y_size;
4969 cdata->_compression = compression;
4970 cdata->_ram_image_compression = compression;
4971
4972 if (!header_only) {
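// Per the KTX specification, a mipmap level count of 0 means the loader is
// expected to generate the remaining mipmap levels itself.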
4973 bool generate_mipmaps = false;
4974 if (num_mipmap_levels == 0) {
4975 generate_mipmaps = true;
4976 num_mipmap_levels = 1;
4977 }
4978
4979 for (uint32_t n = 0; n < num_mipmap_levels; ++n) {
4980 uint32_t image_size;
4981 if (big_endian) {
4982 image_size = ktx.get_be_uint32();
4983 } else {
4984 image_size = ktx.get_uint32();
4985 }
4986 PTA_uchar image;
4987
4988 if (compression == CM_off) {
4989 uint32_t row_size = do_get_expected_mipmap_x_size(cdata, (int)n) * cdata->_num_components * cdata->_component_width;
4990 uint32_t num_rows = do_get_expected_mipmap_y_size(cdata, (int)n) * do_get_expected_mipmap_z_size(cdata, (int)n);
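// Uncompressed KTX rows are padded to a 4-byte boundary (GL_UNPACK_ALIGNMENT of 4).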
4991 uint32_t row_padded = (row_size + 3) & ~3;
4992
4993 if (image_size == row_size * num_rows) {
4994 if (row_padded != row_size) {
4995 // Someone tightly packed the image. This is invalid, but because
4996 // we like it tightly packed too, we'll read it anyway.
4997 gobj_cat.warning()
4998 << filename << " does not have proper row padding for mipmap "
4999 "level " << n << "\n";
5000 }
5001 image = PTA_uchar::empty_array(image_size);
5002 ktx.extract_bytes(image.p(), image_size);
5003
5004 } else if (image_size != row_padded * num_rows) {
5005 gobj_cat.error()
5006 << filename << " has invalid image size " << image_size
5007 << " for mipmap level " << n << " (expected "
5008 << row_padded * num_rows << ")\n";
5009 return false;
5010
5011 } else {
5012 // Read it row by row.
5013 image = PTA_uchar::empty_array(row_size * num_rows);
5014 uint32_t skip = row_padded - row_size;
5015 unsigned char *p = image.p();
5016 for (uint32_t row = 0; row < num_rows; ++row) {
5017 ktx.extract_bytes(p, row_size);
5018 ktx.skip_bytes(skip);
5019 p += row_size;
5020 }
5021 }
5022
5023 // Swap red and blue channels if necessary to match Panda conventions.
5024 if (swap_bgr) {
5025 unsigned char *begin = image.p();
5026 const unsigned char *end = image.p() + image.size();
5027 size_t skip = cdata->_num_components;
5028 nassertr(skip == 3 || skip == 4, false);
5029
5030 switch (cdata->_component_width) {
5031 case 1:
5032 for (unsigned char *p = begin; p < end; p += skip) {
5033 swap(p[0], p[2]);
5034 }
5035 break;
5036 case 2:
5037 for (short *p = (short *)begin; p < (short *)end; p += skip) {
5038 swap(p[0], p[2]);
5039 }
5040 break;
5041 case 4:
5042 for (int *p = (int *)begin; p < (int *)end; p += skip) {
5043 swap(p[0], p[2]);
5044 }
5045 break;
5046 default:
5047 nassert_raise("unexpected channel count");
5048 return false;
5049 }
5050 }
5051
5052 do_set_ram_mipmap_image(cdata, (int)n, std::move(image),
5053 row_size * do_get_expected_mipmap_y_size(cdata, (int)n));
5054
5055 } else {
5056 // Compressed image. We'll trust that the file has the right size.
5057 image = PTA_uchar::empty_array(image_size);
5058 ktx.extract_bytes(image.p(), image_size);
5059 do_set_ram_mipmap_image(cdata, (int)n, std::move(image), image_size / depth);
5060 }
5061
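// Skip the mipPadding bytes so the next mipmap level starts on a 4-byte boundary.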
5062 ktx.skip_bytes(3 - ((image_size + 3) & 3));
5063 }
5064
5065 cdata->_has_read_pages = true;
5066 cdata->_has_read_mipmaps = true;
5067 cdata->_num_mipmap_levels_read = cdata->_ram_images.size();
5068
5069 if (generate_mipmaps) {
5070 do_generate_ram_mipmap_images(cdata, false);
5071 }
5072 }
5073
5074 if (in.fail()) {
5075 gobj_cat.error()
5076 << filename << ": truncated KTX file.\n";
5077 return false;
5078 }
5079
5080 cdata->_loaded_from_image = true;
5081 cdata->_loaded_from_txo = true;
5082
5083 return true;
5084}
5085
5086/**
5087 * Internal method to write a series of pages and/or mipmap levels to disk
5088 * files.
5089 */
5090bool Texture::
5091do_write(CData *cdata,
5092 const Filename &fullpath, int z, int n, bool write_pages, bool write_mipmaps) {
5093 if (is_txo_filename(fullpath)) {
5094 if (!do_has_bam_rawdata(cdata)) {
5095 do_get_bam_rawdata(cdata);
5096 }
5097 nassertr(do_has_bam_rawdata(cdata), false);
5098 return do_write_txo_file(cdata, fullpath);
5099 }
5100
5101 if (!do_has_uncompressed_ram_image(cdata)) {
5102 do_get_uncompressed_ram_image(cdata);
5103 }
5104
5105 nassertr(do_has_ram_mipmap_image(cdata, n), false);
5106 nassertr(cdata->_ram_image_compression == CM_off, false);
5107
5108 if (write_pages && write_mipmaps) {
5109 // Write a sequence of pages * mipmap levels.
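// A hedged usage sketch (the filename pattern is hypothetical): a call such as
// tex->write("grid_#_#.png", 0, 0, true, true) must supply two separate hash
// sequences; one is substituted with the page index and the other with the
// mipmap level, as enforced by the has_hash() check below.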
5110 Filename fullpath_pattern = Filename::pattern_filename(fullpath);
5111 int num_levels = cdata->_ram_images.size();
5112
5113 for (int n = 0; n < num_levels; ++n) {
5114 int num_pages = do_get_expected_mipmap_num_pages(cdata, n);
5115
5116 for (z = 0; z < num_pages; ++z) {
5117 Filename n_pattern = Filename::pattern_filename(fullpath_pattern.get_filename_index(z));
5118
5119 if (!n_pattern.has_hash()) {
5120 gobj_cat.error()
5121 << "Filename requires two different hash sequences: " << fullpath
5122 << "\n";
5123 return false;
5124 }
5125
5126 if (!do_write_one(cdata, n_pattern.get_filename_index(n), z, n)) {
5127 return false;
5128 }
5129 }
5130 }
5131
5132 } else if (write_pages) {
5133 // Write a sequence of pages.
5134 Filename fullpath_pattern = Filename::pattern_filename(fullpath);
5135 if (!fullpath_pattern.has_hash()) {
5136 gobj_cat.error()
5137 << "Filename requires a hash mark: " << fullpath
5138 << "\n";
5139 return false;
5140 }
5141
5142 int num_pages = cdata->_z_size * cdata->_num_views;
5143 for (z = 0; z < num_pages; ++z) {
5144 if (!do_write_one(cdata, fullpath_pattern.get_filename_index(z), z, n)) {
5145 return false;
5146 }
5147 }
5148
5149 } else if (write_mipmaps) {
5150 // Write a sequence of mipmap images.
5151 Filename fullpath_pattern = Filename::pattern_filename(fullpath);
5152 if (!fullpath_pattern.has_hash()) {
5153 gobj_cat.error()
5154 << "Filename requires a hash mark: " << fullpath
5155 << "\n";
5156 return false;
5157 }
5158
5159 int num_levels = cdata->_ram_images.size();
5160 for (int n = 0; n < num_levels; ++n) {
5161 if (!do_write_one(cdata, fullpath_pattern.get_filename_index(n), z, n)) {
5162 return false;
5163 }
5164 }
5165
5166 } else {
5167 // Write a single file.
5168 if (!do_write_one(cdata, fullpath, z, n)) {
5169 return false;
5170 }
5171 }
5172
5173 return true;
5174}
5175
5176/**
5177 * Internal method to write the indicated page and mipmap level to a disk
5178 * image file.
5179 */
5180bool Texture::
5181do_write_one(CData *cdata, const Filename &fullpath, int z, int n) {
5182 if (!do_has_ram_mipmap_image(cdata, n)) {
5183 return false;
5184 }
5185
5186 nassertr(cdata->_ram_image_compression == CM_off, false);
5187
5188 bool success;
5189 if (cdata->_component_type == T_float) {
5190 // Writing a floating-point texture.
5191 PfmFile pfm;
5192 if (!do_store_one(cdata, pfm, z, n)) {
5193 return false;
5194 }
5195 success = pfm.write(fullpath);
5196 } else {
5197 // Writing a normal, integer texture.
5198 PNMFileType *type =
5199 PNMFileTypeRegistry::get_global_ptr()->get_type_from_extension(fullpath);
5200 if (type == nullptr) {
5201 gobj_cat.error()
5202 << "Texture::write() - couldn't determine type from extension: " << fullpath << endl;
5203 return false;
5204 }
5205
5206 PNMImage pnmimage;
5207 if (!do_store_one(cdata, pnmimage, z, n)) {
5208 return false;
5209 }
5210 success = pnmimage.write(fullpath, type);
5211 }
5212
5213 if (!success) {
5214 gobj_cat.error()
5215 << "Texture::write() - couldn't write: " << fullpath << endl;
5216 return false;
5217 }
5218
5219 return true;
5220}
5221
5222/**
5223 * Internal method to copy a page and/or mipmap level to a PNMImage.
5224 */
5225bool Texture::
5226do_store_one(CData *cdata, PNMImage &pnmimage, int z, int n) {
5227 // First, reload the ram image if necessary.
5228 do_get_uncompressed_ram_image(cdata);
5229
5230 if (!do_has_ram_mipmap_image(cdata, n)) {
5231 return false;
5232 }
5233
5234 nassertr(z >= 0 && z < do_get_expected_mipmap_num_pages(cdata, n), false);
5235 nassertr(cdata->_ram_image_compression == CM_off, false);
5236
5237 if (cdata->_component_type == T_float) {
5238 // PNMImage by way of PfmFile.
5239 PfmFile pfm;
5240 bool success = convert_to_pfm(pfm,
5241 do_get_expected_mipmap_x_size(cdata, n),
5242 do_get_expected_mipmap_y_size(cdata, n),
5243 cdata->_num_components, cdata->_component_width,
5244 cdata->_ram_images[n]._image,
5245 do_get_ram_mipmap_page_size(cdata, n), z);
5246 if (!success) {
5247 return false;
5248 }
5249 return pfm.store(pnmimage);
5250 }
5251
5252 return convert_to_pnmimage(pnmimage,
5253 do_get_expected_mipmap_x_size(cdata, n),
5254 do_get_expected_mipmap_y_size(cdata, n),
5255 cdata->_num_components, cdata->_component_type,
5256 is_srgb(cdata->_format),
5257 cdata->_ram_images[n]._image,
5258 do_get_ram_mipmap_page_size(cdata, n), z);
5259}
5260
5261/**
5262 * Internal method to copy a page and/or mipmap level to a PfmFile.
5263 */
5264bool Texture::
5265do_store_one(CData *cdata, PfmFile &pfm, int z, int n) {
5266 // First, reload the ram image if necessary.
5267 do_get_uncompressed_ram_image(cdata);
5268
5269 if (!do_has_ram_mipmap_image(cdata, n)) {
5270 return false;
5271 }
5272
5273 nassertr(z >= 0 && z < do_get_expected_mipmap_num_pages(cdata, n), false);
5274 nassertr(cdata->_ram_image_compression == CM_off, false);
5275
5276 if (cdata->_component_type != T_float) {
5277 // PfmFile by way of PNMImage.
5278 PNMImage pnmimage;
5279 bool success =
5280 convert_to_pnmimage(pnmimage,
5281 do_get_expected_mipmap_x_size(cdata, n),
5282 do_get_expected_mipmap_y_size(cdata, n),
5283 cdata->_num_components, cdata->_component_type,
5284 is_srgb(cdata->_format),
5285 cdata->_ram_images[n]._image,
5286 do_get_ram_mipmap_page_size(cdata, n), z);
5287 if (!success) {
5288 return false;
5289 }
5290 return pfm.load(pnmimage);
5291 }
5292
5293 return convert_to_pfm(pfm,
5294 do_get_expected_mipmap_x_size(cdata, n),
5295 do_get_expected_mipmap_y_size(cdata, n),
5296 cdata->_num_components, cdata->_component_width,
5297 cdata->_ram_images[n]._image,
5298 do_get_ram_mipmap_page_size(cdata, n), z);
5299}
5300
5301/**
5302 * Called internally when write() detects a txo filename.
5303 */
5304bool Texture::
5305 do_write_txo_file(const CData *cdata, const Filename &fullpath) const {
5306 VirtualFileSystem *vfs = VirtualFileSystem::get_global_ptr();
5307 Filename filename = Filename::binary_filename(fullpath);
5308 ostream *out = vfs->open_write_file(filename, true, true);
5309 if (out == nullptr) {
5310 gobj_cat.error()
5311 << "Unable to open " << filename << "\n";
5312 return false;
5313 }
5314
5315 bool success = do_write_txo(cdata, *out, fullpath);
5316 vfs->close_write_file(out);
5317 return success;
5318}
5319
5320/**
5321 *
5322 */
5323bool Texture::
5324do_write_txo(const CData *cdata, ostream &out, const string &filename) const {
5325 DatagramOutputFile dout;
5326
5327 if (!dout.open(out, filename)) {
5328 gobj_cat.error()
5329 << "Could not write texture object: " << filename << "\n";
5330 return false;
5331 }
5332
5333 if (!dout.write_header(_bam_header)) {
5334 gobj_cat.error()
5335 << "Unable to write to " << filename << "\n";
5336 return false;
5337 }
5338
5339 BamWriter writer(&dout);
5340 if (!writer.init()) {
5341 return false;
5342 }
5343
5344 writer.set_file_texture_mode(BamWriter::BTM_rawdata);
5345
5346 if (!writer.write_object(this)) {
5347 return false;
5348 }
5349
5350 if (!do_has_bam_rawdata(cdata)) {
5351 gobj_cat.error()
5352 << get_name() << " does not have ram image\n";
5353 return false;
5354 }
5355
5356 return true;
5357}
5358
5359/**
5360 * If the texture has a ram image already, this acquires the CData write lock
5361 * and returns it.
5362 *
5363 * If the texture lacks a ram image, this performs do_reload_ram_image(), but
5364 * without holding the lock on this particular Texture object, to avoid
5365 * holding the lock across what might be a slow operation. Instead, the
5366 * reload is performed in a copy of the texture object, and then the lock is
5367 * acquired and the data is copied in.
5368 *
5369 * In any case, the return value is a locked CData object, which must be
5370 * released with an explicit call to release_write(). The CData object will
5371 * have a ram image unless for some reason do_reload_ram_image() fails.
5372 */
5373Texture::CData *Texture::
5374unlocked_ensure_ram_image(bool allow_compression) {
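// A sketch of the expected call pattern (assuming the usual PipelineCycler API):
//   Texture::CData *cdataw = tex->unlocked_ensure_ram_image(true);
//   ... use cdataw->_ram_images ...
//   tex->_cycler.release_write(cdataw);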
5375 Thread *current_thread = Thread::get_current_thread();
5376
5377 // First, wait for any other threads that might be simultaneously performing
5378 // the same operation.
5379 MutexHolder holder(_lock);
5380 while (_reloading) {
5381 _cvar.wait();
5382 }
5383
5384 // Then make sure we still need to reload before continuing.
5385 const CData *cdata = _cycler.read(current_thread);
5386 bool has_ram_image = do_has_ram_image(cdata);
5387 if (has_ram_image && !allow_compression && cdata->_ram_image_compression != Texture::CM_off) {
5388 // If we don't want compression, but the ram image we have is pre-
5389 // compressed, we don't consider it.
5390 has_ram_image = false;
5391 }
5392 if (has_ram_image || !do_can_reload(cdata)) {
5393 // We don't need to reload after all, or maybe we can't reload anyway.
5394 // Return, but elevate the lock first, as we promised.
5395 return _cycler.elevate_read_upstream(cdata, false, current_thread);
5396 }
5397
5398 // We need to reload.
5399 nassertr(!_reloading, nullptr);
5400 _reloading = true;
5401
5402 PT(Texture) tex = do_make_copy(cdata);
5403 _cycler.release_read(cdata);
5404 _lock.unlock();
5405
5406 // Perform the actual reload in a copy of the texture, while our own mutex
5407 // is left unlocked.
5408 CDWriter cdata_tex(tex->_cycler, true);
5409 tex->do_reload_ram_image(cdata_tex, allow_compression);
5410
5411 _lock.lock();
5412
5413 CData *cdataw = _cycler.write_upstream(false, current_thread);
5414
5415 // Rather than calling do_assign(), which would copy *all* of the reloaded
5416 // texture's properties over, we only copy in the ones which are relevant to
5417 // the ram image. This way, if the properties have changed during the
5418 // reload (for instance, because we reloaded a txo), it won't contaminate
5419 // the original texture.
5420 cdataw->_orig_file_x_size = cdata_tex->_orig_file_x_size;
5421 cdataw->_orig_file_y_size = cdata_tex->_orig_file_y_size;
5422
5423 // If any of *these* properties have changed, the texture has changed in
5424 // some fundamental way. Update it appropriately.
5425 if (cdata_tex->_x_size != cdataw->_x_size ||
5426 cdata_tex->_y_size != cdataw->_y_size ||
5427 cdata_tex->_z_size != cdataw->_z_size ||
5428 cdata_tex->_num_views != cdataw->_num_views ||
5429 cdata_tex->_num_components != cdataw->_num_components ||
5430 cdata_tex->_component_width != cdataw->_component_width ||
5431 cdata_tex->_texture_type != cdataw->_texture_type ||
5432 cdata_tex->_component_type != cdataw->_component_type) {
5433
5434 cdataw->_x_size = cdata_tex->_x_size;
5435 cdataw->_y_size = cdata_tex->_y_size;
5436 cdataw->_z_size = cdata_tex->_z_size;
5437 cdataw->_num_views = cdata_tex->_num_views;
5438
5439 cdataw->_num_components = cdata_tex->_num_components;
5440 cdataw->_component_width = cdata_tex->_component_width;
5441 cdataw->_texture_type = cdata_tex->_texture_type;
5442 cdataw->_format = cdata_tex->_format;
5443 cdataw->_component_type = cdata_tex->_component_type;
5444
5445 cdataw->inc_properties_modified();
5446 cdataw->inc_image_modified();
5447 }
5448
5449 cdataw->_keep_ram_image = cdata_tex->_keep_ram_image;
5450 cdataw->_ram_image_compression = cdata_tex->_ram_image_compression;
5451 cdataw->_ram_images = cdata_tex->_ram_images;
5452
5453 nassertr(_reloading, nullptr);
5454 _reloading = false;
5455
5456 // We don't generally increment the cdata->_image_modified semaphore,
5457 // because this is just a reload, and presumably the image hasn't changed
5458 // (unless we hit the if condition above).
5459
5460 _cvar.notify_all();
5461
5462 // Return the still-locked cdata.
5463 return cdataw;
5464}
5465
5466/**
5467 * Called when the Texture image is required but the ram image is not
5468 * available, this will reload it from disk or otherwise do whatever is
5469 * required to make it available, if possible.
5470 *
5471 * Assumes the lock is already held. The lock will be held during the
5472 * duration of this operation.
5473 */
5474void Texture::
5475 do_reload_ram_image(CData *cdata, bool allow_compression) {
5476 BamCache *cache = BamCache::get_global_ptr();
5477 PT(BamCacheRecord) record;
5478
5479 if (!do_has_compression(cdata)) {
5480 allow_compression = false;
5481 }
5482
5483 if ((cache->get_cache_textures() || (allow_compression && cache->get_cache_compressed_textures())) && !textures_header_only) {
5484 // See if the texture can be found in the on-disk cache, if it is active.
5485
5486 record = cache->lookup(cdata->_fullpath, "txo");
5487 if (record != nullptr &&
5488 record->has_data()) {
5489 PT(Texture) tex = DCAST(Texture, record->get_data());
5490
5491 // But don't use the cache record if the config parameters have changed,
5492 // and we want a different-sized texture now.
5493 int x_size = cdata->_orig_file_x_size;
5494 int y_size = cdata->_orig_file_y_size;
5495 do_adjust_this_size(cdata, x_size, y_size, cdata->_filename.get_basename(), true);
5496 if (x_size != tex->get_x_size() || y_size != tex->get_y_size()) {
5497 if (gobj_cat.is_debug()) {
5498 gobj_cat.debug()
5499 << "Cached texture " << *this << " has size "
5500 << tex->get_x_size() << " x " << tex->get_y_size()
5501 << " instead of " << x_size << " x " << y_size
5502 << "; ignoring cache.\n";
5503 }
5504 } else {
5505 // Also don't keep the cached version if it's compressed but we want
5506 // uncompressed.
5507 if (!allow_compression && tex->get_ram_image_compression() != Texture::CM_off) {
5508 if (gobj_cat.is_debug()) {
5509 gobj_cat.debug()
5510 << "Cached texture " << *this
5511 << " is compressed in cache; ignoring cache.\n";
5512 }
5513 } else {
5514 gobj_cat.info()
5515 << "Texture " << get_name() << " reloaded from disk cache\n";
5516 // We don't want to replace all the texture parameters--for
5517 // instance, we don't want to change the filter type or the border
5518 // color or anything--we just want to get the image and necessary
5519 // associated parameters.
5520 CDReader cdata_tex(tex->_cycler);
5521 cdata->_x_size = cdata_tex->_x_size;
5522 cdata->_y_size = cdata_tex->_y_size;
5523 if (cdata->_num_components != cdata_tex->_num_components) {
5524 cdata->_num_components = cdata_tex->_num_components;
5525 cdata->_format = cdata_tex->_format;
5526 }
5527 cdata->_component_type = cdata_tex->_component_type;
5528 cdata->_compression = cdata_tex->_compression;
5529 cdata->_ram_image_compression = cdata_tex->_ram_image_compression;
5530 cdata->_ram_images = cdata_tex->_ram_images;
5531 cdata->_loaded_from_image = true;
5532
5533 bool was_compressed = (cdata->_ram_image_compression != CM_off);
5534 if (do_consider_auto_process_ram_image(cdata, uses_mipmaps(), allow_compression)) {
5535 bool is_compressed = (cdata->_ram_image_compression != CM_off);
5536 if (!was_compressed && is_compressed &&
5537 cache->get_cache_compressed_textures()) {
5538 // We've re-compressed the image after loading it from the
5539 // cache. To keep the cache current, rewrite it to the cache
5540 // now, in its newly compressed form.
5541 record->set_data(this, this);
5542 cache->store(record);
5543 }
5544 }
5545
5546 return;
5547 }
5548 }
5549 }
5550 }
5551
5552 gobj_cat.info()
5553 << "Reloading texture " << get_name() << "\n";
5554
5555 int z = 0;
5556 int n = 0;
5557
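// If all pages and/or all mipmap levels had been read before, pass the same
// counts to do_read() so the reload brings back everything we previously had.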
5558 if (cdata->_has_read_pages) {
5559 z = cdata->_z_size;
5560 }
5561 if (cdata->_has_read_mipmaps) {
5562 n = cdata->_num_mipmap_levels_read;
5563 }
5564
5565 cdata->_loaded_from_image = false;
5566 Format orig_format = cdata->_format;
5567 int orig_num_components = cdata->_num_components;
5568
5569 LoaderOptions options;
5570 if (allow_compression) {
5571 options.set_texture_flags(LoaderOptions::TF_preload |
5572 LoaderOptions::TF_allow_compression);
5573 } else {
5574 options.set_texture_flags(LoaderOptions::TF_preload);
5575 }
5576 do_read(cdata, cdata->_fullpath, cdata->_alpha_fullpath,
5577 cdata->_primary_file_num_channels, cdata->_alpha_file_channel,
5578 z, n, cdata->_has_read_pages, cdata->_has_read_mipmaps, options, nullptr);
5579
5580 if (orig_num_components == cdata->_num_components) {
5581 // Restore the original format, in case it was needlessly changed during
5582 // the reload operation.
5583 cdata->_format = orig_format;
5584 }
5585
5586 if (do_has_ram_image(cdata) && record != nullptr) {
5587 if (cache->get_cache_textures() || (cdata->_ram_image_compression != CM_off && cache->get_cache_compressed_textures())) {
5588 // Update the cache.
5589 if (record != nullptr) {
5590 record->add_dependent_file(cdata->_fullpath);
5591 }
5592 record->set_data(this, this);
5593 cache->store(record);
5594 }
5595 }
5596}
5597
5598/**
5599 * This is called internally to uniquify the ram image pointer without
5600 * updating cdata->_image_modified.
5601 */
5602PTA_uchar Texture::
5603do_modify_ram_image(CData *cdata) {
5604 if (cdata->_ram_images.empty() || cdata->_ram_images[0]._image.empty() ||
5605 cdata->_ram_image_compression != CM_off) {
5606 do_make_ram_image(cdata);
5607 } else {
5608 do_clear_ram_mipmap_images(cdata);
5609 }
5610 return cdata->_ram_images[0]._image;
5611}
5612
5613/**
5614 * This is called internally to make a new ram image without updating
5615 * cdata->_image_modified.
5616 */
5617PTA_uchar Texture::
5618do_make_ram_image(CData *cdata) {
5619 int image_size = do_get_expected_ram_image_size(cdata);
5620 cdata->_ram_images.clear();
5621 cdata->_ram_images.push_back(RamImage());
5622 cdata->_ram_images[0]._page_size = do_get_expected_ram_page_size(cdata);
5623 cdata->_ram_images[0]._image = PTA_uchar::empty_array(image_size, get_class_type());
5624 cdata->_ram_images[0]._pointer_image = nullptr;
5625 cdata->_ram_image_compression = CM_off;
5626
5627 if (cdata->_has_clear_color) {
5628 // Fill the image with the clear color.
5629 unsigned char pixel[16];
5630 const int pixel_size = do_get_clear_data(cdata, pixel);
5631 nassertr(pixel_size > 0, cdata->_ram_images[0]._image);
5632
5633 unsigned char *image_data = cdata->_ram_images[0]._image;
5634 for (int i = 0; i < image_size; i += pixel_size) {
5635 memcpy(image_data + i, pixel, pixel_size);
5636 }
5637 }
5638
5639 return cdata->_ram_images[0]._image;
5640}
5641
5642/**
5643 * Replaces the current system-RAM image with the new data. If compression is
5644 * not CM_off, it indicates that the new data is already pre-compressed in the
5645 * indicated format.
5646 *
5647 * This does *not* affect keep_ram_image.
5648 */
5649void Texture::
5650do_set_ram_image(CData *cdata, CPTA_uchar image, Texture::CompressionMode compression,
5651 size_t page_size) {
5652 nassertv(compression != CM_default);
5653 nassertv(compression != CM_off || image.size() == do_get_expected_ram_image_size(cdata));
5654 if (cdata->_ram_images.empty()) {
5655 cdata->_ram_images.push_back(RamImage());
5656 } else {
5657 do_clear_ram_mipmap_images(cdata);
5658 }
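// A page_size of zero means the entire image is treated as a single page.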
5659 if (page_size == 0) {
5660 page_size = image.size();
5661 }
5662 if (cdata->_ram_images[0]._image != image ||
5663 cdata->_ram_images[0]._page_size != page_size ||
5664 cdata->_ram_image_compression != compression) {
5665 cdata->_ram_images[0]._image = image.cast_non_const();
5666 cdata->_ram_images[0]._page_size = page_size;
5667 cdata->_ram_images[0]._pointer_image = nullptr;
5668 cdata->_ram_image_compression = compression;
5669 cdata->inc_image_modified();
5670 }
5671}
5672
5673/**
5674 * This is called internally to uniquify the nth mipmap image pointer without
5675 * updating cdata->_image_modified.
5676 */
5677PTA_uchar Texture::
5678do_modify_ram_mipmap_image(CData *cdata, int n) {
5679 nassertr(cdata->_ram_image_compression == CM_off, PTA_uchar());
5680
5681 if (n >= (int)cdata->_ram_images.size() ||
5682 cdata->_ram_images[n]._image.empty()) {
5683 do_make_ram_mipmap_image(cdata, n);
5684 }
5685 return cdata->_ram_images[n]._image;
5686}
5687
5688/**
5689 *
5690 */
5691PTA_uchar Texture::
5692do_make_ram_mipmap_image(CData *cdata, int n) {
5693 nassertr(cdata->_ram_image_compression == CM_off, PTA_uchar(get_class_type()));
5694
5695 while (n >= (int)cdata->_ram_images.size()) {
5696 cdata->_ram_images.push_back(RamImage());
5697 }
5698
5699 size_t image_size = do_get_expected_ram_mipmap_image_size(cdata, n);
5700 cdata->_ram_images[n]._image = PTA_uchar::empty_array(image_size, get_class_type());
5701 cdata->_ram_images[n]._pointer_image = nullptr;
5702 cdata->_ram_images[n]._page_size = do_get_expected_ram_mipmap_page_size(cdata, n);
5703
5704 if (cdata->_has_clear_color) {
5705 // Fill the image with the clear color.
5706 unsigned char pixel[16];
5707 const size_t pixel_size = (size_t)do_get_clear_data(cdata, pixel);
5708 nassertr(pixel_size > 0, cdata->_ram_images[n]._image);
5709
5710 unsigned char *image_data = cdata->_ram_images[n]._image;
5711 for (size_t i = 0; i < image_size; i += pixel_size) {
5712 memcpy(image_data + i, pixel, pixel_size);
5713 }
5714 }
5715
5716 return cdata->_ram_images[n]._image;
5717}
5718
5719/**
5720 *
5721 */
5722void Texture::
5723do_set_ram_mipmap_image(CData *cdata, int n, CPTA_uchar image, size_t page_size) {
5724 nassertv(cdata->_ram_image_compression != CM_off || image.size() == do_get_expected_ram_mipmap_image_size(cdata, n));
5725
5726 while (n >= (int)cdata->_ram_images.size()) {
5727 cdata->_ram_images.push_back(RamImage());
5728 }
5729 if (page_size == 0) {
5730 page_size = image.size();
5731 }
5732
5733 if (cdata->_ram_images[n]._image != image ||
5734 cdata->_ram_images[n]._page_size != page_size) {
5735 cdata->_ram_images[n]._image = image.cast_non_const();
5736 cdata->_ram_images[n]._pointer_image = nullptr;
5737 cdata->_ram_images[n]._page_size = page_size;
5738 cdata->inc_image_modified();
5739 }
5740}
5741
5742/**
5743 * Fills the given buffer with a single pixel representing the clear color of
5744 * the texture, in the format of this texture, and returns its size in bytes.
5745 *
5746 * In other words, to create an uncompressed RAM image filled with the clear
5747 * color, initialize it with this pixel value repeated for every pixel.
5748 */
5749size_t Texture::
5750do_get_clear_data(const CData *cdata, unsigned char *into) const {
5751 nassertr(cdata->_has_clear_color, 0);
5752
5753 int num_components = cdata->_num_components;
5754 nassertr(num_components > 0, 0);
5755 nassertr(num_components <= 4, 0);
5756
5757 LVecBase4 clear_value = cdata->_clear_color;
5758
5759 // Swap the red and blue components, since Panda's internal component order is BGR(A).
5760 if (num_components >= 3) {
5761 std::swap(clear_value[0], clear_value[2]);
5762 }
5763
5764 switch (cdata->_component_type) {
5765 case T_unsigned_byte:
5766 if (is_srgb(cdata->_format)) {
5767 xel color;
5768 xelval alpha;
5769 encode_sRGB_uchar(clear_value, color, alpha);
5770 switch (num_components) {
5771 case 4: into[3] = (unsigned char)alpha;
5772 case 3: into[2] = (unsigned char)color.b;
5773 case 2: into[1] = (unsigned char)color.g;
5774 case 1: into[0] = (unsigned char)color.r;
5775 }
5776 } else {
5777 LColor scaled = clear_value.fmin(LColor(1)).fmax(LColor::zero());
5778 scaled *= 255;
5779 for (int i = 0; i < num_components; ++i) {
5780 into[i] = (unsigned char)scaled[i];
5781 }
5782 }
5783 break;
5784
5785 case T_unsigned_short:
5786 {
5787 LColor scaled = clear_value.fmin(LColor(1)).fmax(LColor::zero());
5788 scaled *= 65535;
5789 for (int i = 0; i < num_components; ++i) {
5790 ((unsigned short *)into)[i] = (unsigned short)scaled[i];
5791 }
5792 break;
5793 }
5794
5795 case T_float:
5796 for (int i = 0; i < num_components; ++i) {
5797 ((float *)into)[i] = clear_value[i];
5798 }
5799 break;
5800
5801 case T_unsigned_int_24_8:
5802 nassertr(num_components == 1, 0);
5803 *((unsigned int *)into) =
5804 ((unsigned int)(clear_value[0] * 16777215) << 8) +
5805 (unsigned int)max(min(clear_value[1], (PN_stdfloat)255), (PN_stdfloat)0);
5806 break;
5807
5808 case T_int:
5809 // Note: there are no 32-bit UNORM textures. Therefore, we don't do any
5810 // normalization here, either.
5811 for (int i = 0; i < num_components; ++i) {
5812 ((int *)into)[i] = (int)clear_value[i];
5813 }
5814 break;
5815
5816 case T_byte:
5817 {
5818 LColor scaled = clear_value.fmin(LColor(1)).fmax(LColor(-1));
5819 scaled *= 127;
5820 for (int i = 0; i < num_components; ++i) {
5821 ((signed char *)into)[i] = (signed char)scaled[i];
5822 }
5823 break;
5824 }
5825
5826 case T_short:
5827 {
5828 LColor scaled = clear_value.fmin(LColor(1)).fmax(LColor(-1));
5829 scaled *= 32767;
5830 for (int i = 0; i < num_components; ++i) {
5831 ((short *)into)[i] = (short)scaled[i];
5832 }
5833 break;
5834 }
5835
5836 case T_half_float:
5837 for (int i = 0; i < num_components; ++i) {
5838 union {
5839 uint32_t ui;
5840 float uf;
5841 } v;
5842 v.uf = clear_value[i];
5843 uint16_t sign = ((v.ui & 0x80000000u) >> 16u);
5844 uint32_t mantissa = (v.ui & 0x007fffffu);
5845 uint16_t exponent = (uint16_t)std::min(std::max((int)((v.ui & 0x7f800000u) >> 23u) - 112, 0), 31);
5846 mantissa += (mantissa & 0x00001000u) << 1u;
5847 ((uint16_t *)into)[i] = (uint16_t)(sign | ((exponent << 10u) | (mantissa >> 13u)));
5848 }
5849 break;
5850
5851 case T_unsigned_int:
5852 // Note: there are no 32-bit UNORM textures. Therefore, we don't do any
5853 // normalization here, either.
5854 for (int i = 0; i < num_components; ++i) {
5855 ((unsigned int *)into)[i] = (unsigned int)clear_value[i];
5856 }
5857 }
5858
5859 return num_components * cdata->_component_width;
5860}
5861
5862/**
5863 * Should be called after a texture has been loaded into RAM, this considers
5864 * generating mipmaps and/or compressing the RAM image.
5865 *
5866 * Returns true if the image was modified by this operation, false if it
5867 * wasn't.
5868 */
5869bool Texture::
5870consider_auto_process_ram_image(bool generate_mipmaps, bool allow_compression) {
5871 CDWriter cdata(_cycler, false);
5872 return do_consider_auto_process_ram_image(cdata, generate_mipmaps, allow_compression);
5873}
5874
5875/**
5876 * Should be called after a texture has been loaded into RAM, this considers
5877 * generating mipmaps and/or compressing the RAM image.
5878 *
5879 * Returns true if the image was modified by this operation, false if it
5880 * wasn't.
5881 */
5882bool Texture::
5883do_consider_auto_process_ram_image(CData *cdata, bool generate_mipmaps,
5884 bool allow_compression) {
5885 bool modified = false;
5886
5887 if (generate_mipmaps && !driver_generate_mipmaps &&
5888 cdata->_ram_images.size() == 1) {
5889 do_generate_ram_mipmap_images(cdata, false);
5890 modified = true;
5891 }
5892
5893 if (allow_compression && !driver_compress_textures) {
5894 CompressionMode compression = cdata->_compression;
5895 if (compression == CM_default && compressed_textures) {
5896 if (cdata->_texture_type == Texture::TT_buffer_texture) {
5897 compression = CM_off;
5898 }
5899 else {
5900 compression = CM_on;
5901 }
5902 }
5903 if (compression != CM_off && cdata->_ram_image_compression == CM_off) {
5904 GraphicsStateGuardianBase *gsg = GraphicsStateGuardianBase::get_default_gsg();
5905 if (do_compress_ram_image(cdata, compression, QL_default, gsg)) {
5906 if (gobj_cat.is_debug()) {
5907 gobj_cat.debug()
5908 << "Compressed " << get_name() << " with "
5909 << cdata->_ram_image_compression << "\n";
5910 }
5911 modified = true;
5912 }
5913 }
5914 }
5915
5916 return modified;
5917}
5918
5919/**
5920 *
5921 */
5922bool Texture::
5923do_compress_ram_image(CData *cdata, Texture::CompressionMode compression,
5924 Texture::QualityLevel quality_level,
5925 GraphicsStateGuardianBase *gsg) {
5926 nassertr(compression != CM_off, false);
5927
5928 if (cdata->_ram_images.empty() || cdata->_ram_image_compression != CM_off) {
5929 return false;
5930 }
5931
5932 if (compression == CM_on) {
5933 // Select an appropriate compression mode automatically.
5934 switch (cdata->_format) {
5935 case Texture::F_rgbm:
5936 case Texture::F_rgb:
5937 case Texture::F_rgb5:
5938 case Texture::F_rgba5:
5939 case Texture::F_rgb8:
5940 case Texture::F_rgb12:
5941 case Texture::F_rgb332:
5942 case Texture::F_rgb16:
5943 case Texture::F_rgb32:
5944 case Texture::F_rgb10_a2:
5945 if (gsg == nullptr || gsg->get_supports_compressed_texture_format(CM_dxt1)) {
5946 compression = CM_dxt1;
5947 } else if (gsg->get_supports_compressed_texture_format(CM_dxt3)) {
5948 compression = CM_dxt3;
5949 } else if (gsg->get_supports_compressed_texture_format(CM_dxt5)) {
5950 compression = CM_dxt5;
5951 } else if (gsg->get_supports_compressed_texture_format(CM_etc2)) {
5952 compression = CM_etc2;
5953 } else if (gsg->get_supports_compressed_texture_format(CM_etc1)) {
5954 compression = CM_etc1;
5955 }
5956 break;
5957
5958 case Texture::F_rgba4:
5959 if (gsg == nullptr || gsg->get_supports_compressed_texture_format(CM_dxt3)) {
5960 compression = CM_dxt3;
5961 } else if (gsg->get_supports_compressed_texture_format(CM_dxt5)) {
5962 compression = CM_dxt5;
5963 } else if (gsg->get_supports_compressed_texture_format(CM_etc2)) {
5964 compression = CM_etc2;
5965 }
5966 break;
5967
5968 case Texture::F_rgba:
5969 case Texture::F_rgba8:
5970 case Texture::F_rgba12:
5971 case Texture::F_rgba16:
5972 case Texture::F_rgba32:
5973 if (gsg == nullptr || gsg->get_supports_compressed_texture_format(CM_dxt5)) {
5974 compression = CM_dxt5;
5975 } else if (gsg->get_supports_compressed_texture_format(CM_etc2)) {
5976 compression = CM_etc2;
5977 }
5978 break;
5979
5980 case Texture::F_red:
5981 case Texture::F_rg:
5982 if (gsg == nullptr || gsg->get_supports_compressed_texture_format(CM_rgtc)) {
5983 compression = CM_rgtc;
5984 } else if (gsg->get_supports_compressed_texture_format(CM_eac)) {
5985 compression = CM_eac;
5986 }
5987 break;
5988
5989 default:
5990 break;
5991 }
5992 }
5993
5994 // Choose an appropriate quality level.
5995 if (quality_level == Texture::QL_default) {
5996 quality_level = cdata->_quality_level;
5997 }
5998 if (quality_level == Texture::QL_default) {
5999 quality_level = texture_quality_level;
6000 }
6001
6002 if (compression == CM_rgtc) {
6003 // We should compress RGTC ourselves, as squish does not support it.
6004 if (cdata->_component_type != T_unsigned_byte) {
6005 return false;
6006 }
6007
6008 if (!do_has_all_ram_mipmap_images(cdata)) {
6009 // If we're about to compress the RAM image, we should ensure that we
6010 // have all of the mipmap levels first.
6011 do_generate_ram_mipmap_images(cdata, false);
6012 }
6013
6014 RamImages compressed_ram_images;
6015 compressed_ram_images.resize(cdata->_ram_images.size());
6016
6017 for (size_t n = 0; n < cdata->_ram_images.size(); ++n) {
6018 const RamImage *uncompressed_image = &cdata->_ram_images[n];
6019
6020 int x_size = do_get_expected_mipmap_x_size(cdata, n);
6021 int y_size = do_get_expected_mipmap_y_size(cdata, n);
6022 int num_pages = do_get_expected_mipmap_num_pages(cdata, n);
6023
6024 // It is important that we handle image sizes that aren't a multiple of
6025 // the block size, since this method may be used to compress mipmaps,
6026 // which go all the way to 1x1. Pad the image if necessary.
6027 RamImage temp_image;
6028 if ((x_size | y_size) & 0x3) {
6029 int virtual_x_size = x_size;
6030 int virtual_y_size = y_size;
6031 x_size = (x_size + 3) & ~0x3;
6032 y_size = (y_size + 3) & ~0x3;
6033
6034 temp_image._page_size = x_size * y_size * cdata->_num_components;
6035 temp_image._image = PTA_uchar::empty_array(temp_image._page_size * num_pages);
6036
6037 for (int z = 0; z < num_pages; ++z) {
6038 unsigned char *dest = temp_image._image.p() + z * temp_image._page_size;
6039 unsigned const char *src = uncompressed_image->_image.p() + z * uncompressed_image->_page_size;
6040
6041 for (int y = 0; y < virtual_y_size; ++y) {
6042 memcpy(dest, src, virtual_x_size);
6043 src += virtual_x_size;
6044 dest += x_size;
6045 }
6046 }
6047
6048 uncompressed_image = &temp_image;
6049 }
6050
6051 // Create a new image to hold the compressed texture pages.
6052 RamImage &compressed_image = compressed_ram_images[n];
6053 compressed_image._page_size = (x_size * y_size * cdata->_num_components) >> 1;
6054 compressed_image._image = PTA_uchar::empty_array(compressed_image._page_size * num_pages);
6055
6056 if (cdata->_num_components == 1) {
6057 do_compress_ram_image_bc4(*uncompressed_image, compressed_image,
6058 x_size, y_size, num_pages);
6059 } else if (cdata->_num_components == 2) {
6060 do_compress_ram_image_bc5(*uncompressed_image, compressed_image,
6061 x_size, y_size, num_pages);
6062 } else {
6063 // Invalid.
6064 return false;
6065 }
6066 }
6067
6068 cdata->_ram_images.swap(compressed_ram_images);
6069 cdata->_ram_image_compression = CM_rgtc;
6070 return true;
6071 }
6072
6073#ifdef HAVE_SQUISH
6074 if (cdata->_texture_type != TT_3d_texture &&
6075 cdata->_texture_type != TT_2d_texture_array &&
6076 cdata->_component_type == T_unsigned_byte) {
6077 int squish_flags = 0;
6078 switch (compression) {
6079 case CM_dxt1:
6080 squish_flags |= squish::kDxt1;
6081 break;
6082
6083 case CM_dxt3:
6084 squish_flags |= squish::kDxt3;
6085 break;
6086
6087 case CM_dxt5:
6088 squish_flags |= squish::kDxt5;
6089 break;
6090
6091 default:
6092 break;
6093 }
6094
6095 if (squish_flags != 0) {
6096 // This compression mode is supported by squish; use it.
6097 switch (quality_level) {
6098 case QL_fastest:
6099 squish_flags |= squish::kColourRangeFit;
6100 break;
6101
6102 case QL_normal:
6103 // ColourClusterFit is just too slow for everyday use.
6104 squish_flags |= squish::kColourRangeFit;
6105 // squish_flags |= squish::kColourClusterFit;
6106 break;
6107
6108 case QL_best:
6109 squish_flags |= squish::kColourIterativeClusterFit;
6110 break;
6111
6112 default:
6113 break;
6114 }
6115
6116 if (do_squish(cdata, compression, squish_flags)) {
6117 return true;
6118 }
6119 }
6120 }
6121#endif // HAVE_SQUISH
6122
6123 return false;
6124}
6125
6126/**
6127 *
6128 */
6129bool Texture::
6130do_uncompress_ram_image(CData *cdata) {
6131 nassertr(!cdata->_ram_images.empty(), false);
6132
6133 if (cdata->_ram_image_compression == CM_rgtc) {
6134 // We should decompress RGTC ourselves, as squish doesn't support it.
6135 RamImages uncompressed_ram_images;
6136 uncompressed_ram_images.resize(cdata->_ram_images.size());
6137
6138 for (size_t n = 0; n < cdata->_ram_images.size(); ++n) {
6139 const RamImage &compressed_image = cdata->_ram_images[n];
6140
6141 int x_size = do_get_expected_mipmap_x_size(cdata, n);
6142 int y_size = do_get_expected_mipmap_y_size(cdata, n);
6143 int num_pages = do_get_expected_mipmap_num_pages(cdata, n);
6144
6145 RamImage &uncompressed_image = uncompressed_ram_images[n];
6146 uncompressed_image._page_size = do_get_expected_ram_mipmap_page_size(cdata, n);
6147 uncompressed_image._image = PTA_uchar::empty_array(uncompressed_image._page_size * num_pages);
6148
6149 if (cdata->_num_components == 1) {
6150 do_uncompress_ram_image_bc4(compressed_image, uncompressed_image,
6151 x_size, y_size, num_pages);
6152 } else if (cdata->_num_components == 2) {
6153 do_uncompress_ram_image_bc5(compressed_image, uncompressed_image,
6154 x_size, y_size, num_pages);
6155 } else {
6156 // Invalid.
6157 return false;
6158 }
6159 }
6160 cdata->_ram_images.swap(uncompressed_ram_images);
6161 cdata->_ram_image_compression = CM_off;
6162 return true;
6163 }
6164
6165#ifdef HAVE_SQUISH
6166 if (cdata->_texture_type != TT_3d_texture &&
6167 cdata->_texture_type != TT_2d_texture_array &&
6168 cdata->_component_type == T_unsigned_byte) {
6169 int squish_flags = 0;
6170 switch (cdata->_ram_image_compression) {
6171 case CM_dxt1:
6172 squish_flags |= squish::kDxt1;
6173 break;
6174
6175 case CM_dxt3:
6176 squish_flags |= squish::kDxt3;
6177 break;
6178
6179 case CM_dxt5:
6180 squish_flags |= squish::kDxt5;
6181 break;
6182
6183 default:
6184 break;
6185 }
6186
6187 if (squish_flags != 0) {
6188 // This compression mode is supported by squish; use it.
6189 if (do_unsquish(cdata, squish_flags)) {
6190 return true;
6191 }
6192 }
6193 }
6194#endif // HAVE_SQUISH
6195 return false;
6196}
6197
6198/**
6199 * Compresses a RAM image using BC4 compression.
6200 */
6201void Texture::
6202do_compress_ram_image_bc4(const RamImage &uncompressed_image,
6203 RamImage &compressed_image,
6204 int x_size, int y_size, int num_pages) {
6205 int x_blocks = (x_size >> 2);
6206 int y_blocks = (y_size >> 2);
6207
6208 // NB. This algorithm isn't fully optimal, since it doesn't try to make use
6209 // of the secondary interpolation mode supported by BC4. This is not
6210 // important for most textures, but it may be added in the future.
6211
6212 nassertv((size_t)x_blocks * (size_t)y_blocks * 4 * 4 <= uncompressed_image._page_size);
6213 nassertv((size_t)x_size * (size_t)y_size == uncompressed_image._page_size);
6214
6215 static const int remap[] = {1, 7, 6, 5, 4, 3, 2, 0};
6216
6217 for (int z = 0; z < num_pages; ++z) {
6218 unsigned char *dest = compressed_image._image.p() + z * compressed_image._page_size;
6219 unsigned const char *src = uncompressed_image._image.p() + z * uncompressed_image._page_size;
6220
6221 // Convert one 4 x 4 block at a time.
6222 for (int y = 0; y < y_blocks; ++y) {
6223 for (int x = 0; x < x_blocks; ++x) {
6224 int a, b, c, d;
6225 float fac, add;
6226 unsigned char minv, maxv;
6227 unsigned const char *blk = src;
6228
6229 // Find the minimum and maximum value in the block.
6230 minv = blk[0];
6231 maxv = blk[0];
6232 minv = min(blk[1], minv); maxv = max(blk[1], maxv);
6233 minv = min(blk[2], minv); maxv = max(blk[2], maxv);
6234 minv = min(blk[3], minv); maxv = max(blk[3], maxv);
6235 blk += x_size;
6236 minv = min(blk[0], minv); maxv = max(blk[0], maxv);
6237 minv = min(blk[1], minv); maxv = max(blk[1], maxv);
6238 minv = min(blk[2], minv); maxv = max(blk[2], maxv);
6239 minv = min(blk[3], minv); maxv = max(blk[3], maxv);
6240 blk += x_size;
6241 minv = min(blk[0], minv); maxv = max(blk[0], maxv);
6242 minv = min(blk[1], minv); maxv = max(blk[1], maxv);
6243 minv = min(blk[2], minv); maxv = max(blk[2], maxv);
6244 minv = min(blk[3], minv); maxv = max(blk[3], maxv);
6245 blk += x_size;
6246 minv = min(blk[0], minv); maxv = max(blk[0], maxv);
6247 minv = min(blk[1], minv); maxv = max(blk[1], maxv);
6248 minv = min(blk[2], minv); maxv = max(blk[2], maxv);
6249 minv = min(blk[3], minv); maxv = max(blk[3], maxv);
6250
6251 // Now calculate the index for each pixel.
6252 blk = src;
6253 if (maxv > minv) {
6254 fac = 7.5f / (maxv - minv);
6255 } else {
6256 fac = 0;
6257 }
6258 add = -minv * fac;
6259 a = (remap[(int)(blk[0] * fac + add)])
6260 | (remap[(int)(blk[1] * fac + add)] << 3)
6261 | (remap[(int)(blk[2] * fac + add)] << 6)
6262 | (remap[(int)(blk[3] * fac + add)] << 9);
6263 blk += x_size;
6264 b = (remap[(int)(blk[0] * fac + add)] << 4)
6265 | (remap[(int)(blk[1] * fac + add)] << 7)
6266 | (remap[(int)(blk[2] * fac + add)] << 10)
6267 | (remap[(int)(blk[3] * fac + add)] << 13);
6268 blk += x_size;
6269 c = (remap[(int)(blk[0] * fac + add)])
6270 | (remap[(int)(blk[1] * fac + add)] << 3)
6271 | (remap[(int)(blk[2] * fac + add)] << 6)
6272 | (remap[(int)(blk[3] * fac + add)] << 9);
6273 blk += x_size;
6274 d = (remap[(int)(blk[0] * fac + add)] << 4)
6275 | (remap[(int)(blk[1] * fac + add)] << 7)
6276 | (remap[(int)(blk[2] * fac + add)] << 10)
6277 | (remap[(int)(blk[3] * fac + add)] << 13);
6278
6279 *(dest++) = maxv;
6280 *(dest++) = minv;
6281 *(dest++) = a & 0xff;
6282 *(dest++) = (a >> 8) | (b & 0xf0);
6283 *(dest++) = b >> 8;
6284 *(dest++) = c & 0xff;
6285 *(dest++) = (c >> 8) | (d & 0xf0);
6286 *(dest++) = d >> 8;
6287
6288 // Advance to the beginning of the next 4x4 block.
6289 src += 4;
6290 }
6291 src += x_size * 3;
6292 }
6294 }
6295}
6296
6297/**
6298 * Compresses a RAM image using BC5 compression.
6299 */
6300void Texture::
6301do_compress_ram_image_bc5(const RamImage &uncompressed_image,
6302 RamImage &compressed_image,
6303 int x_size, int y_size, int num_pages) {
6304 int x_blocks = (x_size >> 2);
6305 int y_blocks = (y_size >> 2);
6306 int stride = x_size * 2;
6307
6308 // BC5 uses the same compression algorithm as BC4, except repeated for two
6309 // channels.
6310
6311 nassertv((size_t)x_blocks * (size_t)y_blocks * 4 * 4 * 2 <= uncompressed_image._page_size);
6312 nassertv((size_t)stride * (size_t)y_size == uncompressed_image._page_size);
6313
6314 static const int remap[] = {1, 7, 6, 5, 4, 3, 2, 0};
6315
6316 for (int z = 0; z < num_pages; ++z) {
6317 unsigned char *dest = compressed_image._image.p() + z * compressed_image._page_size;
6318 unsigned const char *src = uncompressed_image._image.p() + z * uncompressed_image._page_size;
6319
6320 // Convert one 4 x 4 block at a time.
6321 for (int y = 0; y < y_blocks; ++y) {
6322 for (int x = 0; x < x_blocks; ++x) {
6323 int a, b, c, d;
6324 float fac, add;
6325 unsigned char minv, maxv;
6326 unsigned const char *blk = src;
6327
6328 // Find the minimum and maximum red value in the block.
6329 minv = blk[0];
6330 maxv = blk[0];
6331 minv = min(blk[2], minv); maxv = max(blk[2], maxv);
6332 minv = min(blk[4], minv); maxv = max(blk[4], maxv);
6333 minv = min(blk[6], minv); maxv = max(blk[6], maxv);
6334 blk += stride;
6335 minv = min(blk[0], minv); maxv = max(blk[0], maxv);
6336 minv = min(blk[2], minv); maxv = max(blk[2], maxv);
6337 minv = min(blk[4], minv); maxv = max(blk[4], maxv);
6338 minv = min(blk[6], minv); maxv = max(blk[6], maxv);
6339 blk += stride;
6340 minv = min(blk[0], minv); maxv = max(blk[0], maxv);
6341 minv = min(blk[2], minv); maxv = max(blk[2], maxv);
6342 minv = min(blk[4], minv); maxv = max(blk[4], maxv);
6343 minv = min(blk[6], minv); maxv = max(blk[6], maxv);
6344 blk += stride;
6345 minv = min(blk[0], minv); maxv = max(blk[0], maxv);
6346 minv = min(blk[2], minv); maxv = max(blk[2], maxv);
6347 minv = min(blk[4], minv); maxv = max(blk[4], maxv);
6348 minv = min(blk[6], minv); maxv = max(blk[6], maxv);
6349
6350 // Now calculate the index for each pixel.
6351 if (maxv > minv) {
6352 fac = 7.5f / (maxv - minv);
6353 } else {
6354 fac = 0;
6355 }
6356 add = -minv * fac;
6357 blk = src;
6358 a = (remap[(int)(blk[0] * fac + add)])
6359 | (remap[(int)(blk[2] * fac + add)] << 3)
6360 | (remap[(int)(blk[4] * fac + add)] << 6)
6361 | (remap[(int)(blk[6] * fac + add)] << 9);
6362 blk += stride;
6363 b = (remap[(int)(blk[0] * fac + add)] << 4)
6364 | (remap[(int)(blk[2] * fac + add)] << 7)
6365 | (remap[(int)(blk[4] * fac + add)] << 10)
6366 | (remap[(int)(blk[6] * fac + add)] << 13);
6367 blk += stride;
6368 c = (remap[(int)(blk[0] * fac + add)])
6369 | (remap[(int)(blk[2] * fac + add)] << 3)
6370 | (remap[(int)(blk[4] * fac + add)] << 6)
6371 | (remap[(int)(blk[6] * fac + add)] << 9);
6372 blk += stride;
6373 d = (remap[(int)(blk[0] * fac + add)] << 4)
6374 | (remap[(int)(blk[2] * fac + add)] << 7)
6375 | (remap[(int)(blk[4] * fac + add)] << 10)
6376 | (remap[(int)(blk[6] * fac + add)] << 13);
6377
6378 *(dest++) = maxv;
6379 *(dest++) = minv;
6380 *(dest++) = a & 0xff;
6381 *(dest++) = (a >> 8) | (b & 0xf0);
6382 *(dest++) = b >> 8;
6383 *(dest++) = c & 0xff;
6384 *(dest++) = (c >> 8) | (d & 0xf0);
6385 *(dest++) = d >> 8;
6386
6387 // Find the minimum and maximum green value in the block.
6388 blk = src + 1;
6389 minv = blk[0];
6390 maxv = blk[0];
6391 minv = min(blk[2], minv); maxv = max(blk[2], maxv);
6392 minv = min(blk[4], minv); maxv = max(blk[4], maxv);
6393 minv = min(blk[6], minv); maxv = max(blk[6], maxv);
6394 blk += stride;
6395 minv = min(blk[0], minv); maxv = max(blk[0], maxv);
6396 minv = min(blk[2], minv); maxv = max(blk[2], maxv);
6397 minv = min(blk[4], minv); maxv = max(blk[4], maxv);
6398 minv = min(blk[6], minv); maxv = max(blk[6], maxv);
6399 blk += stride;
6400 minv = min(blk[0], minv); maxv = max(blk[0], maxv);
6401 minv = min(blk[2], minv); maxv = max(blk[2], maxv);
6402 minv = min(blk[4], minv); maxv = max(blk[4], maxv);
6403 minv = min(blk[6], minv); maxv = max(blk[6], maxv);
6404 blk += stride;
6405 minv = min(blk[0], minv); maxv = max(blk[0], maxv);
6406 minv = min(blk[2], minv); maxv = max(blk[2], maxv);
6407 minv = min(blk[4], minv); maxv = max(blk[4], maxv);
6408 minv = min(blk[6], minv); maxv = max(blk[6], maxv);
6409
6410 // Now calculate the index for each pixel.
6411 if (maxv > minv) {
6412 fac = 7.5f / (maxv - minv);
6413 } else {
6414 fac = 0;
6415 }
6416 add = -minv * fac;
6417 blk = src + 1;
6418 a = (remap[(int)(blk[0] * fac + add)])
6419 | (remap[(int)(blk[2] * fac + add)] << 3)
6420 | (remap[(int)(blk[4] * fac + add)] << 6)
6421 | (remap[(int)(blk[6] * fac + add)] << 9);
6422 blk += stride;
6423 b = (remap[(int)(blk[0] * fac + add)] << 4)
6424 | (remap[(int)(blk[2] * fac + add)] << 7)
6425 | (remap[(int)(blk[4] * fac + add)] << 10)
6426 | (remap[(int)(blk[6] * fac + add)] << 13);
6427 blk += stride;
6428 c = (remap[(int)(blk[0] * fac + add)])
6429 | (remap[(int)(blk[2] * fac + add)] << 3)
6430 | (remap[(int)(blk[4] * fac + add)] << 6)
6431 | (remap[(int)(blk[6] * fac + add)] << 9);
6432 blk += stride;
6433 d = (remap[(int)(blk[0] * fac + add)] << 4)
6434 | (remap[(int)(blk[2] * fac + add)] << 7)
6435 | (remap[(int)(blk[4] * fac + add)] << 10)
6436 | (remap[(int)(blk[6] * fac + add)] << 13);
6437
6438 *(dest++) = maxv;
6439 *(dest++) = minv;
6440 *(dest++) = a & 0xff;
6441 *(dest++) = (a >> 8) | (b & 0xf0);
6442 *(dest++) = b >> 8;
6443 *(dest++) = c & 0xff;
6444 *(dest++) = (c >> 8) | (d & 0xf0);
6445 *(dest++) = d >> 8;
6446
6447 // Advance to the beginning of the next 4x4 block.
6448 src += 8;
6449 }
6450 src += stride * 3;
6451 }
6453 }
6454}
6455
6456/**
6457 * Decompresses a RAM image compressed using BC4.
6458 */
6459void Texture::
6460do_uncompress_ram_image_bc4(const RamImage &compressed_image,
6461 RamImage &uncompressed_image,
6462 int x_size, int y_size, int num_pages) {
6463 int x_blocks = (x_size >> 2);
6464 int y_blocks = (y_size >> 2);
6465
6466 for (int z = 0; z < num_pages; ++z) {
6467 unsigned char *dest = uncompressed_image._image.p() + z * uncompressed_image._page_size;
6468 unsigned const char *src = compressed_image._image.p() + z * compressed_image._page_size;
6469
6470 // Unconvert one 4 x 4 block at a time.
6471 uint8_t tbl[8];
6472 for (int y = 0; y < y_blocks; ++y) {
6473 for (int x = 0; x < x_blocks; ++x) {
6474 unsigned char *blk = dest;
6475 tbl[0] = src[0];
6476 tbl[1] = src[1];
6477 if (tbl[0] > tbl[1]) {
6478 tbl[2] = (tbl[0] * 6 + tbl[1] * 1) / 7.0f;
6479 tbl[3] = (tbl[0] * 5 + tbl[1] * 2) / 7.0f;
6480 tbl[4] = (tbl[0] * 4 + tbl[1] * 3) / 7.0f;
6481 tbl[5] = (tbl[0] * 3 + tbl[1] * 4) / 7.0f;
6482 tbl[6] = (tbl[0] * 2 + tbl[1] * 5) / 7.0f;
6483 tbl[7] = (tbl[0] * 1 + tbl[1] * 6) / 7.0f;
6484 } else {
6485 tbl[2] = (tbl[0] * 4 + tbl[1] * 1) / 5.0f;
6486 tbl[3] = (tbl[0] * 3 + tbl[1] * 2) / 5.0f;
6487 tbl[4] = (tbl[0] * 2 + tbl[1] * 3) / 5.0f;
6488 tbl[5] = (tbl[0] * 1 + tbl[1] * 4) / 5.0f;
6489 tbl[6] = 0;
6490 tbl[7] = 255;
6491 }
6492 int v = src[2] + (src[3] << 8) + (src[4] << 16);
6493 blk[0] = tbl[v & 0x7];
6494 blk[1] = tbl[(v & 0x000038) >> 3];
6495 blk[2] = tbl[(v & 0x0001c0) >> 6];
6496 blk[3] = tbl[(v & 0x000e00) >> 9];
6497 blk += x_size;
6498 blk[0] = tbl[(v & 0x007000) >> 12];
6499 blk[1] = tbl[(v & 0x038000) >> 15];
6500 blk[2] = tbl[(v & 0x1c0000) >> 18];
6501 blk[3] = tbl[(v & 0xe00000) >> 21];
6502 blk += x_size;
6503 v = src[5] + (src[6] << 8) + (src[7] << 16);
6504 blk[0] = tbl[v & 0x7];
6505 blk[1] = tbl[(v & 0x000038) >> 3];
6506 blk[2] = tbl[(v & 0x0001c0) >> 6];
6507 blk[3] = tbl[(v & 0x000e00) >> 9];
6508 blk += x_size;
6509 blk[0] = tbl[(v & 0x007000) >> 12];
6510 blk[1] = tbl[(v & 0x038000) >> 15];
6511 blk[2] = tbl[(v & 0x1c0000) >> 18];
6512 blk[3] = tbl[(v & 0xe00000) >> 21];
6513 src += 8;
6514 dest += 4;
6515 }
6516 dest += x_size * 3;
6517 }
6519 }
6520}
6521
6522/**
6523 * Decompresses a RAM image compressed using BC5.
6524 */
6525void Texture::
6526do_uncompress_ram_image_bc5(const RamImage &compressed_image,
6527 RamImage &uncompressed_image,
6528 int x_size, int y_size, int num_pages) {
6529 int x_blocks = (x_size >> 2);
6530 int y_blocks = (y_size >> 2);
6531 int stride = x_size * 2;
6532
6533 for (int z = 0; z < num_pages; ++z) {
6534 unsigned char *dest = uncompressed_image._image.p() + z * uncompressed_image._page_size;
6535 unsigned const char *src = compressed_image._image.p() + z * compressed_image._page_size;
6536
6537 // Unconvert one 4 x 4 block at a time.
6538 uint8_t red[8];
6539 uint8_t grn[8];
6540 for (int y = 0; y < y_blocks; ++y) {
6541 for (int x = 0; x < x_blocks; ++x) {
6542 unsigned char *blk = dest;
6543 red[0] = src[0];
6544 red[1] = src[1];
6545 if (red[0] > red[1]) {
6546 red[2] = (red[0] * 6 + red[1] * 1) / 7.0f;
6547 red[3] = (red[0] * 5 + red[1] * 2) / 7.0f;
6548 red[4] = (red[0] * 4 + red[1] * 3) / 7.0f;
6549 red[5] = (red[0] * 3 + red[1] * 4) / 7.0f;
6550 red[6] = (red[0] * 2 + red[1] * 5) / 7.0f;
6551 red[7] = (red[0] * 1 + red[1] * 6) / 7.0f;
6552 } else {
6553 red[2] = (red[0] * 4 + red[1] * 1) / 5.0f;
6554 red[3] = (red[0] * 3 + red[1] * 2) / 5.0f;
6555 red[4] = (red[0] * 2 + red[1] * 3) / 5.0f;
6556 red[5] = (red[0] * 1 + red[1] * 4) / 5.0f;
6557 red[6] = 0;
6558 red[7] = 255;
6559 }
6560 grn[0] = src[8];
6561 grn[1] = src[9];
6562 if (grn[0] > grn[1]) {
6563 grn[2] = (grn[0] * 6 + grn[1] * 1) / 7.0f;
6564 grn[3] = (grn[0] * 5 + grn[1] * 2) / 7.0f;
6565 grn[4] = (grn[0] * 4 + grn[1] * 3) / 7.0f;
6566 grn[5] = (grn[0] * 3 + grn[1] * 4) / 7.0f;
6567 grn[6] = (grn[0] * 2 + grn[1] * 5) / 7.0f;
6568 grn[7] = (grn[0] * 1 + grn[1] * 6) / 7.0f;
6569 } else {
6570 grn[2] = (grn[0] * 4 + grn[1] * 1) / 5.0f;
6571 grn[3] = (grn[0] * 3 + grn[1] * 2) / 5.0f;
6572 grn[4] = (grn[0] * 2 + grn[1] * 3) / 5.0f;
6573 grn[5] = (grn[0] * 1 + grn[1] * 4) / 5.0f;
6574 grn[6] = 0;
6575 grn[7] = 255;
6576 }
6577 int r = src[2] + (src[3] << 8) + (src[4] << 16);
6578 int g = src[10] + (src[11] << 8) + (src[12] << 16);
6579 blk[0] = red[r & 0x7];
6580 blk[1] = grn[g & 0x7];
6581 blk[2] = red[(r & 0x000038) >> 3];
6582 blk[3] = grn[(g & 0x000038) >> 3];
6583 blk[4] = red[(r & 0x0001c0) >> 6];
6584 blk[5] = grn[(g & 0x0001c0) >> 6];
6585 blk[6] = red[(r & 0x000e00) >> 9];
6586 blk[7] = grn[(g & 0x000e00) >> 9];
6587 blk += stride;
6588 blk[0] = red[(r & 0x007000) >> 12];
6589 blk[1] = grn[(g & 0x007000) >> 12];
6590 blk[2] = red[(r & 0x038000) >> 15];
6591 blk[3] = grn[(g & 0x038000) >> 15];
6592 blk[4] = red[(r & 0x1c0000) >> 18];
6593 blk[5] = grn[(g & 0x1c0000) >> 18];
6594 blk[6] = red[(r & 0xe00000) >> 21];
6595 blk[7] = grn[(g & 0xe00000) >> 21];
6596 blk += stride;
6597 r = src[5] + (src[6] << 8) + (src[7] << 16);
6598 g = src[13] + (src[14] << 8) + (src[15] << 16);
6599 blk[0] = red[r & 0x7];
6600 blk[1] = grn[g & 0x7];
6601 blk[2] = red[(r & 0x000038) >> 3];
6602 blk[3] = grn[(g & 0x000038) >> 3];
6603 blk[4] = red[(r & 0x0001c0) >> 6];
6604 blk[5] = grn[(g & 0x0001c0) >> 6];
6605 blk[6] = red[(r & 0x000e00) >> 9];
6606 blk[7] = grn[(g & 0x000e00) >> 9];
6607 blk += stride;
6608 blk[0] = red[(r & 0x007000) >> 12];
6609 blk[1] = grn[(g & 0x007000) >> 12];
6610 blk[2] = red[(r & 0x038000) >> 15];
6611 blk[3] = grn[(g & 0x038000) >> 15];
6612 blk[4] = red[(r & 0x1c0000) >> 18];
6613 blk[5] = grn[(g & 0x1c0000) >> 18];
6614 blk[6] = red[(r & 0xe00000) >> 21];
6615 blk[7] = grn[(g & 0xe00000) >> 21];
6616 src += 16;
6617 dest += 8;
6618 }
6619 dest += stride * 3;
6620 }
6622 }
6623}
6624
6625/**
6626 *
6627 */
6628bool Texture::
6629do_has_all_ram_mipmap_images(const CData *cdata) const {
6630 if (cdata->_ram_images.empty() || cdata->_ram_images[0]._image.empty()) {
6631 // If we don't even have a base image, the answer is no.
6632 return false;
6633 }
6634 if (!uses_mipmaps()) {
6635 // If we have a base image and don't require mipmapping, the answer is
6636 // yes.
6637 return true;
6638 }
6639
6640 // Check that we have enough mipmap levels to meet the size requirements.
6641 int size = max(cdata->_x_size, max(cdata->_y_size, cdata->_z_size));
6642 int n = 0;
6643 int x = 1;
6644 while (x < size) {
6645 x = (x << 1);
6646 ++n;
6647 if (n >= (int)cdata->_ram_images.size() || cdata->_ram_images[n]._image.empty()) {
6648 return false;
6649 }
6650 }
6651
6652 return true;
6653}
6654
6655/**
6656 * Considers whether the z_size (or num_views) should automatically be
6657 * adjusted when the user loads a new page. Returns true if the z size is
6658 * valid, false otherwise.
6659 *
6660 * Assumes the lock is already held.
6661 */
6662bool Texture::
6663do_reconsider_z_size(CData *cdata, int z, const LoaderOptions &options) {
6664 if (z >= cdata->_z_size * cdata->_num_views) {
6665 bool num_views_specified = true;
6666 if (options.get_texture_flags() & LoaderOptions::TF_multiview) {
6667 // This flag is false if it is a multiview texture with an unspecified
6668 // number of views. It is true if it is not a multiview texture, or if it
6669 // is but the number of views is explicitly specified.
6670 num_views_specified = (options.get_texture_num_views() != 0);
6671 }
6672
6673 if (num_views_specified &&
6674 (cdata->_texture_type == Texture::TT_3d_texture ||
6675 cdata->_texture_type == Texture::TT_2d_texture_array)) {
6676 // If we're loading a page past _z_size, treat it as an implicit request
6677 // to enlarge _z_size. However, this is only legal if this is, in fact,
6678 // a 3-d texture or a 2d texture array (cube maps always have z_size 6,
6679 // and other types have z_size 1).
6680 nassertr(cdata->_num_views != 0, false);
6681 cdata->_z_size = (z / cdata->_num_views) + 1;
6682
6683 } else if (cdata->_z_size != 0) {
6684 // In the case of a 2-d texture or cube map, or a 3-d texture with an
6685 // unspecified _num_views, assume we're loading views of a multiview
6686 // texture.
6687 cdata->_num_views = (z / cdata->_z_size) + 1;
6688
6689 } else {
6690 // The first image loaded sets an implicit z-size.
6691 cdata->_z_size = 1;
6692 }
6693
6694 // Increase the size of the data buffer to make room for the new texture
6695 // level.
6696 do_allocate_pages(cdata);
6697 }
6698
6699 return true;
6700}
6701
6702/**
6703 * Called internally by do_reconsider_z_size() to allocate new memory in
6704 * _ram_images[0] for the new number of pages.
6705 *
6706 * Assumes the lock is already held.
6707 */
6708void Texture::
6709do_allocate_pages(CData *cdata) {
6710 size_t new_size = do_get_expected_ram_image_size(cdata);
6711 if (!cdata->_ram_images.empty() &&
6712 !cdata->_ram_images[0]._image.empty() &&
6713 new_size > cdata->_ram_images[0]._image.size()) {
6714 cdata->_ram_images[0]._image.insert(cdata->_ram_images[0]._image.end(), new_size - cdata->_ram_images[0]._image.size(), 0);
6715 nassertv(cdata->_ram_images[0]._image.size() == new_size);
6716 }
6717}
6718
6719/**
6720 * Resets the internal Texture properties when a new image file is loaded.
6721 * Returns true if the new image is valid, false otherwise.
6722 *
6723 * Assumes the lock is already held.
6724 */
6725bool Texture::
6726do_reconsider_image_properties(CData *cdata, int x_size, int y_size, int num_components,
6727 Texture::ComponentType component_type, int z,
6728 const LoaderOptions &options) {
6729 if (!cdata->_loaded_from_image || num_components != cdata->_num_components || component_type != cdata->_component_type) {
6730 // Come up with a default format based on the number of channels. But
6731 // only do this the first time the file is loaded, or if the number of
6732 // channels in the image changes on subsequent loads.
6733
6734 // TODO: handle sRGB properly
6735 switch (num_components) {
6736 case 1:
6737 cdata->_format = F_luminance;
6738 break;
6739
6740 case 2:
6741 cdata->_format = F_luminance_alpha;
6742 break;
6743
6744 case 3:
6745 cdata->_format = F_rgb;
6746 break;
6747
6748 case 4:
6749 cdata->_format = F_rgba;
6750 break;
6751
6752 default:
6753 // Eh?
6754 nassert_raise("unexpected channel count");
6755 cdata->_format = F_rgb;
6756 return false;
6757 }
6758 }
6759
6760 if (!cdata->_loaded_from_image) {
6761 if ((options.get_texture_flags() & LoaderOptions::TF_allow_1d) &&
6762 cdata->_texture_type == TT_2d_texture && x_size != 1 && y_size == 1) {
6763 // If we're loading an Nx1 size texture, infer a 1-d texture type.
6764 cdata->_texture_type = TT_1d_texture;
6765 }
6766
6767#ifndef NDEBUG
6768 switch (cdata->_texture_type) {
6769 case TT_1d_texture:
6770 case TT_buffer_texture:
6771 nassertr(y_size == 1, false);
6772 break;
6773 case TT_cube_map:
6774 case TT_cube_map_array:
6775 nassertr(x_size == y_size, false);
6776 break;
6777 default:
6778 break;
6779 }
6780#endif
6781 if ((cdata->_x_size != x_size)||(cdata->_y_size != y_size)) {
6782 do_set_pad_size(cdata, 0, 0, 0);
6783 }
6784 cdata->_x_size = x_size;
6785 cdata->_y_size = y_size;
6786 cdata->_num_components = num_components;
6787 do_set_component_type(cdata, component_type);
6788
6789 } else {
6790 if (cdata->_x_size != x_size ||
6791 cdata->_y_size != y_size ||
6792 cdata->_num_components != num_components ||
6793 cdata->_component_type != component_type) {
6794 gobj_cat.error()
6795 << "Texture properties have changed for texture " << get_name()
6796 << " page " << z << ".\n";
6797 return false;
6798 }
6799 }
6800
6801 return true;
6802}
6803
6804/**
6805 *
6806 */
6807bool Texture::
6808do_rescale_texture(CData *cdata) {
6809 int new_x_size = cdata->_x_size;
6810 int new_y_size = cdata->_y_size;
6811 if (cdata->_z_size * cdata->_num_views != 1) {
6812 nassert_raise("rescale_texture() doesn't support 3-d or multiview textures.");
6813 return false;
6814 }
6815
6816 if (do_adjust_this_size(cdata, new_x_size, new_y_size, get_name(), false)) {
6817 // OK, we have to scale the image.
6818 PNMImage orig_image;
6819 if (!do_store_one(cdata, orig_image, 0, 0)) {
6820 gobj_cat.warning()
6821 << "Couldn't get image in rescale_texture()\n";
6822 return false;
6823 }
6824
6825 gobj_cat.info()
6826 << "Resizing " << get_name() << " to " << new_x_size << " x "
6827 << new_y_size << "\n";
6828 PNMImage new_image(new_x_size, new_y_size, orig_image.get_num_channels(),
6829 orig_image.get_maxval(), orig_image.get_type(),
6830 orig_image.get_color_space());
6831 new_image.quick_filter_from(orig_image);
6832
6833 do_clear_ram_image(cdata);
6834 cdata->inc_image_modified();
6835 cdata->_x_size = new_x_size;
6836 cdata->_y_size = new_y_size;
6837 if (!do_load_one(cdata, new_image, get_name(), 0, 0, LoaderOptions())) {
6838 return false;
6839 }
6840
6841 return true;
6842 }
6843
6844 // Maybe we should pad the image.
6845 int pad_x_size = 0;
6846 int pad_y_size = 0;
6847 if (do_get_auto_texture_scale(cdata) == ATS_pad) {
6848 new_x_size = cdata->_x_size;
6849 new_y_size = cdata->_y_size;
6850 if (do_adjust_this_size(cdata, new_x_size, new_y_size, get_name(), true)) {
6851 pad_x_size = new_x_size - cdata->_x_size;
6852 pad_y_size = new_y_size - cdata->_y_size;
6853
6854 PNMImage orig_image;
6855 if (!do_store_one(cdata, orig_image, 0, 0)) {
6856 gobj_cat.warning()
6857 << "Couldn't get image in rescale_texture()\n";
6858 return false;
6859 }
6860 PNMImage new_image(new_x_size, new_y_size, orig_image.get_num_channels(),
6861 orig_image.get_maxval(), orig_image.get_type(),
6862 orig_image.get_color_space());
6863 new_image.copy_sub_image(orig_image, 0, new_y_size - orig_image.get_y_size());
6864
6865 do_clear_ram_image(cdata);
6866 cdata->_loaded_from_image = false;
6867 cdata->inc_image_modified();
6868 if (!do_load_one(cdata, new_image, get_name(), 0, 0, LoaderOptions())) {
6869 return false;
6870 }
6871
6872 do_set_pad_size(cdata, pad_x_size, pad_y_size, 0);
6873 return true;
6874 }
6875 }
6876
6877 // No changes needed.
6878 return false;
6879}
6880
6881/**
6882 *
6883 */
6884PT(Texture) Texture::
6885make_copy_impl() const {
6886 CDReader cdata(_cycler);
6887 return do_make_copy(cdata);
6888}
6889
6890/**
6891 *
6892 */
6893PT(Texture) Texture::
6894do_make_copy(const CData *cdata) const {
6895 PT(Texture) tex = new Texture(get_name());
6896 CDWriter cdata_tex(tex->_cycler, true);
6897 tex->do_assign(cdata_tex, this, cdata);
6898 return tex;
6899}
6900
6901/**
6902 * The internal implementation of operator =(). Assumes the lock is already
6903 * held on both Textures.
6904 */
6905void Texture::
6906do_assign(CData *cdata, const Texture *copy, const CData *cdata_copy) {
6907 cdata->do_assign(cdata_copy);
6908}
6909
6910/**
6911 * The protected implementation of clear(). Assumes the lock is already held.
6912 */
6913void Texture::
6914do_clear(CData *cdata) {
6915 Texture tex;
6916 tex.local_object();
6917 CDReader cdata_tex(tex._cycler);
6918 do_assign(cdata, &tex, cdata_tex);
6919
6920 cdata->inc_properties_modified();
6921 cdata->inc_image_modified();
6922 cdata->inc_simple_image_modified();
6923}
6924
6925/**
6926 *
6927 */
6928void Texture::
6929do_setup_texture(CData *cdata, Texture::TextureType texture_type,
6930 int x_size, int y_size, int z_size,
6931 Texture::ComponentType component_type,
6932 Texture::Format format) {
6933 switch (texture_type) {
6934 case TT_1d_texture:
6935 nassertv(y_size == 1 && z_size == 1);
6936 break;
6937
6938 case TT_2d_texture:
6939 nassertv(z_size == 1);
6940 break;
6941
6942 case TT_3d_texture:
6943 break;
6944
6945 case TT_2d_texture_array:
6946 break;
6947
6948 case TT_cube_map:
6949 // Cube maps must always consist of six square images.
6950 nassertv(x_size == y_size && z_size == 6);
6951
6952 // In principle the wrap mode shouldn't mean anything to a cube map, but
6953 // some drivers seem to misbehave if it's other than
6954 // SamplerState::WM_clamp.
6955 cdata->_default_sampler.set_wrap_u(SamplerState::WM_clamp);
6956 cdata->_default_sampler.set_wrap_v(SamplerState::WM_clamp);
6957 cdata->_default_sampler.set_wrap_w(SamplerState::WM_clamp);
6958 break;
6959
6960 case TT_cube_map_array:
6961 // A cube map array's z_size must be a multiple of 6.
6962 nassertv(x_size == y_size && z_size % 6 == 0);
6963
6964 cdata->_default_sampler.set_wrap_u(SamplerState::WM_clamp);
6965 cdata->_default_sampler.set_wrap_v(SamplerState::WM_clamp);
6966 cdata->_default_sampler.set_wrap_w(SamplerState::WM_clamp);
6967 break;
6968
6969 case TT_buffer_texture:
6970 nassertv(y_size == 1 && z_size == 1);
6971 break;
6972
6973 case TT_1d_texture_array:
6974 nassertv(z_size == 1);
6975 break;
6976 }
6977
6978 if (texture_type != TT_2d_texture) {
6979 do_clear_simple_ram_image(cdata);
6980 }
6981
6982 cdata->_texture_type = texture_type;
6983 cdata->_x_size = x_size;
6984 cdata->_y_size = y_size;
6985 cdata->_z_size = z_size;
6986 cdata->_num_views = 1;
6987 do_set_component_type(cdata, component_type);
6988 do_set_format(cdata, format);
6989
6990 do_clear_ram_image(cdata);
6991 do_set_pad_size(cdata, 0, 0, 0);
6992 cdata->_orig_file_x_size = 0;
6993 cdata->_orig_file_y_size = 0;
6994 cdata->_loaded_from_image = false;
6995 cdata->_loaded_from_txo = false;
6996 cdata->_has_read_pages = false;
6997 cdata->_has_read_mipmaps = false;
6998}
6999
7000/**
7001 *
7002 */
7003void Texture::
7004do_set_format(CData *cdata, Texture::Format format) {
7005 if (format == cdata->_format) {
7006 return;
7007 }
7008 cdata->_format = format;
7009 cdata->inc_properties_modified();
7010
7011 switch (cdata->_format) {
7012 case F_color_index:
7013 case F_depth_stencil:
7014 case F_depth_component:
7015 case F_depth_component16:
7016 case F_depth_component24:
7017 case F_depth_component32:
7018 case F_red:
7019 case F_green:
7020 case F_blue:
7021 case F_alpha:
7022 case F_luminance:
7023 case F_r16:
7024 case F_r16i:
7025 case F_sluminance:
7026 case F_r32i:
7027 case F_r32:
7028 case F_r8i:
7029 cdata->_num_components = 1;
7030 break;
7031
7032 case F_luminance_alpha:
7033 case F_luminance_alphamask:
7034 case F_rg16:
7035 case F_sluminance_alpha:
7036 case F_rg32:
7037 case F_rg8i:
7038 case F_rg:
7039 case F_rg16i:
7040 case F_rg32i:
7041 cdata->_num_components = 2;
7042 break;
7043
7044 case F_rgb:
7045 case F_rgb5:
7046 case F_rgb8:
7047 case F_rgb12:
7048 case F_rgb332:
7049 case F_rgb16:
7050 case F_srgb:
7051 case F_rgb32:
7052 case F_rgb8i:
7053 case F_r11_g11_b10:
7054 case F_rgb9_e5:
7055 case F_rgb16i:
7056 case F_rgb32i:
7057 cdata->_num_components = 3;
7058 break;
7059
7060 case F_rgba:
7061 case F_rgbm:
7062 case F_rgba4:
7063 case F_rgba5:
7064 case F_rgba8:
7065 case F_rgba12:
7066 case F_rgba16:
7067 case F_rgba32:
7068 case F_srgb_alpha:
7069 case F_rgba8i:
7070 case F_rgb10_a2:
7071 case F_rgba16i:
7072 case F_rgba32i:
7073 cdata->_num_components = 4;
7074 break;
7075 }
7076}
7077
7078/**
7079 *
7080 */
7081void Texture::
7082do_set_component_type(CData *cdata, Texture::ComponentType component_type) {
7083 cdata->_component_type = component_type;
7084
7085 switch (component_type) {
7086 case T_unsigned_byte:
7087 case T_byte:
7088 cdata->_component_width = 1;
7089 break;
7090
7091 case T_unsigned_short:
7092 case T_short:
7093 case T_half_float:
7094 cdata->_component_width = 2;
7095 break;
7096
7097 case T_float:
7098 case T_unsigned_int_24_8:
7099 case T_int:
7100 case T_unsigned_int:
7101 cdata->_component_width = 4;
7102 break;
7103 }
7104}
7105
7106/**
7107 *
7108 */
7109void Texture::
7110do_set_x_size(CData *cdata, int x_size) {
7111 if (cdata->_x_size != x_size) {
7112 cdata->_x_size = x_size;
7113 cdata->inc_image_modified();
7114 do_clear_ram_image(cdata);
7115 do_set_pad_size(cdata, 0, 0, 0);
7116 }
7117}
7118
7119/**
7120 *
7121 */
7122void Texture::
7123do_set_y_size(CData *cdata, int y_size) {
7124 if (cdata->_y_size != y_size) {
7125 nassertv((cdata->_texture_type != Texture::TT_buffer_texture &&
7126 cdata->_texture_type != Texture::TT_1d_texture) || y_size == 1);
7127 cdata->_y_size = y_size;
7128 cdata->inc_image_modified();
7129 do_clear_ram_image(cdata);
7130 do_set_pad_size(cdata, 0, 0, 0);
7131 }
7132}
7133
7134/**
7135 * Changes the z size indicated for the texture. This also implicitly unloads
7136 * the texture if it has already been loaded.
7137 */
7138void Texture::
7139do_set_z_size(CData *cdata, int z_size) {
7140 if (cdata->_z_size != z_size) {
7141 nassertv((cdata->_texture_type == Texture::TT_3d_texture) ||
7142 (cdata->_texture_type == Texture::TT_cube_map && z_size == 6) ||
7143 (cdata->_texture_type == Texture::TT_cube_map_array && z_size % 6 == 0) ||
7144 (cdata->_texture_type == Texture::TT_2d_texture_array) || (z_size == 1));
7145 cdata->_z_size = z_size;
7146 cdata->inc_image_modified();
7147 do_clear_ram_image(cdata);
7148 do_set_pad_size(cdata, 0, 0, 0);
7149 }
7150}
7151
7152/**
7153 *
7154 */
7155void Texture::
7156do_set_num_views(CData *cdata, int num_views) {
7157 nassertv(num_views >= 1);
7158 if (cdata->_num_views != num_views) {
7159 cdata->_num_views = num_views;
7160 if (do_has_ram_image(cdata)) {
7161 cdata->inc_image_modified();
7162 do_clear_ram_image(cdata);
7163 }
7164 do_set_pad_size(cdata, 0, 0, 0);
7165 }
7166}
7167
7168/**
7169 *
7170 */
7171void Texture::
7172do_set_wrap_u(CData *cdata, SamplerState::WrapMode wrap) {
7173 if (cdata->_default_sampler.get_wrap_u() != wrap) {
7174 cdata->inc_properties_modified();
7175 cdata->_default_sampler.set_wrap_u(wrap);
7176 }
7177}
7178
7179/**
7180 *
7181 */
7182void Texture::
7183do_set_wrap_v(CData *cdata, SamplerState::WrapMode wrap) {
7184 if (cdata->_default_sampler.get_wrap_v() != wrap) {
7185 cdata->inc_properties_modified();
7186 cdata->_default_sampler.set_wrap_v(wrap);
7187 }
7188}
7189
7190/**
7191 *
7192 */
7193void Texture::
7194do_set_wrap_w(CData *cdata, SamplerState::WrapMode wrap) {
7195 if (cdata->_default_sampler.get_wrap_w() != wrap) {
7196 cdata->inc_properties_modified();
7197 cdata->_default_sampler.set_wrap_w(wrap);
7198 }
7199}
7200
7201/**
7202 *
7203 */
7204void Texture::
7205do_set_minfilter(CData *cdata, SamplerState::FilterType filter) {
7206 if (cdata->_default_sampler.get_minfilter() != filter) {
7207 cdata->inc_properties_modified();
7208 cdata->_default_sampler.set_minfilter(filter);
7209 }
7210}
7211
7212/**
7213 *
7214 */
7215void Texture::
7216do_set_magfilter(CData *cdata, SamplerState::FilterType filter) {
7217 if (cdata->_default_sampler.get_magfilter() != filter) {
7218 cdata->inc_properties_modified();
7219 cdata->_default_sampler.set_magfilter(filter);
7220 }
7221}
7222
7223/**
7224 *
7225 */
7226void Texture::
7227do_set_anisotropic_degree(CData *cdata, int anisotropic_degree) {
7228 if (cdata->_default_sampler.get_anisotropic_degree() != anisotropic_degree) {
7229 cdata->inc_properties_modified();
7230 cdata->_default_sampler.set_anisotropic_degree(anisotropic_degree);
7231 }
7232}
7233
7234/**
7235 *
7236 */
7237void Texture::
7238do_set_border_color(CData *cdata, const LColor &color) {
7239 if (cdata->_default_sampler.get_border_color() != color) {
7240 cdata->inc_properties_modified();
7241 cdata->_default_sampler.set_border_color(color);
7242 }
7243}
7244
7245/**
7246 *
7247 */
7248void Texture::
7249do_set_compression(CData *cdata, Texture::CompressionMode compression) {
7250 if (cdata->_compression != compression) {
7251 cdata->inc_properties_modified();
7252 cdata->_compression = compression;
7253
7254 if (do_has_ram_image(cdata)) {
7255 bool has_compression = do_has_compression(cdata);
7256 bool has_ram_image_compression = (cdata->_ram_image_compression != CM_off);
7257 if (has_compression != has_ram_image_compression ||
7258 has_compression) {
7259 // Reload if we're turning compression on or off, or if we're changing
7260 // the compression mode to a different kind of compression.
7261 do_reload(cdata);
7262 }
7263 }
7264 }
7265}
7266
7267/**
7268 *
7269 */
7270void Texture::
7271do_set_quality_level(CData *cdata, Texture::QualityLevel quality_level) {
7272 if (cdata->_quality_level != quality_level) {
7273 cdata->inc_properties_modified();
7274 cdata->_quality_level = quality_level;
7275 }
7276}
7277
7278/**
7279 *
7280 */
7281bool Texture::
7282do_has_compression(const CData *cdata) const {
7283 if (cdata->_compression == CM_default) {
7284 if (cdata->_texture_type != Texture::TT_buffer_texture) {
7285 return compressed_textures;
7286 } else {
7287 return false;
7288 }
7289 } else {
7290 return (cdata->_compression != CM_off);
7291 }
7292}
7293
7294/**
7295 * The protected implementation of has_ram_image(). Assumes the lock is
7296 * already held.
7297 */
7298bool Texture::
7299do_has_ram_image(const CData *cdata) const {
7300 return !cdata->_ram_images.empty() && !cdata->_ram_images[0]._image.empty();
7301}
7302
7303/**
7304 * The protected implementation of has_uncompressed_ram_image(). Assumes the
7305 * lock is already held.
7306 */
7307bool Texture::
7308do_has_uncompressed_ram_image(const CData *cdata) const {
7309 return !cdata->_ram_images.empty() && !cdata->_ram_images[0]._image.empty() && cdata->_ram_image_compression == CM_off;
7310}
7311
7312/**
7313 *
7314 */
7315CPTA_uchar Texture::
7316do_get_ram_image(CData *cdata) {
7317 if (!do_has_ram_image(cdata) && do_can_reload(cdata)) {
7318 do_reload_ram_image(cdata, true);
7319
7320 if (do_has_ram_image(cdata)) {
7321 // Normally, we don't update the cdata->_modified semaphores in a
7322 // do_blah method, but we'll make an exception in this case, because
7323 // it's easiest to modify these here, and only when we know it's needed.
7324 cdata->inc_image_modified();
7325 cdata->inc_properties_modified();
7326 }
7327 }
7328
7329 if (cdata->_ram_images.empty()) {
7330 return CPTA_uchar(get_class_type());
7331 }
7332
7333 return cdata->_ram_images[0]._image;
7334}
7335
7336/**
7337 *
7338 */
7339CPTA_uchar Texture::
7340do_get_uncompressed_ram_image(CData *cdata) {
7341 if (!cdata->_ram_images.empty() && cdata->_ram_image_compression != CM_off) {
7342 // We have an image in-ram, but it's compressed. Try to uncompress it
7343 // first.
7344 if (do_uncompress_ram_image(cdata)) {
7345 if (gobj_cat.is_debug()) {
7346 gobj_cat.debug()
7347 << "Uncompressed " << get_name() << "\n";
7348 }
7349 return cdata->_ram_images[0]._image;
7350 }
7351 }
7352
7353 // Couldn't uncompress the existing image. Try to reload it.
7354 if ((!do_has_ram_image(cdata) || cdata->_ram_image_compression != CM_off) && do_can_reload(cdata)) {
7355 do_reload_ram_image(cdata, false);
7356 }
7357
7358 if (!cdata->_ram_images.empty() && cdata->_ram_image_compression != CM_off) {
7359 // Great, now we have an image.
7360 if (do_uncompress_ram_image(cdata)) {
7361 gobj_cat.info()
7362 << "Uncompressed " << get_name() << "\n";
7363 return cdata->_ram_images[0]._image;
7364 }
7365 }
7366
7367 if (cdata->_ram_images.empty() || cdata->_ram_image_compression != CM_off) {
7368 return CPTA_uchar(get_class_type());
7369 }
7370
7371 return cdata->_ram_images[0]._image;
7372}
7373
7374/**
7375 * Returns the uncompressed system-RAM image data associated with the texture.
7376 * Rather than just returning a pointer to the data, like
7377 * get_uncompressed_ram_image, this function first processes the data and
7378 * reorders the components using the specified format string, and places these
7379 * into a new char array.
7380 *
7381 * The 'format' argument should specify in which order the components of the
7382 * texture must be. For example, valid format strings are "RGBA", "GA",
7383 * "ABRG" or "AAA". A component can also be written as "0" or "1", which
7384 * means an empty/black or a full/white channel, respectively.
7385 *
7386 * This function is particularly useful to copy an image in-memory to a
7387 * different library (for example, PIL or wxWidgets) that requires a different
7388 * component order than Panda's internal format, BGRA. Note, however, that
7389 * this conversion can still be too slow if you want to do it every frame, and
7390 * should thus be avoided for that purpose.
7391 *
7392 * The only requirement for the reordering is that an uncompressed image must
7393 * be available. If the RAM image is compressed, it will attempt to re-load
7394 * the texture from disk; if it doesn't find an uncompressed image there, it
7395 * will return NULL.
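 *
 * A short usage sketch (the variable name `tex` is assumed):
 *
 *   CPTA_uchar rgba = tex->get_ram_image_as("RGBA");
 *   CPTA_uchar alpha_only = tex->get_ram_image_as("A");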
7396 */
7397CPTA_uchar Texture::
7398get_ram_image_as(const string &requested_format) {
7399 CDWriter cdata(_cycler, false);
7400 string format = upcase(requested_format);
7401
7402 // Make sure we can grab something that's uncompressed.
7403 CPTA_uchar data = do_get_uncompressed_ram_image(cdata);
7404 if (data == nullptr) {
7405 gobj_cat.error() << "Couldn't find an uncompressed RAM image!\n";
7406 return CPTA_uchar(get_class_type());
7407 }
7408 size_t imgsize = (size_t)cdata->_x_size * (size_t)cdata->_y_size *
7409 (size_t)cdata->_z_size * (size_t)cdata->_num_views;
7410 nassertr(cdata->_num_components > 0 && cdata->_num_components <= 4, CPTA_uchar(get_class_type()));
7411 nassertr(data.size() == (size_t)(cdata->_component_width * cdata->_num_components * imgsize), CPTA_uchar(get_class_type()));
7412
7413 // Check if the format is already what we have internally.
7414 if ((cdata->_num_components == 1 && format.size() == 1) ||
7415 (cdata->_num_components == 2 && format.size() == 2 && format.at(1) == 'A' && format.at(0) != 'A') ||
7416 (cdata->_num_components == 3 && format == "BGR") ||
7417 (cdata->_num_components == 4 && format == "BGRA")) {
7418 // The format string is already our format, so we just need to copy it.
7419 return CPTA_uchar(data);
7420 }
7421
7422 // Check if we have an alpha channel, and remember which channel we use.
7423 int alpha = -1;
7424 if (Texture::has_alpha(cdata->_format)) {
7425 alpha = cdata->_num_components - 1;
7426 }
7427
7428 // Validate the format beforehand.
7429 for (size_t i = 0; i < format.size(); ++i) {
7430 if (format[i] != 'B' && format[i] != 'G' && format[i] != 'R' &&
7431 format[i] != 'A' && format[i] != '0' && format[i] != '1') {
7432 gobj_cat.error() << "Unexpected component character '"
7433 << format[i] << "', expected one of RGBA01!\n";
7434 return CPTA_uchar(get_class_type());
7435 }
7436 }
7437
7438 // Create a new empty array that can hold our image.
7439 PTA_uchar newdata = PTA_uchar::empty_array(imgsize * format.size() * cdata->_component_width, get_class_type());
7440
7441 // These ifs are for optimization of commonly used image types.
7442 if (cdata->_component_width == 1) {
7443 if (format == "RGBA" && cdata->_num_components == 4) {
7444 const uint32_t *src = (const uint32_t *)data.p();
7445 uint32_t *dst = (uint32_t *)newdata.p();
7446
7447 for (size_t p = 0; p < imgsize; ++p) {
7448 uint32_t v = *src++;
7449 *dst++ = ((v & 0xff00ff00u)) |
7450 ((v & 0x00ff0000u) >> 16) |
7451 ((v & 0x000000ffu) << 16);
7452 }
7453 return newdata;
7454 }
7455 if (format == "RGB" && cdata->_num_components == 4) {
7456 const uint32_t *src = (const uint32_t *)data.p();
7457 uint32_t *dst = (uint32_t *)newdata.p();
7458
7459 // Convert blocks of 4 pixels at a time, so that we can treat both the
7460 // source and destination as 32-bit integers.
7461 int blocks = imgsize >> 2;
7462 for (int i = 0; i < blocks; ++i) {
7463 uint32_t v0 = *src++;
7464 uint32_t v1 = *src++;
7465 uint32_t v2 = *src++;
7466 uint32_t v3 = *src++;
7467 *dst++ = ((v0 & 0x00ff0000u) >> 16) |
7468 ((v0 & 0x0000ff00u)) |
7469 ((v0 & 0x000000ffu) << 16) |
7470 ((v1 & 0x00ff0000u) << 8);
7471 *dst++ = ((v1 & 0x0000ff00u) >> 8) |
7472 ((v1 & 0x000000ffu) << 8) |
7473 ((v2 & 0x00ff0000u)) |
7474 ((v2 & 0x0000ff00u) << 16);
7475 *dst++ = ((v2 & 0x000000ffu)) |
7476 ((v3 & 0x00ff0000u) >> 8) |
7477 ((v3 & 0x0000ff00u) << 8) |
7478 ((v3 & 0x000000ffu) << 24);
7479 }
7480
7481 // If the image size wasn't a multiple of 4, we may have a handful of
7482 // pixels left over. Convert those the slower way.
7483 uint8_t *tail = (uint8_t *)dst;
7484      for (size_t i = (imgsize & ~0x3); i < imgsize; ++i) {
7485 uint32_t v = *src++;
7486 *tail++ = (v & 0x00ff0000u) >> 16;
7487 *tail++ = (v & 0x0000ff00u) >> 8;
7488 *tail++ = (v & 0x000000ffu);
7489 }
7490 return newdata;
7491 }
7492 if (format == "BGR" && cdata->_num_components == 4) {
7493 const uint32_t *src = (const uint32_t *)data.p();
7494 uint32_t *dst = (uint32_t *)newdata.p();
7495
7496 // Convert blocks of 4 pixels at a time, so that we can treat both the
7497 // source and destination as 32-bit integers.
7498 int blocks = imgsize >> 2;
7499 for (int i = 0; i < blocks; ++i) {
7500 uint32_t v0 = *src++;
7501 uint32_t v1 = *src++;
7502 uint32_t v2 = *src++;
7503 uint32_t v3 = *src++;
7504 *dst++ = (v0 & 0x00ffffffu) | ((v1 & 0x000000ffu) << 24);
7505 *dst++ = ((v1 & 0x00ffff00u) >> 8) | ((v2 & 0x0000ffffu) << 16);
7506 *dst++ = ((v2 & 0x00ff0000u) >> 16) | ((v3 & 0x00ffffffu) << 8);
7507 }
7508
7509 // If the image size wasn't a multiple of 4, we may have a handful of
7510 // pixels left over. Convert those the slower way.
7511 uint8_t *tail = (uint8_t *)dst;
7512      for (size_t i = (imgsize & ~0x3); i < imgsize; ++i) {
7513 uint32_t v = *src++;
7514 *tail++ = (v & 0x000000ffu);
7515 *tail++ = (v & 0x0000ff00u) >> 8;
7516 *tail++ = (v & 0x00ff0000u) >> 16;
7517 }
7518 return newdata;
7519 }
7520 const uint8_t *src = (const uint8_t *)data.p();
7521 uint8_t *dst = (uint8_t *)newdata.p();
7522
7523 if (format == "RGB" && cdata->_num_components == 3) {
7524      for (size_t i = 0; i < imgsize; ++i) {
7525 *dst++ = src[2];
7526 *dst++ = src[1];
7527 *dst++ = src[0];
7528 src += 3;
7529 }
7530 return newdata;
7531 }
7532 if (format == "A" && cdata->_num_components != 3) {
7533 // We can generally rely on alpha to be the last component.
7534 for (size_t p = 0; p < imgsize; ++p) {
7535        dst[p] = src[cdata->_num_components - 1];
7536 src += cdata->_num_components;
7537 }
7538 return newdata;
7539 }
7540 // Fallback case for other 8-bit-per-channel formats.
7541 for (size_t p = 0; p < imgsize; ++p) {
7542 for (size_t i = 0; i < format.size(); ++i) {
7543 if (format[i] == 'B' || (cdata->_num_components <= 2 && format[i] != 'A')) {
7544 *dst++ = src[0];
7545 } else if (format[i] == 'G') {
7546 *dst++ = src[1];
7547 } else if (format[i] == 'R') {
7548 *dst++ = src[2];
7549 } else if (format[i] == 'A') {
7550 if (alpha >= 0) {
7551 *dst++ = src[alpha];
7552 } else {
7553 *dst++ = 0xff;
7554 }
7555 } else if (format[i] == '1') {
7556 *dst++ = 0xff;
7557 } else {
7558 *dst++ = 0x00;
7559 }
7560 }
7561 src += cdata->_num_components;
7562 }
7563 return newdata;
7564 }
7565
7566 // The slow and general case.
7567 for (size_t p = 0; p < imgsize; ++p) {
7568 for (size_t i = 0; i < format.size(); ++i) {
7569 int component = 0;
7570 if (format[i] == 'B' || (cdata->_num_components <= 2 && format[i] != 'A')) {
7571 component = 0;
7572 } else if (format[i] == 'G') {
7573 component = 1;
7574 } else if (format[i] == 'R') {
7575 component = 2;
7576 } else if (format[i] == 'A') {
7577 if (alpha >= 0) {
7578 component = alpha;
7579 } else {
7580 memset((void*)(newdata + (p * format.size() + i) * cdata->_component_width), -1, cdata->_component_width);
7581 continue;
7582 }
7583 } else if (format[i] == '1') {
7584 memset((void*)(newdata + (p * format.size() + i) * cdata->_component_width), -1, cdata->_component_width);
7585 continue;
7586 } else {
7587 memset((void*)(newdata + (p * format.size() + i) * cdata->_component_width), 0, cdata->_component_width);
7588 continue;
7589 }
7590 memcpy((void*)(newdata + (p * format.size() + i) * cdata->_component_width),
7591 (void*)(data + (p * cdata->_num_components + component) * cdata->_component_width),
7592 cdata->_component_width);
7593 }
7594 }
7595 return newdata;
7596}
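/*
 * A minimal usage sketch for get_ram_image_as(), assuming an application-side
 * Texture pointer and a hypothetical texture file name; the resulting buffer
 * holds tightly packed components in the requested order, with rows stored
 * bottom-to-top as usual for Panda RAM images.
 *
 *   PT(Texture) tex = TexturePool::load_texture("maps/example.png");
 *   CPTA_uchar rgba = tex->get_ram_image_as("RGBA");
 *   // rgba.p() now points at tightly packed R, G, B, A bytes.
 */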
7597
7598/**
7599 *
7600 */
7601void Texture::
7602do_set_simple_ram_image(CData *cdata, CPTA_uchar image, int x_size, int y_size) {
7603 nassertv(cdata->_texture_type == TT_2d_texture);
7604 size_t expected_page_size = (size_t)(x_size * y_size * 4);
7605 nassertv(image.size() == expected_page_size);
7606
7607 cdata->_simple_x_size = x_size;
7608 cdata->_simple_y_size = y_size;
7609 cdata->_simple_ram_image._image = image.cast_non_const();
7610 cdata->_simple_ram_image._page_size = image.size();
7611 cdata->_simple_image_date_generated = (int32_t)time(nullptr);
7612 cdata->inc_simple_image_modified();
7613}
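/*
 * Sketch of feeding a hand-built "simple" image through the public
 * set_simple_ram_image() interface, which funnels into the method above.
 * The buffer must be x_size * y_size * 4 bytes of BGRA data; "tex" is an
 * assumed application-side Texture pointer.
 *
 *   PTA_uchar simple = PTA_uchar::empty_array(2 * 2 * 4);
 *   for (size_t i = 0; i < simple.size(); i += 4) {
 *     simple[i + 0] = 0xff;  // B
 *     simple[i + 1] = 0x00;  // G
 *     simple[i + 2] = 0x00;  // R
 *     simple[i + 3] = 0xff;  // A
 *   }
 *   tex->set_simple_ram_image(CPTA_uchar(simple), 2, 2);
 */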
7614
7615/**
7616 *
7617 */
7618int Texture::
7619do_get_expected_num_mipmap_levels(const CData *cdata) const {
7620 if (cdata->_texture_type == Texture::TT_buffer_texture) {
7621 return 1;
7622 }
7623 int size = max(cdata->_x_size, cdata->_y_size);
7624 if (cdata->_texture_type == Texture::TT_3d_texture) {
7625 size = max(size, cdata->_z_size);
7626 }
7627 int count = 1;
7628 while (size > 1) {
7629 size >>= 1;
7630 ++count;
7631 }
7632 return count;
7633}
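/*
 * Worked example for do_get_expected_num_mipmap_levels(): a 640 x 480 2-D
 * texture walks 640 -> 320 -> 160 -> 80 -> 40 -> 20 -> 10 -> 5 -> 2 -> 1,
 * giving 10 levels; for sizes >= 1 this is equivalent to
 * 1 + (int)floor(log2(max(x_size, y_size))).
 */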
7634
7635/**
7636 *
7637 */
7638size_t Texture::
7639do_get_ram_mipmap_page_size(const CData *cdata, int n) const {
7640 if (cdata->_ram_image_compression != CM_off) {
7641 if (n >= 0 && n < (int)cdata->_ram_images.size()) {
7642 return cdata->_ram_images[n]._page_size;
7643 }
7644 return 0;
7645 } else {
7646 return do_get_expected_ram_mipmap_page_size(cdata, n);
7647 }
7648}
7649
7650/**
7651 *
7652 */
7653int Texture::
7654do_get_expected_mipmap_x_size(const CData *cdata, int n) const {
7655 int size = max(cdata->_x_size, 1);
7656 while (n > 0 && size > 1) {
7657 size >>= 1;
7658 --n;
7659 }
7660 return size;
7661}
7662
7663/**
7664 *
7665 */
7666int Texture::
7667do_get_expected_mipmap_y_size(const CData *cdata, int n) const {
7668 int size = max(cdata->_y_size, 1);
7669 while (n > 0 && size > 1) {
7670 size >>= 1;
7671 --n;
7672 }
7673 return size;
7674}
7675
7676/**
7677 *
7678 */
7679int Texture::
7680do_get_expected_mipmap_z_size(const CData *cdata, int n) const {
7681 // 3-D textures have a different number of pages per each mipmap level.
7682 // Other kinds of textures--especially, cube map textures--always have the
7683 // same.
7684 if (cdata->_texture_type == Texture::TT_3d_texture) {
7685 int size = max(cdata->_z_size, 1);
7686 while (n > 0 && size > 1) {
7687 size >>= 1;
7688 --n;
7689 }
7690 return size;
7691
7692 } else {
7693 return cdata->_z_size;
7694 }
7695}
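/*
 * Example of the distinction above: a 64 x 64 x 16 3-D texture has z sizes
 * 16, 8, 4, 2, 1 down the mipmap chain, while a cube map (z_size = 6) keeps
 * all six pages at every level.
 */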
7696
7697/**
7698 *
7699 */
7700void Texture::
7701do_clear_simple_ram_image(CData *cdata) {
7702 cdata->_simple_x_size = 0;
7703 cdata->_simple_y_size = 0;
7704 cdata->_simple_ram_image._image.clear();
7705 cdata->_simple_ram_image._page_size = 0;
7706 cdata->_simple_image_date_generated = 0;
7707
7708 // We allow this exception: we update the _simple_image_modified here, since
7709 // no one really cares much about that anyway, and it's convenient to do it
7710 // here.
7711 cdata->inc_simple_image_modified();
7712}
7713
7714/**
7715 *
7716 */
7717void Texture::
7718do_clear_ram_mipmap_images(CData *cdata) {
7719 if (!cdata->_ram_images.empty()) {
7720 cdata->_ram_images.erase(cdata->_ram_images.begin() + 1, cdata->_ram_images.end());
7721 }
7722}
7723
7724/**
7725 * Generates the RAM mipmap images for this texture, first uncompressing it as
7726 * required. Will recompress the image if it was originally compressed,
7727 * unless allow_recompress is false.
7728 */
7729void Texture::
7730do_generate_ram_mipmap_images(CData *cdata, bool allow_recompress) {
7731 nassertv(do_has_ram_image(cdata));
7732
7733 if (do_get_expected_num_mipmap_levels(cdata) == 1) {
7734 // Don't bother.
7735 return;
7736 }
7737
7738 RamImage orig_compressed_image;
7739 CompressionMode orig_compression_mode = CM_off;
7740
7741 if (cdata->_ram_image_compression != CM_off) {
7742 // The RAM image is compressed. This means we need to uncompress it in
7743 // order to generate mipmap images. Save the original first, to avoid
7744 // lossy recompression.
7745 orig_compressed_image = cdata->_ram_images[0];
7746 orig_compression_mode = cdata->_ram_image_compression;
7747
7748 // Now try to get the uncompressed source image.
7749 do_get_uncompressed_ram_image(cdata);
7750
7751 if (cdata->_ram_image_compression != CM_off) {
7752 gobj_cat.error()
7753 << "Cannot generate mipmap levels for image with compression "
7754 << cdata->_ram_image_compression << "\n";
7755 return;
7756 }
7757 }
7758
7759 do_clear_ram_mipmap_images(cdata);
7760
7761 if (gobj_cat.is_debug()) {
7762 gobj_cat.debug()
7763 << "Generating mipmap levels for " << *this << "\n";
7764 }
7765
7766 if (cdata->_texture_type == Texture::TT_3d_texture && cdata->_z_size != 1) {
7767 // Eek, a 3-D texture.
7768 int x_size = cdata->_x_size;
7769 int y_size = cdata->_y_size;
7770 int z_size = cdata->_z_size;
7771 int n = 0;
7772 while (x_size > 1 || y_size > 1 || z_size > 1) {
7773 cdata->_ram_images.push_back(RamImage());
7774 do_filter_3d_mipmap_level(cdata, cdata->_ram_images[n + 1], cdata->_ram_images[n],
7775 x_size, y_size, z_size);
7776 x_size = max(x_size >> 1, 1);
7777 y_size = max(y_size >> 1, 1);
7778 z_size = max(z_size >> 1, 1);
7779 ++n;
7780 }
7781
7782 } else {
7783 // A 1-D, 2-D, or cube map texture.
7784 int x_size = cdata->_x_size;
7785 int y_size = cdata->_y_size;
7786 int n = 0;
7787 while (x_size > 1 || y_size > 1) {
7788 cdata->_ram_images.push_back(RamImage());
7789 do_filter_2d_mipmap_pages(cdata, cdata->_ram_images[n + 1], cdata->_ram_images[n],
7790 x_size, y_size);
7791 x_size = max(x_size >> 1, 1);
7792 y_size = max(y_size >> 1, 1);
7793 ++n;
7794 }
7795 }
7796
7797 if (orig_compression_mode != CM_off && allow_recompress) {
7798 // Now attempt to recompress the mipmap images according to the original
7799 // compression mode. We don't need to bother compressing the first image
7800 // (it was already compressed, after all), so temporarily remove it from
7801 // the top of the mipmap stack, and compress all of the rest of them
7802 // instead.
7803 nassertv(cdata->_ram_images.size() > 1);
7804 int l0_x_size = cdata->_x_size;
7805 int l0_y_size = cdata->_y_size;
7806 int l0_z_size = cdata->_z_size;
7807 cdata->_x_size = do_get_expected_mipmap_x_size(cdata, 1);
7808 cdata->_y_size = do_get_expected_mipmap_y_size(cdata, 1);
7809 cdata->_z_size = do_get_expected_mipmap_z_size(cdata, 1);
7810 RamImage uncompressed_image = cdata->_ram_images[0];
7811 cdata->_ram_images.erase(cdata->_ram_images.begin());
7812
7813 bool success = do_compress_ram_image(cdata, orig_compression_mode, QL_default, nullptr);
7814 // Now restore the toplevel image.
7815 if (success) {
7816 if (gobj_cat.is_debug()) {
7817 gobj_cat.debug()
7818 << "Compressed " << get_name() << " generated mipmaps with "
7819 << cdata->_ram_image_compression << "\n";
7820 }
7821 cdata->_ram_images.insert(cdata->_ram_images.begin(), orig_compressed_image);
7822 } else {
7823 cdata->_ram_images.insert(cdata->_ram_images.begin(), uncompressed_image);
7824 }
7825 cdata->_x_size = l0_x_size;
7826 cdata->_y_size = l0_y_size;
7827 cdata->_z_size = l0_z_size;
7828 }
7829}
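/*
 * Sketch of driving this through the public interface, e.g. to bake a full
 * mipmap chain into a texture before writing it out; "tex" is an assumed
 * application-side Texture pointer.
 *
 *   if (tex->has_ram_image() && tex->get_num_ram_mipmap_images() <= 1) {
 *     tex->generate_ram_mipmap_images();
 *   }
 */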
7830
7831/**
7832 *
7833 */
7834void Texture::
7835do_set_pad_size(CData *cdata, int x, int y, int z) {
7836 if (x > cdata->_x_size) {
7837 x = cdata->_x_size;
7838 }
7839 if (y > cdata->_y_size) {
7840 y = cdata->_y_size;
7841 }
7842 if (z > cdata->_z_size) {
7843 z = cdata->_z_size;
7844 }
7845
7846 cdata->_pad_x_size = x;
7847 cdata->_pad_y_size = y;
7848 cdata->_pad_z_size = z;
7849}
7850
7851/**
7852 * Returns true if we can safely call do_reload_ram_image() in order to make
7853 * the image available, or false if we shouldn't do this (because we know a
7854 * priori that it wouldn't work anyway).
7855 */
7856bool Texture::
7857do_can_reload(const CData *cdata) const {
7858 return (cdata->_loaded_from_image && !cdata->_fullpath.empty());
7859}
7860
7861/**
7862 *
7863 */
7864bool Texture::
7865do_reload(CData *cdata) {
7866 if (do_can_reload(cdata)) {
7867 do_clear_ram_image(cdata);
7868 do_reload_ram_image(cdata, true);
7869 if (do_has_ram_image(cdata)) {
7870 // An explicit call to reload() should increment image_modified.
7871 cdata->inc_image_modified();
7872 return true;
7873 }
7874 return false;
7875 }
7876
7877 // We don't have a filename to load from.
7878 return false;
7879}
7880
7881/**
7882 * Returns true if there is a rawdata image that we have available to write to
7883 * the bam stream. For a normal Texture, this is the same thing as
7884 * do_has_ram_image(), but a movie texture might define it differently.
7885 */
7886bool Texture::
7887do_has_bam_rawdata(const CData *cdata) const {
7888 return do_has_ram_image(cdata);
7889}
7890
7891/**
7892 * If do_has_bam_rawdata() returned false, this attempts to reload the rawdata
7893 * image if possible.
7894 */
7895void Texture::
7896do_get_bam_rawdata(CData *cdata) {
7897 do_get_ram_image(cdata);
7898}
7899
7900/**
7901 * Internal method to convert pixel data from the indicated PNMImage into the
7902 * given ram_image.
7903 */
7904void Texture::
7905convert_from_pnmimage(PTA_uchar &image, size_t page_size,
7906 int row_stride, int x, int y, int z,
7907 const PNMImage &pnmimage, int num_components,
7908 int component_width) {
7909 int x_size = pnmimage.get_x_size();
7910 int y_size = pnmimage.get_y_size();
7911 xelval maxval = pnmimage.get_maxval();
7912 int pixel_size = num_components * component_width;
7913
7914 int row_skip = 0;
7915 if (row_stride == 0) {
7916 row_stride = x_size;
7917 } else {
7918 row_skip = (row_stride - x_size) * pixel_size;
7919 nassertv(row_skip >= 0);
7920 }
7921
7922 bool is_grayscale = (num_components == 1 || num_components == 2);
7923 bool has_alpha = (num_components == 2 || num_components == 4);
7924 bool img_has_alpha = pnmimage.has_alpha();
7925
7926 int idx = page_size * z;
7927 nassertv(idx + page_size <= image.size());
7928 unsigned char *p = &image[idx];
7929
7930 if (x != 0 || y != 0) {
7931 p += (row_stride * y + x) * pixel_size;
7932 }
7933
7934 if (maxval == 255 && component_width == 1) {
7935 // Most common case: one byte per pixel, and the source image shows a
7936 // maxval of 255. No scaling is necessary. Because this is such a common
7937 // case, we break it out per component for best performance.
7938 const xel *array = pnmimage.get_array();
7939 switch (num_components) {
7940 case 1:
7941 for (int j = y_size-1; j >= 0; j--) {
7942 const xel *row = array + j * x_size;
7943 for (int i = 0; i < x_size; i++) {
7944 *p++ = (uchar)PPM_GETB(row[i]);
7945 }
7946 p += row_skip;
7947 }
7948 break;
7949
7950 case 2:
7951 if (img_has_alpha) {
7952 const xelval *alpha = pnmimage.get_alpha_array();
7953 for (int j = y_size-1; j >= 0; j--) {
7954 const xel *row = array + j * x_size;
7955 const xelval *alpha_row = alpha + j * x_size;
7956 for (int i = 0; i < x_size; i++) {
7957 *p++ = (uchar)PPM_GETB(row[i]);
7958 *p++ = (uchar)alpha_row[i];
7959 }
7960 p += row_skip;
7961 }
7962 } else {
7963 for (int j = y_size-1; j >= 0; j--) {
7964 const xel *row = array + j * x_size;
7965 for (int i = 0; i < x_size; i++) {
7966 *p++ = (uchar)PPM_GETB(row[i]);
7967 *p++ = (uchar)255;
7968 }
7969 p += row_skip;
7970 }
7971 }
7972 break;
7973
7974 case 3:
7975 for (int j = y_size-1; j >= 0; j--) {
7976 const xel *row = array + j * x_size;
7977 for (int i = 0; i < x_size; i++) {
7978 *p++ = (uchar)PPM_GETB(row[i]);
7979 *p++ = (uchar)PPM_GETG(row[i]);
7980 *p++ = (uchar)PPM_GETR(row[i]);
7981 }
7982 p += row_skip;
7983 }
7984 break;
7985
7986 case 4:
7987 if (img_has_alpha) {
7988 const xelval *alpha = pnmimage.get_alpha_array();
7989 for (int j = y_size-1; j >= 0; j--) {
7990 const xel *row = array + j * x_size;
7991 const xelval *alpha_row = alpha + j * x_size;
7992 for (int i = 0; i < x_size; i++) {
7993 *p++ = (uchar)PPM_GETB(row[i]);
7994 *p++ = (uchar)PPM_GETG(row[i]);
7995 *p++ = (uchar)PPM_GETR(row[i]);
7996 *p++ = (uchar)alpha_row[i];
7997 }
7998 p += row_skip;
7999 }
8000 } else {
8001 for (int j = y_size-1; j >= 0; j--) {
8002 const xel *row = array + j * x_size;
8003 for (int i = 0; i < x_size; i++) {
8004 *p++ = (uchar)PPM_GETB(row[i]);
8005 *p++ = (uchar)PPM_GETG(row[i]);
8006 *p++ = (uchar)PPM_GETR(row[i]);
8007 *p++ = (uchar)255;
8008 }
8009 p += row_skip;
8010 }
8011 }
8012 break;
8013
8014 default:
8015 nassertv(num_components >= 1 && num_components <= 4);
8016 break;
8017 }
8018
8019 } else if (maxval == 65535 && component_width == 2) {
8020 // Another possible case: two bytes per pixel, and the source image shows
8021 // a maxval of 65535. Again, no scaling is necessary.
8022 for (int j = y_size-1; j >= 0; j--) {
8023 for (int i = 0; i < x_size; i++) {
8024 if (is_grayscale) {
8025 store_unscaled_short(p, pnmimage.get_gray_val(i, j));
8026 } else {
8027 store_unscaled_short(p, pnmimage.get_blue_val(i, j));
8028 store_unscaled_short(p, pnmimage.get_green_val(i, j));
8029 store_unscaled_short(p, pnmimage.get_red_val(i, j));
8030 }
8031 if (has_alpha) {
8032 if (img_has_alpha) {
8033 store_unscaled_short(p, pnmimage.get_alpha_val(i, j));
8034 } else {
8035 store_unscaled_short(p, 65535);
8036 }
8037 }
8038 }
8039 p += row_skip;
8040 }
8041
8042 } else if (component_width == 1) {
8043 // A less common case: one byte per pixel, but the maxval is something
8044 // other than 255. In this case, we should scale the pixel values up to
8045 // the appropriate amount.
8046 double scale = 255.0 / (double)maxval;
8047
8048 for (int j = y_size-1; j >= 0; j--) {
8049 for (int i = 0; i < x_size; i++) {
8050 if (is_grayscale) {
8051 store_scaled_byte(p, pnmimage.get_gray_val(i, j), scale);
8052 } else {
8053 store_scaled_byte(p, pnmimage.get_blue_val(i, j), scale);
8054 store_scaled_byte(p, pnmimage.get_green_val(i, j), scale);
8055 store_scaled_byte(p, pnmimage.get_red_val(i, j), scale);
8056 }
8057 if (has_alpha) {
8058 if (img_has_alpha) {
8059 store_scaled_byte(p, pnmimage.get_alpha_val(i, j), scale);
8060 } else {
8061 store_unscaled_byte(p, 255);
8062 }
8063 }
8064 }
8065 p += row_skip;
8066 }
8067
8068 } else { // component_width == 2
8069 // Another uncommon case: two bytes per pixel, and the maxval is something
8070 // other than 65535. Again, we must scale the pixel values.
8071 double scale = 65535.0 / (double)maxval;
8072
8073 for (int j = y_size-1; j >= 0; j--) {
8074 for (int i = 0; i < x_size; i++) {
8075 if (is_grayscale) {
8076 store_scaled_short(p, pnmimage.get_gray_val(i, j), scale);
8077 } else {
8078 store_scaled_short(p, pnmimage.get_blue_val(i, j), scale);
8079 store_scaled_short(p, pnmimage.get_green_val(i, j), scale);
8080 store_scaled_short(p, pnmimage.get_red_val(i, j), scale);
8081 }
8082 if (has_alpha) {
8083 if (img_has_alpha) {
8084 store_scaled_short(p, pnmimage.get_alpha_val(i, j), 1.0);
8085 } else {
8086 store_unscaled_short(p, 65535);
8087 }
8088 }
8089 }
8090 p += row_skip;
8091 }
8092 }
8093}
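/*
 * Worked example for the scaling branches above: a PNM image with maxval 15
 * (4-bit data) stored into an 8-bit RAM image uses scale = 255.0 / 15 = 17.0,
 * so a source value of 15 becomes 255 and a source value of 7 becomes 119.
 */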
8094
8095/**
8096 * Internal method to convert pixel data from the indicated PfmFile into the
8097 * given ram_image.
8098 */
8099void Texture::
8100convert_from_pfm(PTA_uchar &image, size_t page_size, int z,
8101 const PfmFile &pfm, int num_components, int component_width) {
8102 nassertv(component_width == 4); // Currently only PN_float32 is expected.
8103 int x_size = pfm.get_x_size();
8104 int y_size = pfm.get_y_size();
8105
8106 int idx = page_size * z;
8107 nassertv(idx + page_size <= image.size());
8108 PN_float32 *p = (PN_float32 *)&image[idx];
8109
8110 switch (num_components) {
8111 case 1:
8112 {
8113 for (int j = y_size-1; j >= 0; j--) {
8114 for (int i = 0; i < x_size; i++) {
8115 p[0] = pfm.get_channel(i, j, 0);
8116 ++p;
8117 }
8118 }
8119 }
8120 break;
8121
8122 case 2:
8123 {
8124 for (int j = y_size-1; j >= 0; j--) {
8125 for (int i = 0; i < x_size; i++) {
8126 p[0] = pfm.get_channel(i, j, 0);
8127 p[1] = pfm.get_channel(i, j, 1);
8128 p += 2;
8129 }
8130 }
8131 }
8132 break;
8133
8134 case 3:
8135 {
8136 // RGB -> BGR
8137 for (int j = y_size-1; j >= 0; j--) {
8138 for (int i = 0; i < x_size; i++) {
8139 p[0] = pfm.get_channel(i, j, 2);
8140 p[1] = pfm.get_channel(i, j, 1);
8141 p[2] = pfm.get_channel(i, j, 0);
8142 p += 3;
8143 }
8144 }
8145 }
8146 break;
8147
8148 case 4:
8149 {
8150 // RGBA -> BGRA
8151 for (int j = y_size-1; j >= 0; j--) {
8152 for (int i = 0; i < x_size; i++) {
8153 p[0] = pfm.get_channel(i, j, 2);
8154 p[1] = pfm.get_channel(i, j, 1);
8155 p[2] = pfm.get_channel(i, j, 0);
8156 p[3] = pfm.get_channel(i, j, 3);
8157 p += 4;
8158 }
8159 }
8160 }
8161 break;
8162
8163 default:
8164 nassert_raise("unexpected channel count");
8165 return;
8166 }
8167
8168 nassertv((unsigned char *)p == &image[idx] + page_size);
8169}
8170
8171/**
8172 * Internal method to convert pixel data to the indicated PNMImage from the
8173 * given ram_image.
8174 */
8175bool Texture::
8176convert_to_pnmimage(PNMImage &pnmimage, int x_size, int y_size,
8177 int num_components, ComponentType component_type,
8178 bool is_srgb, CPTA_uchar image, size_t page_size, int z) {
8179 xelval maxval = 0xff;
8180 if (component_type != T_unsigned_byte && component_type != T_byte) {
8181 maxval = 0xffff;
8182 }
8183 ColorSpace color_space = is_srgb ? CS_sRGB : CS_linear;
8184 pnmimage.clear(x_size, y_size, num_components, maxval, nullptr, color_space);
8185 bool has_alpha = pnmimage.has_alpha();
8186 bool is_grayscale = pnmimage.is_grayscale();
8187
8188 int idx = page_size * z;
8189 nassertr(idx + page_size <= image.size(), false);
8190
8191 xel *array = pnmimage.get_array();
8192 xelval *alpha = pnmimage.get_alpha_array();
8193
8194 switch (component_type) {
8195 case T_unsigned_byte:
8196 if (is_grayscale) {
8197 const unsigned char *p = &image[idx];
8198 if (has_alpha) {
8199 for (int j = y_size-1; j >= 0; j--) {
8200 xel *row = array + j * x_size;
8201 xelval *alpha_row = alpha + j * x_size;
8202 for (int i = 0; i < x_size; i++) {
8203 PPM_PUTB(row[i], *p++);
8204 alpha_row[i] = *p++;
8205 }
8206 }
8207 } else {
8208 for (int j = y_size-1; j >= 0; j--) {
8209 xel *row = array + j * x_size;
8210 for (int i = 0; i < x_size; i++) {
8211 PPM_PUTB(row[i], *p++);
8212 }
8213 }
8214 }
8215 nassertr(p == &image[idx] + page_size, false);
8216 } else {
8217 const unsigned char *p = &image[idx];
8218 if (has_alpha) {
8219 for (int j = y_size-1; j >= 0; j--) {
8220 xel *row = array + j * x_size;
8221 xelval *alpha_row = alpha + j * x_size;
8222 for (int i = 0; i < x_size; i++) {
8223 PPM_PUTB(row[i], *p++);
8224 PPM_PUTG(row[i], *p++);
8225 PPM_PUTR(row[i], *p++);
8226 alpha_row[i] = *p++;
8227 }
8228 }
8229 } else {
8230 for (int j = y_size-1; j >= 0; j--) {
8231 xel *row = array + j * x_size;
8232 for (int i = 0; i < x_size; i++) {
8233 PPM_PUTB(row[i], *p++);
8234 PPM_PUTG(row[i], *p++);
8235 PPM_PUTR(row[i], *p++);
8236 }
8237 }
8238 }
8239 nassertr(p == &image[idx] + page_size, false);
8240 }
8241 break;
8242
8243 case T_unsigned_short:
8244 {
8245 const uint16_t *p = (const uint16_t *)&image[idx];
8246
8247 for (int j = y_size-1; j >= 0; j--) {
8248 xel *row = array + j * x_size;
8249 xelval *alpha_row = alpha + j * x_size;
8250 for (int i = 0; i < x_size; i++) {
8251 PPM_PUTB(row[i], *p++);
8252 if (!is_grayscale) {
8253 PPM_PUTG(row[i], *p++);
8254 PPM_PUTR(row[i], *p++);
8255 }
8256 if (has_alpha) {
8257 alpha_row[i] = *p++;
8258 }
8259 }
8260 }
8261 nassertr((const unsigned char *)p == &image[idx] + page_size, false);
8262 }
8263 break;
8264
8265 case T_unsigned_int:
8266 {
8267 const uint32_t *p = (const uint32_t *)&image[idx];
8268
8269 for (int j = y_size-1; j >= 0; j--) {
8270 xel *row = array + j * x_size;
8271 xelval *alpha_row = alpha + j * x_size;
8272 for (int i = 0; i < x_size; i++) {
8273 PPM_PUTB(row[i], (*p++) >> 16u);
8274 if (!is_grayscale) {
8275 PPM_PUTG(row[i], (*p++) >> 16u);
8276 PPM_PUTR(row[i], (*p++) >> 16u);
8277 }
8278 if (has_alpha) {
8279 alpha_row[i] = (*p++) >> 16u;
8280 }
8281 }
8282 }
8283 nassertr((const unsigned char *)p == &image[idx] + page_size, false);
8284 }
8285 break;
8286
8287 case T_half_float:
8288 {
8289 const unsigned char *p = &image[idx];
8290
8291 for (int j = y_size-1; j >= 0; j--) {
8292 for (int i = 0; i < x_size; i++) {
8293 pnmimage.set_blue(i, j, get_half_float(p));
8294 if (!is_grayscale) {
8295 pnmimage.set_green(i, j, get_half_float(p));
8296 pnmimage.set_red(i, j, get_half_float(p));
8297 }
8298 if (has_alpha) {
8299 pnmimage.set_alpha(i, j, get_half_float(p));
8300 }
8301 }
8302 }
8303 nassertr(p == &image[idx] + page_size, false);
8304 }
8305 break;
8306
8307 default:
8308 return false;
8309 }
8310
8311 return true;
8312}
8313
8314/**
8315 * Internal method to convert pixel data to the indicated PfmFile from the
8316 * given ram_image.
8317 */
8318bool Texture::
8319convert_to_pfm(PfmFile &pfm, int x_size, int y_size,
8320 int num_components, int component_width,
8321 CPTA_uchar image, size_t page_size, int z) {
8322 nassertr(component_width == 4, false); // Currently only PN_float32 is expected.
8323 pfm.clear(x_size, y_size, num_components);
8324
8325 int idx = page_size * z;
8326 nassertr(idx + page_size <= image.size(), false);
8327 const PN_float32 *p = (const PN_float32 *)&image[idx];
8328
8329 switch (num_components) {
8330 case 1:
8331 for (int j = y_size-1; j >= 0; j--) {
8332 for (int i = 0; i < x_size; i++) {
8333 pfm.set_channel(i, j, 0, p[0]);
8334 ++p;
8335 }
8336 }
8337 break;
8338
8339 case 2:
8340 for (int j = y_size-1; j >= 0; j--) {
8341 for (int i = 0; i < x_size; i++) {
8342 pfm.set_channel(i, j, 0, p[0]);
8343 pfm.set_channel(i, j, 1, p[1]);
8344 p += 2;
8345 }
8346 }
8347 break;
8348
8349 case 3:
8350 // BGR -> RGB
8351 for (int j = y_size-1; j >= 0; j--) {
8352 for (int i = 0; i < x_size; i++) {
8353 pfm.set_channel(i, j, 2, p[0]);
8354 pfm.set_channel(i, j, 1, p[1]);
8355 pfm.set_channel(i, j, 0, p[2]);
8356 p += 3;
8357 }
8358 }
8359 break;
8360
8361 case 4:
8362 // BGRA -> RGBA
8363 for (int j = y_size-1; j >= 0; j--) {
8364 for (int i = 0; i < x_size; i++) {
8365 pfm.set_channel(i, j, 2, p[0]);
8366 pfm.set_channel(i, j, 1, p[1]);
8367 pfm.set_channel(i, j, 0, p[2]);
8368 pfm.set_channel(i, j, 3, p[3]);
8369 p += 4;
8370 }
8371 }
8372 break;
8373
8374 default:
8375 nassert_raise("unexpected channel count");
8376 return false;
8377 }
8378
8379 nassertr((unsigned char *)p == &image[idx] + page_size, false);
8380 return true;
8381}
8382
8383/**
8384 * Called by read_dds for a DDS file in BGR8 format.
8385 */
8386PTA_uchar Texture::
8387read_dds_level_bgr8(Texture *tex, CData *cdata, const DDSHeader &header, int n, istream &in) {
8388 // This is in order B, G, R.
8389 int x_size = tex->do_get_expected_mipmap_x_size(cdata, n);
8390 int y_size = tex->do_get_expected_mipmap_y_size(cdata, n);
8391
8392 size_t size = tex->do_get_expected_ram_mipmap_page_size(cdata, n);
8393 size_t row_bytes = x_size * 3;
8394 PTA_uchar image = PTA_uchar::empty_array(size);
8395 for (int y = y_size - 1; y >= 0; --y) {
8396 unsigned char *p = image.p() + y * row_bytes;
8397 nassertr(p + row_bytes <= image.p() + size, PTA_uchar());
8398 in.read((char *)p, row_bytes);
8399 }
8400
8401 return image;
8402}
8403
8404/**
8405 * Called by read_dds for a DDS file in RGB8 format.
8406 */
8407PTA_uchar Texture::
8408read_dds_level_rgb8(Texture *tex, CData *cdata, const DDSHeader &header, int n, istream &in) {
8409 // This is in order R, G, B.
8410 int x_size = tex->do_get_expected_mipmap_x_size(cdata, n);
8411 int y_size = tex->do_get_expected_mipmap_y_size(cdata, n);
8412
8413 size_t size = tex->do_get_expected_ram_mipmap_page_size(cdata, n);
8414 size_t row_bytes = x_size * 3;
8415 PTA_uchar image = PTA_uchar::empty_array(size);
8416 for (int y = y_size - 1; y >= 0; --y) {
8417 unsigned char *p = image.p() + y * row_bytes;
8418 nassertr(p + row_bytes <= image.p() + size, PTA_uchar());
8419 in.read((char *)p, row_bytes);
8420
8421 // Now reverse the r, g, b triples.
8422 for (int x = 0; x < x_size; ++x) {
8423 unsigned char r = p[0];
8424 p[0] = p[2];
8425 p[2] = r;
8426 p += 3;
8427 }
8428 nassertr(p <= image.p() + size, PTA_uchar());
8429 }
8430
8431 return image;
8432}
8433
8434/**
8435 * Called by read_dds for a DDS file in ABGR8 format.
8436 */
8437PTA_uchar Texture::
8438read_dds_level_abgr8(Texture *tex, CData *cdata, const DDSHeader &header, int n, istream &in) {
8439 // This is laid out in order R, G, B, A.
8440 int x_size = tex->do_get_expected_mipmap_x_size(cdata, n);
8441 int y_size = tex->do_get_expected_mipmap_y_size(cdata, n);
8442
8443 size_t size = tex->do_get_expected_ram_mipmap_page_size(cdata, n);
8444 size_t row_bytes = x_size * 4;
8445 PTA_uchar image = PTA_uchar::empty_array(size);
8446 for (int y = y_size - 1; y >= 0; --y) {
8447 unsigned char *p = image.p() + y * row_bytes;
8448 in.read((char *)p, row_bytes);
8449
8450 uint32_t *pw = (uint32_t *)p;
8451 for (int x = 0; x < x_size; ++x) {
8452 uint32_t w = *pw;
8453#ifdef WORDS_BIGENDIAN
8454      // big-endian: convert R, G, B, A to B, G, R, A.
8455 w = ((w & 0xff00) << 16) | ((w & 0xff000000U) >> 16) | (w & 0xff00ff);
8456#else
8457      // little-endian: convert A, B, G, R to A, R, G, B.
8458 w = ((w & 0xff) << 16) | ((w & 0xff0000) >> 16) | (w & 0xff00ff00U);
8459#endif
8460 *pw = w;
8461 ++pw;
8462 }
8463 nassertr((unsigned char *)pw <= image.p() + size, PTA_uchar());
8464 }
8465
8466 return image;
8467}
8468
8469/**
8470 * Called by read_dds for a DDS file in RGBA8 format.
8471 */
8472PTA_uchar Texture::
8473read_dds_level_rgba8(Texture *tex, CData *cdata, const DDSHeader &header, int n, istream &in) {
8474 // This is actually laid out in order B, G, R, A.
8475 int x_size = tex->do_get_expected_mipmap_x_size(cdata, n);
8476 int y_size = tex->do_get_expected_mipmap_y_size(cdata, n);
8477
8478 size_t size = tex->do_get_expected_ram_mipmap_page_size(cdata, n);
8479 size_t row_bytes = x_size * 4;
8480 PTA_uchar image = PTA_uchar::empty_array(size);
8481 for (int y = y_size - 1; y >= 0; --y) {
8482 unsigned char *p = image.p() + y * row_bytes;
8483 nassertr(p + row_bytes <= image.p() + size, PTA_uchar());
8484 in.read((char *)p, row_bytes);
8485 }
8486
8487 return image;
8488}
8489
8490/**
8491 * Called by read_dds for a DDS file in ABGR16 format.
8492 */
8493PTA_uchar Texture::
8494read_dds_level_abgr16(Texture *tex, CData *cdata, const DDSHeader &header, int n, istream &in) {
8495 // This is laid out in order R, G, B, A.
8496 int x_size = tex->do_get_expected_mipmap_x_size(cdata, n);
8497 int y_size = tex->do_get_expected_mipmap_y_size(cdata, n);
8498
8499 size_t size = tex->do_get_expected_ram_mipmap_page_size(cdata, n);
8500 size_t row_bytes = x_size * 8;
8501 PTA_uchar image = PTA_uchar::empty_array(size);
8502 for (int y = y_size - 1; y >= 0; --y) {
8503 unsigned char *p = image.p() + y * row_bytes;
8504 in.read((char *)p, row_bytes);
8505
8506 uint16_t *pw = (uint16_t *)p;
8507 for (int x = 0; x < x_size; ++x) {
8508 swap(pw[0], pw[2]);
8509 pw += 4;
8510 }
8511 nassertr((unsigned char *)pw <= image.p() + size, PTA_uchar());
8512 }
8513
8514 return image;
8515}
8516
8517/**
8518 * Called by read_dds for a DDS file in ABGR32 format.
8519 */
8520PTA_uchar Texture::
8521read_dds_level_abgr32(Texture *tex, CData *cdata, const DDSHeader &header, int n, istream &in) {
8522 // This is laid out in order R, G, B, A.
8523 int x_size = tex->do_get_expected_mipmap_x_size(cdata, n);
8524 int y_size = tex->do_get_expected_mipmap_y_size(cdata, n);
8525
8526 size_t size = tex->do_get_expected_ram_mipmap_page_size(cdata, n);
8527 size_t row_bytes = x_size * 16;
8528 nassertr(row_bytes * y_size == size, PTA_uchar());
8529 PTA_uchar image = PTA_uchar::empty_array(size);
8530 for (int y = y_size - 1; y >= 0; --y) {
8531 unsigned char *p = image.p() + y * row_bytes;
8532 in.read((char *)p, row_bytes);
8533
8534 uint32_t *pw = (uint32_t *)p;
8535 for (int x = 0; x < x_size; ++x) {
8536 swap(pw[0], pw[2]);
8537 pw += 4;
8538 }
8539 nassertr((unsigned char *)pw <= image.p() + size, PTA_uchar());
8540 }
8541
8542 return image;
8543}
8544
8545/**
8546 * Called by read_dds for a DDS file that needs no transformations applied.
8547 */
8548PTA_uchar Texture::
8549read_dds_level_raw(Texture *tex, CData *cdata, const DDSHeader &header, int n, istream &in) {
8550 int x_size = tex->do_get_expected_mipmap_x_size(cdata, n);
8551 int y_size = tex->do_get_expected_mipmap_y_size(cdata, n);
8552
8553 size_t size = tex->do_get_expected_ram_mipmap_page_size(cdata, n);
8554 size_t row_bytes = x_size * cdata->_num_components * cdata->_component_width;
8555 nassertr(row_bytes * y_size == size, PTA_uchar());
8556 PTA_uchar image = PTA_uchar::empty_array(size);
8557 for (int y = y_size - 1; y >= 0; --y) {
8558 unsigned char *p = image.p() + y * row_bytes;
8559 in.read((char *)p, row_bytes);
8560 }
8561
8562 return image;
8563}
8564
8565/**
8566 * Called by read_dds for a DDS file whose format isn't one we've specifically
8567 * optimized.
8568 */
8569PTA_uchar Texture::
8570read_dds_level_generic_uncompressed(Texture *tex, CData *cdata, const DDSHeader &header,
8571 int n, istream &in) {
8572 int x_size = tex->do_get_expected_mipmap_x_size(cdata, n);
8573 int y_size = tex->do_get_expected_mipmap_y_size(cdata, n);
8574
8575 int pitch = (x_size * header.pf.rgb_bitcount) / 8;
8576
8577 // MS says the pitch can be supplied in the header file and must be DWORD
8578 // aligned, but this appears to apply to level 0 mipmaps only (where it
8579 // almost always will be anyway). Other mipmap levels seem to be tightly
8580 // packed, but there isn't a separate pitch for each mipmap level. Weird.
8581 if (n == 0) {
8582 pitch = ((pitch + 3) / 4) * 4;
8583 if (header.dds_flags & DDSD_PITCH) {
8584 pitch = header.pitch;
8585 }
8586 }
8587
8588 int bpp = header.pf.rgb_bitcount / 8;
8589 int skip_bytes = pitch - (bpp * x_size);
8590 nassertr(skip_bytes >= 0, PTA_uchar());
8591
8592 unsigned int r_mask = header.pf.r_mask;
8593 unsigned int g_mask = header.pf.g_mask;
8594 unsigned int b_mask = header.pf.b_mask;
8595 unsigned int a_mask = header.pf.a_mask;
8596
8597 // Determine the number of bits to shift each mask to the right so that the
8598 // lowest on bit is at bit 0.
8599 int r_shift = get_lowest_on_bit(r_mask);
8600 int g_shift = get_lowest_on_bit(g_mask);
8601 int b_shift = get_lowest_on_bit(b_mask);
8602 int a_shift = get_lowest_on_bit(a_mask);
8603
8604 // Then determine the scale factor required to raise the highest color value
8605 // to 0xff000000.
8606 unsigned int r_scale = 0;
8607 if (r_mask != 0) {
8608 r_scale = 0xff000000 / (r_mask >> r_shift);
8609 }
8610 unsigned int g_scale = 0;
8611 if (g_mask != 0) {
8612 g_scale = 0xff000000 / (g_mask >> g_shift);
8613 }
8614 unsigned int b_scale = 0;
8615 if (b_mask != 0) {
8616 b_scale = 0xff000000 / (b_mask >> b_shift);
8617 }
8618 unsigned int a_scale = 0;
8619 if (a_mask != 0) {
8620 a_scale = 0xff000000 / (a_mask >> a_shift);
8621 }
8622
8623 bool add_alpha = has_alpha(cdata->_format);
8624
8625 size_t size = tex->do_get_expected_ram_mipmap_page_size(cdata, n);
8626 size_t row_bytes = x_size * cdata->_num_components;
8627 PTA_uchar image = PTA_uchar::empty_array(size);
8628 for (int y = y_size - 1; y >= 0; --y) {
8629 unsigned char *p = image.p() + y * row_bytes;
8630 for (int x = 0; x < x_size; ++x) {
8631
8632 // Read a little-endian numeric value of bpp bytes.
8633 unsigned int pixel = 0;
8634 int shift = 0;
8635 for (int bi = 0; bi < bpp; ++bi) {
8636 unsigned int ch = (unsigned char)in.get();
8637 pixel |= (ch << shift);
8638 shift += 8;
8639 }
8640
8641 // Then break apart that value into its R, G, B, and maybe A components.
8642 unsigned int r = (((pixel & r_mask) >> r_shift) * r_scale) >> 24;
8643 unsigned int g = (((pixel & g_mask) >> g_shift) * g_scale) >> 24;
8644 unsigned int b = (((pixel & b_mask) >> b_shift) * b_scale) >> 24;
8645
8646 // Store the components in the Texture's image data.
8647 store_unscaled_byte(p, b);
8648 store_unscaled_byte(p, g);
8649 store_unscaled_byte(p, r);
8650 if (add_alpha) {
8651 unsigned int a = (((pixel & a_mask) >> a_shift) * a_scale) >> 24;
8652 store_unscaled_byte(p, a);
8653 }
8654 }
8655 nassertr(p <= image.p() + size, PTA_uchar());
8656 for (int bi = 0; bi < skip_bytes; ++bi) {
8657 in.get();
8658 }
8659 }
8660
8661 return image;
8662}
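/*
 * Sketch of the mask decode above for a hypothetical 16-bit R5G6B5 surface:
 *
 *   r_mask = 0xf800  ->  r_shift = 11,  r_scale = 0xff000000 / 31
 *   g_mask = 0x07e0  ->  g_shift = 5,   g_scale = 0xff000000 / 63
 *   b_mask = 0x001f  ->  b_shift = 0,   b_scale = 0xff000000 / 31
 *
 * Each channel is masked out, shifted down, multiplied by its scale and then
 * shifted right 24 bits to approximate the 0..255 range before being stored.
 */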
8663
8664/**
8665 * Called by read_dds for a DDS file in uncompressed luminance or luminance-
8666 * alpha format.
8667 */
8668PTA_uchar Texture::
8669read_dds_level_luminance_uncompressed(Texture *tex, CData *cdata, const DDSHeader &header,
8670 int n, istream &in) {
8671 int x_size = tex->do_get_expected_mipmap_x_size(cdata, n);
8672 int y_size = tex->do_get_expected_mipmap_y_size(cdata, n);
8673
8674 int pitch = (x_size * header.pf.rgb_bitcount) / 8;
8675
8676 // MS says the pitch can be supplied in the header file and must be DWORD
8677 // aligned, but this appears to apply to level 0 mipmaps only (where it
8678 // almost always will be anyway). Other mipmap levels seem to be tightly
8679 // packed, but there isn't a separate pitch for each mipmap level. Weird.
8680 if (n == 0) {
8681 pitch = ((pitch + 3) / 4) * 4;
8682 if (header.dds_flags & DDSD_PITCH) {
8683 pitch = header.pitch;
8684 }
8685 }
8686
8687 int bpp = header.pf.rgb_bitcount / 8;
8688 int skip_bytes = pitch - (bpp * x_size);
8689 nassertr(skip_bytes >= 0, PTA_uchar());
8690
8691 unsigned int r_mask = header.pf.r_mask;
8692 unsigned int a_mask = header.pf.a_mask;
8693
8694 // Determine the number of bits to shift each mask to the right so that the
8695 // lowest on bit is at bit 0.
8696 int r_shift = get_lowest_on_bit(r_mask);
8697 int a_shift = get_lowest_on_bit(a_mask);
8698
8699 // Then determine the scale factor required to raise the highest color value
8700 // to 0xff000000.
8701 unsigned int r_scale = 0;
8702 if (r_mask != 0) {
8703 r_scale = 0xff000000 / (r_mask >> r_shift);
8704 }
8705 unsigned int a_scale = 0;
8706 if (a_mask != 0) {
8707 a_scale = 0xff000000 / (a_mask >> a_shift);
8708 }
8709
8710 bool add_alpha = has_alpha(cdata->_format);
8711
8712 size_t size = tex->do_get_expected_ram_mipmap_page_size(cdata, n);
8713 size_t row_bytes = x_size * cdata->_num_components;
8714 PTA_uchar image = PTA_uchar::empty_array(size);
8715 for (int y = y_size - 1; y >= 0; --y) {
8716 unsigned char *p = image.p() + y * row_bytes;
8717 for (int x = 0; x < x_size; ++x) {
8718
8719 // Read a little-endian numeric value of bpp bytes.
8720 unsigned int pixel = 0;
8721 int shift = 0;
8722 for (int bi = 0; bi < bpp; ++bi) {
8723 unsigned int ch = (unsigned char)in.get();
8724 pixel |= (ch << shift);
8725 shift += 8;
8726 }
8727
8728 unsigned int r = (((pixel & r_mask) >> r_shift) * r_scale) >> 24;
8729
8730 // Store the components in the Texture's image data.
8731 store_unscaled_byte(p, r);
8732 if (add_alpha) {
8733 unsigned int a = (((pixel & a_mask) >> a_shift) * a_scale) >> 24;
8734 store_unscaled_byte(p, a);
8735 }
8736 }
8737 nassertr(p <= image.p() + size, PTA_uchar());
8738 for (int bi = 0; bi < skip_bytes; ++bi) {
8739 in.get();
8740 }
8741 }
8742
8743 return image;
8744}
8745
8746/**
8747 * Called by read_dds for DXT1 file format.
8748 */
8749PTA_uchar Texture::
8750read_dds_level_bc1(Texture *tex, CData *cdata, const DDSHeader &header, int n, istream &in) {
8751 int x_size = tex->do_get_expected_mipmap_x_size(cdata, n);
8752 int y_size = tex->do_get_expected_mipmap_y_size(cdata, n);
8753
8754 static const int div = 4;
8755 static const int block_bytes = 8;
8756
8757 // The DXT1 image is divided into num_rows x num_cols blocks, where each
8758 // block represents 4x4 pixels.
8759 int num_cols = max(div, x_size) / div;
8760 int num_rows = max(div, y_size) / div;
8761 int row_length = num_cols * block_bytes;
8762 int linear_size = row_length * num_rows;
8763
8764 if (n == 0) {
8765 if (header.dds_flags & DDSD_LINEARSIZE) {
8766 nassertr(linear_size == (int)header.pitch, PTA_uchar());
8767 }
8768 }
8769
8770 PTA_uchar image = PTA_uchar::empty_array(linear_size);
8771
8772 if (y_size >= 4) {
8773 // We have to flip the image as we read it, because of DirectX's inverted
8774 // sense of up. That means we (a) reverse the order of the rows of blocks
8775 // . . .
8776 for (int ri = num_rows - 1; ri >= 0; --ri) {
8777 unsigned char *p = image.p() + row_length * ri;
8778 in.read((char *)p, row_length);
8779
8780 for (int ci = 0; ci < num_cols; ++ci) {
8781 // . . . and (b) within each block, we reverse the 4 individual rows
8782 // of 4 pixels.
8783 uint32_t *cells = (uint32_t *)p;
8784 uint32_t w = cells[1];
8785 w = ((w & 0xff) << 24) | ((w & 0xff00) << 8) | ((w & 0xff0000) >> 8) | ((w & 0xff000000U) >> 24);
8786 cells[1] = w;
8787
8788 p += block_bytes;
8789 }
8790 }
8791
8792 } else if (y_size >= 2) {
8793 // To invert a two-pixel high image, we just flip two rows within a cell.
8794 unsigned char *p = image.p();
8795 in.read((char *)p, row_length);
8796
8797 for (int ci = 0; ci < num_cols; ++ci) {
8798 uint32_t *cells = (uint32_t *)p;
8799 uint32_t w = cells[1];
8800 w = ((w & 0xff) << 8) | ((w & 0xff00) >> 8);
8801 cells[1] = w;
8802
8803 p += block_bytes;
8804 }
8805
8806 } else if (y_size >= 1) {
8807 // No need to invert a one-pixel-high image.
8808 unsigned char *p = image.p();
8809 in.read((char *)p, row_length);
8810 }
8811
8812 return image;
8813}
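/*
 * Block-geometry example for the DXT1 reader above: a 16 x 8 level has
 * num_cols = 4 and num_rows = 2, so row_length = 32 and linear_size = 64
 * bytes; the two rows of blocks are read bottom-to-top, and the four 8-bit
 * index rows inside each block (the dword at cells[1]) are byte-reversed.
 */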
8814
8815/**
8816 * Called by read_dds for DXT2 or DXT3 file format.
8817 */
8818PTA_uchar Texture::
8819read_dds_level_bc2(Texture *tex, CData *cdata, const DDSHeader &header, int n, istream &in) {
8820 int x_size = tex->do_get_expected_mipmap_x_size(cdata, n);
8821 int y_size = tex->do_get_expected_mipmap_y_size(cdata, n);
8822
8823 static const int div = 4;
8824 static const int block_bytes = 16;
8825
8826 // The DXT3 image is divided into num_rows x num_cols blocks, where each
8827 // block represents 4x4 pixels. Unlike DXT1, each block consists of two
8828 // 8-byte chunks, representing the alpha and color separately.
8829 int num_cols = max(div, x_size) / div;
8830 int num_rows = max(div, y_size) / div;
8831 int row_length = num_cols * block_bytes;
8832 int linear_size = row_length * num_rows;
8833
8834 if (n == 0) {
8835 if (header.dds_flags & DDSD_LINEARSIZE) {
8836 nassertr(linear_size == (int)header.pitch, PTA_uchar());
8837 }
8838 }
8839
8840 PTA_uchar image = PTA_uchar::empty_array(linear_size);
8841
8842 if (y_size >= 4) {
8843 // We have to flip the image as we read it, because of DirectX's inverted
8844 // sense of up. That means we (a) reverse the order of the rows of blocks
8845 // . . .
8846 for (int ri = num_rows - 1; ri >= 0; --ri) {
8847 unsigned char *p = image.p() + row_length * ri;
8848 in.read((char *)p, row_length);
8849
8850 for (int ci = 0; ci < num_cols; ++ci) {
8851 // . . . and (b) within each block, we reverse the 4 individual rows
8852 // of 4 pixels.
8853 uint32_t *cells = (uint32_t *)p;
8854
8855 // Alpha. The block is four 16-bit words of pixel data.
8856 uint32_t w0 = cells[0];
8857 uint32_t w1 = cells[1];
8858 w0 = ((w0 & 0xffff) << 16) | ((w0 & 0xffff0000U) >> 16);
8859 w1 = ((w1 & 0xffff) << 16) | ((w1 & 0xffff0000U) >> 16);
8860 cells[0] = w1;
8861 cells[1] = w0;
8862
8863 // Color. Only the second 32-bit dword of the color block represents
8864 // the pixel data.
8865 uint32_t w = cells[3];
8866 w = ((w & 0xff) << 24) | ((w & 0xff00) << 8) | ((w & 0xff0000) >> 8) | ((w & 0xff000000U) >> 24);
8867 cells[3] = w;
8868
8869 p += block_bytes;
8870 }
8871 }
8872
8873 } else if (y_size >= 2) {
8874 // To invert a two-pixel high image, we just flip two rows within a cell.
8875 unsigned char *p = image.p();
8876 in.read((char *)p, row_length);
8877
8878 for (int ci = 0; ci < num_cols; ++ci) {
8879 uint32_t *cells = (uint32_t *)p;
8880
8881 uint32_t w0 = cells[0];
8882 w0 = ((w0 & 0xffff) << 16) | ((w0 & 0xffff0000U) >> 16);
8883 cells[0] = w0;
8884
8885 uint32_t w = cells[3];
8886 w = ((w & 0xff) << 8) | ((w & 0xff00) >> 8);
8887 cells[3] = w;
8888
8889 p += block_bytes;
8890 }
8891
8892 } else if (y_size >= 1) {
8893 // No need to invert a one-pixel-high image.
8894 unsigned char *p = image.p();
8895 in.read((char *)p, row_length);
8896 }
8897
8898 return image;
8899}
8900
8901/**
8902 * Called by read_dds for DXT4 or DXT5 file format.
8903 */
8904PTA_uchar Texture::
8905read_dds_level_bc3(Texture *tex, CData *cdata, const DDSHeader &header, int n, istream &in) {
8906 int x_size = tex->do_get_expected_mipmap_x_size(cdata, n);
8907 int y_size = tex->do_get_expected_mipmap_y_size(cdata, n);
8908
8909 static const int div = 4;
8910 static const int block_bytes = 16;
8911
8912  // The DXT5 image is similar to DXT3, in that each 4x4 block of pixels
8913 // consists of an alpha block and a color block, but the layout of the alpha
8914 // block is different.
8915 int num_cols = max(div, x_size) / div;
8916 int num_rows = max(div, y_size) / div;
8917 int row_length = num_cols * block_bytes;
8918 int linear_size = row_length * num_rows;
8919
8920 if (n == 0) {
8921 if (header.dds_flags & DDSD_LINEARSIZE) {
8922 nassertr(linear_size == (int)header.pitch, PTA_uchar());
8923 }
8924 }
8925
8926 PTA_uchar image = PTA_uchar::empty_array(linear_size);
8927
8928 if (y_size >= 4) {
8929 // We have to flip the image as we read it, because of DirectX's inverted
8930 // sense of up. That means we (a) reverse the order of the rows of blocks
8931 // . . .
8932 for (int ri = num_rows - 1; ri >= 0; --ri) {
8933 unsigned char *p = image.p() + row_length * ri;
8934 in.read((char *)p, row_length);
8935
8936 for (int ci = 0; ci < num_cols; ++ci) {
8937 // . . . and (b) within each block, we reverse the 4 individual rows
8938 // of 4 pixels.
8939 uint32_t *cells = (uint32_t *)p;
8940
8941 // Alpha. The block is one 16-bit word of reference values, followed
8942 // by six words of pixel values, in 12-bit rows. Tricky to invert.
8943 unsigned char p2 = p[2];
8944 unsigned char p3 = p[3];
8945 unsigned char p4 = p[4];
8946 unsigned char p5 = p[5];
8947 unsigned char p6 = p[6];
8948 unsigned char p7 = p[7];
8949
8950 p[2] = ((p7 & 0xf) << 4) | ((p6 & 0xf0) >> 4);
8951 p[3] = ((p5 & 0xf) << 4) | ((p7 & 0xf0) >> 4);
8952 p[4] = ((p6 & 0xf) << 4) | ((p5 & 0xf0) >> 4);
8953 p[5] = ((p4 & 0xf) << 4) | ((p3 & 0xf0) >> 4);
8954 p[6] = ((p2 & 0xf) << 4) | ((p4 & 0xf0) >> 4);
8955 p[7] = ((p3 & 0xf) << 4) | ((p2 & 0xf0) >> 4);
8956
8957 // Color. Only the second 32-bit dword of the color block represents
8958 // the pixel data.
8959 uint32_t w = cells[3];
8960 w = ((w & 0xff) << 24) | ((w & 0xff00) << 8) | ((w & 0xff0000) >> 8) | ((w & 0xff000000U) >> 24);
8961 cells[3] = w;
8962
8963 p += block_bytes;
8964 }
8965 }
8966
8967 } else if (y_size >= 2) {
8968 // To invert a two-pixel high image, we just flip two rows within a cell.
8969 unsigned char *p = image.p();
8970 in.read((char *)p, row_length);
8971
8972 for (int ci = 0; ci < num_cols; ++ci) {
8973 uint32_t *cells = (uint32_t *)p;
8974
8975 unsigned char p2 = p[2];
8976 unsigned char p3 = p[3];
8977 unsigned char p4 = p[4];
8978
8979 p[2] = ((p4 & 0xf) << 4) | ((p3 & 0xf0) >> 4);
8980 p[3] = ((p2 & 0xf) << 4) | ((p4 & 0xf0) >> 4);
8981 p[4] = ((p3 & 0xf) << 4) | ((p2 & 0xf0) >> 4);
8982
8983 uint32_t w0 = cells[0];
8984 w0 = ((w0 & 0xffff) << 16) | ((w0 & 0xffff0000U) >> 16);
8985 cells[0] = w0;
8986
8987 uint32_t w = cells[3];
8988 w = ((w & 0xff) << 8) | ((w & 0xff00) >> 8);
8989 cells[3] = w;
8990
8991 p += block_bytes;
8992 }
8993
8994 } else if (y_size >= 1) {
8995 // No need to invert a one-pixel-high image.
8996 unsigned char *p = image.p();
8997 in.read((char *)p, row_length);
8998 }
8999
9000 return image;
9001}
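/*
 * Nibble-level view of the alpha flip above: writing the 48 index bits as
 * nibbles n0..n11 (n0 = low nibble of p[2]), the four 12-bit rows are
 * (n0,n1,n2), (n3,n4,n5), (n6,n7,n8) and (n9,n10,n11); a vertical flip
 * reorders them to (n9,n10,n11), (n6,n7,n8), (n3,n4,n5), (n0,n1,n2), which
 * is exactly the shuffle performed on p[2] through p[7].
 */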
9002
9003/**
9004 * Called by read_dds for ATI1 compression.
9005 */
9006PTA_uchar Texture::
9007read_dds_level_bc4(Texture *tex, CData *cdata, const DDSHeader &header, int n, istream &in) {
9008 int x_size = tex->do_get_expected_mipmap_x_size(cdata, n);
9009 int y_size = tex->do_get_expected_mipmap_y_size(cdata, n);
9010
9011 static const int div = 4;
9012 static const int block_bytes = 8;
9013
9014 // The ATI1 (BC4) format uses the same compression mechanism as the alpha
9015 // channel of DXT5.
9016 int num_cols = max(div, x_size) / div;
9017 int num_rows = max(div, y_size) / div;
9018 int row_length = num_cols * block_bytes;
9019 int linear_size = row_length * num_rows;
9020
9021 if (n == 0) {
9022 if (header.dds_flags & DDSD_LINEARSIZE) {
9023 nassertr(linear_size == (int)header.pitch, PTA_uchar());
9024 }
9025 }
9026
9027 PTA_uchar image = PTA_uchar::empty_array(linear_size);
9028
9029 if (y_size >= 4) {
9030 // We have to flip the image as we read it, because of DirectX's inverted
9031 // sense of up. That means we (a) reverse the order of the rows of blocks
9032 // . . .
9033 for (int ri = num_rows - 1; ri >= 0; --ri) {
9034 unsigned char *p = image.p() + row_length * ri;
9035 in.read((char *)p, row_length);
9036
9037 for (int ci = 0; ci < num_cols; ++ci) {
9038 // . . . and (b) within each block, we reverse the 4 individual rows
9039 // of 4 pixels. The block is one 16-bit word of reference values,
9040 // followed by six words of pixel values, in 12-bit rows. Tricky to
9041 // invert.
9042 unsigned char p2 = p[2];
9043 unsigned char p3 = p[3];
9044 unsigned char p4 = p[4];
9045 unsigned char p5 = p[5];
9046 unsigned char p6 = p[6];
9047 unsigned char p7 = p[7];
9048
9049 p[2] = ((p7 & 0xf) << 4) | ((p6 & 0xf0) >> 4);
9050 p[3] = ((p5 & 0xf) << 4) | ((p7 & 0xf0) >> 4);
9051 p[4] = ((p6 & 0xf) << 4) | ((p5 & 0xf0) >> 4);
9052 p[5] = ((p4 & 0xf) << 4) | ((p3 & 0xf0) >> 4);
9053 p[6] = ((p2 & 0xf) << 4) | ((p4 & 0xf0) >> 4);
9054 p[7] = ((p3 & 0xf) << 4) | ((p2 & 0xf0) >> 4);
9055
9056 p += block_bytes;
9057 }
9058 }
9059
9060 } else if (y_size >= 2) {
9061 // To invert a two-pixel high image, we just flip two rows within a cell.
9062 unsigned char *p = image.p();
9063 in.read((char *)p, row_length);
9064
9065 for (int ci = 0; ci < num_cols; ++ci) {
9066 unsigned char p2 = p[2];
9067 unsigned char p3 = p[3];
9068 unsigned char p4 = p[4];
9069
9070 p[2] = ((p4 & 0xf) << 4) | ((p3 & 0xf0) >> 4);
9071 p[3] = ((p2 & 0xf) << 4) | ((p4 & 0xf0) >> 4);
9072 p[4] = ((p3 & 0xf) << 4) | ((p2 & 0xf0) >> 4);
9073
9074 p += block_bytes;
9075 }
9076
9077 } else if (y_size >= 1) {
9078 // No need to invert a one-pixel-high image.
9079 unsigned char *p = image.p();
9080 in.read((char *)p, row_length);
9081 }
9082
9083 return image;
9084}
9085
9086/**
9087 * Called by read_dds for ATI2 compression.
9088 */
9089PTA_uchar Texture::
9090read_dds_level_bc5(Texture *tex, CData *cdata, const DDSHeader &header, int n, istream &in) {
9091 int x_size = tex->do_get_expected_mipmap_x_size(cdata, n);
9092 int y_size = tex->do_get_expected_mipmap_y_size(cdata, n);
9093
9094 // The ATI2 (BC5) format uses the same compression mechanism as the ATI1
9095 // (BC4) format, but doubles the channels.
9096 int num_cols = max(4, x_size) / 2;
9097 int num_rows = max(4, y_size) / 4;
9098 int row_length = num_cols * 8;
9099 int linear_size = row_length * num_rows;
9100
9101 if (n == 0) {
9102 if (header.dds_flags & DDSD_LINEARSIZE) {
9103 nassertr(linear_size == (int)header.pitch, PTA_uchar());
9104 }
9105 }
9106
9107 PTA_uchar image = PTA_uchar::empty_array(linear_size);
9108
9109 if (y_size >= 4) {
9110 // We have to flip the image as we read it, because of DirectX's inverted
9111 // sense of up. That means we (a) reverse the order of the rows of blocks
9112 // . . .
9113 for (int ri = num_rows - 1; ri >= 0; --ri) {
9114 unsigned char *p = image.p() + row_length * ri;
9115 in.read((char *)p, row_length);
9116
9117 for (int ci = 0; ci < num_cols; ++ci) {
9118 // . . . and (b) within each block, we reverse the 4 individual rows
9119 // of 4 pixels. The block is one 16-bit word of reference values,
9120 // followed by six words of pixel values, in 12-bit rows. Tricky to
9121 // invert.
9122 unsigned char p2 = p[2];
9123 unsigned char p3 = p[3];
9124 unsigned char p4 = p[4];
9125 unsigned char p5 = p[5];
9126 unsigned char p6 = p[6];
9127 unsigned char p7 = p[7];
9128
9129 p[2] = ((p7 & 0xf) << 4) | ((p6 & 0xf0) >> 4);
9130 p[3] = ((p5 & 0xf) << 4) | ((p7 & 0xf0) >> 4);
9131 p[4] = ((p6 & 0xf) << 4) | ((p5 & 0xf0) >> 4);
9132 p[5] = ((p4 & 0xf) << 4) | ((p3 & 0xf0) >> 4);
9133 p[6] = ((p2 & 0xf) << 4) | ((p4 & 0xf0) >> 4);
9134 p[7] = ((p3 & 0xf) << 4) | ((p2 & 0xf0) >> 4);
9135
9136 p += 8;
9137 }
9138 }
9139
9140 } else if (y_size >= 2) {
9141 // To invert a two-pixel high image, we just flip two rows within a cell.
9142 unsigned char *p = image.p();
9143 in.read((char *)p, row_length);
9144
9145 for (int ci = 0; ci < num_cols; ++ci) {
9146 unsigned char p2 = p[2];
9147 unsigned char p3 = p[3];
9148 unsigned char p4 = p[4];
9149
9150 p[2] = ((p4 & 0xf) << 4) | ((p3 & 0xf0) >> 4);
9151 p[3] = ((p2 & 0xf) << 4) | ((p4 & 0xf0) >> 4);
9152 p[4] = ((p3 & 0xf) << 4) | ((p2 & 0xf0) >> 4);
9153
9154 p += 8;
9155 }
9156
9157 } else if (y_size >= 1) {
9158 // No need to invert a one-pixel-high image.
9159 unsigned char *p = image.p();
9160 in.read((char *)p, row_length);
9161 }
9162
9163 return image;
9164}
9165
9166/**
9167 * Removes the indicated PreparedGraphicsObjects table from the Texture's
9168 * table, without actually releasing the texture. This is intended to be
9169 * called only from PreparedGraphicsObjects::release_texture(); it should
9170 * never be called by user code.
9171 */
9172void Texture::
9173clear_prepared(int view, PreparedGraphicsObjects *prepared_objects) {
9174 PreparedViews::iterator pvi;
9175 pvi = _prepared_views.find(prepared_objects);
9176 if (pvi != _prepared_views.end()) {
9177 Contexts &contexts = (*pvi).second;
9178 Contexts::iterator ci;
9179 ci = contexts.find(view);
9180 if (ci != contexts.end()) {
9181 contexts.erase(ci);
9182 }
9183
9184 if (contexts.empty()) {
9185 _prepared_views.erase(pvi);
9186 }
9187 }
9188}
9189
9190/**
9191 * Reduces the number of channels in the texture, if necessary, according to
9192 * num_channels.
9193 */
9194void Texture::
9195consider_downgrade(PNMImage &pnmimage, int num_channels, const string &name) {
9196 if (num_channels != 0 && num_channels < pnmimage.get_num_channels()) {
9197 // One special case: we can't reduce from 3 to 2 components, since that
9198 // would require adding an alpha channel.
9199 if (pnmimage.get_num_channels() == 3 && num_channels == 2) {
9200 return;
9201 }
9202
9203 gobj_cat.info()
9204 << "Downgrading " << name << " from "
9205 << pnmimage.get_num_channels() << " components to "
9206 << num_channels << ".\n";
9207 pnmimage.set_num_channels(num_channels);
9208 }
9209}
9210
9211/**
9212 * Called by generate_simple_ram_image(), this compares the two PNMImages
9213 * pixel-by-pixel. If they're similar enough (within a given threshold),
9214 * returns true.
9215 */
9216bool Texture::
9217compare_images(const PNMImage &a, const PNMImage &b) {
9218 nassertr(a.get_maxval() == 255 && b.get_maxval() == 255, false);
9219 nassertr(a.get_num_channels() == 4 && b.get_num_channels() == 4, false);
9220 nassertr(a.get_x_size() == b.get_x_size() &&
9221 a.get_y_size() == b.get_y_size(), false);
9222
9223 const xel *a_array = a.get_array();
9224 const xel *b_array = b.get_array();
9225 const xelval *a_alpha = a.get_alpha_array();
9226 const xelval *b_alpha = b.get_alpha_array();
9227
9228 int x_size = a.get_x_size();
9229
9230 int delta = 0;
9231 for (int yi = 0; yi < a.get_y_size(); ++yi) {
9232 const xel *a_row = a_array + yi * x_size;
9233 const xel *b_row = b_array + yi * x_size;
9234 const xelval *a_alpha_row = a_alpha + yi * x_size;
9235 const xelval *b_alpha_row = b_alpha + yi * x_size;
9236 for (int xi = 0; xi < x_size; ++xi) {
9237 delta += abs(PPM_GETR(a_row[xi]) - PPM_GETR(b_row[xi]));
9238 delta += abs(PPM_GETG(a_row[xi]) - PPM_GETG(b_row[xi]));
9239 delta += abs(PPM_GETB(a_row[xi]) - PPM_GETB(b_row[xi]));
9240 delta += abs(a_alpha_row[xi] - b_alpha_row[xi]);
9241 }
9242 }
9243
9244 double average_delta = (double)delta / ((double)a.get_x_size() * (double)b.get_y_size() * (double)a.get_maxval());
9245 return (average_delta <= simple_image_threshold);
9246}
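/*
 * Worked example for compare_images(): for a pair of 16 x 16 RGBA images
 * whose channels differ by 4 on average, delta = 16 * 16 * 4 * 4 = 4096 and
 * average_delta = 4096 / (16 * 16 * 255) ~= 0.063, which is then compared
 * against the simple-image-threshold config variable.
 */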
9247
9248/**
9249 * Generates the next mipmap level from the previous one. If there are
9250 * multiple pages (e.g. a cube map), generates each page independently.
9251 *
9252 * x_size and y_size are the size of the previous level. They need not be a
9253 * power of 2, or even a multiple of 2.
9254 *
9255 * Assumes the lock is already held.
9256 */
9257void Texture::
9258do_filter_2d_mipmap_pages(const CData *cdata,
9259 Texture::RamImage &to, const Texture::RamImage &from,
9260 int x_size, int y_size) const {
9261 Filter2DComponent *filter_component;
9262 Filter2DComponent *filter_alpha;
9263
9264 if (is_srgb(cdata->_format)) {
9265 // We currently only support sRGB mipmap generation for unsigned byte
9266 // textures, due to our use of a lookup table.
9267 nassertv(cdata->_component_type == T_unsigned_byte);
9268
9269 if (has_sse2_sRGB_encode()) {
9270 filter_component = &filter_2d_unsigned_byte_srgb_sse2;
9271 } else {
9272 filter_component = &filter_2d_unsigned_byte_srgb;
9273 }
9274
9275 // Alpha is always linear.
9276 filter_alpha = &filter_2d_unsigned_byte;
9277
9278 } else {
9279 switch (cdata->_component_type) {
9280 case T_unsigned_byte:
9281 filter_component = &filter_2d_unsigned_byte;
9282 break;
9283
9284 case T_unsigned_short:
9285 filter_component = &filter_2d_unsigned_short;
9286 break;
9287
9288 case T_float:
9289 filter_component = &filter_2d_float;
9290 break;
9291
9292 default:
9293 gobj_cat.error()
9294 << "Unable to generate mipmaps for 2D texture with component type "
9295 << cdata->_component_type << "!\n";
9296 return;
9297 }
9298 filter_alpha = filter_component;
9299 }
9300
9301 size_t pixel_size = cdata->_num_components * cdata->_component_width;
9302 size_t row_size = (size_t)x_size * pixel_size;
9303
9304 int to_x_size = max(x_size >> 1, 1);
9305 int to_y_size = max(y_size >> 1, 1);
9306
9307 size_t to_row_size = (size_t)to_x_size * pixel_size;
9308 to._page_size = (size_t)to_y_size * to_row_size;
9309 to._image = PTA_uchar::empty_array(to._page_size * cdata->_z_size * cdata->_num_views, get_class_type());
9310
9311 bool alpha = has_alpha(cdata->_format);
9312 int num_color_components = cdata->_num_components;
9313 if (alpha) {
9314 --num_color_components;
9315 }
9316
9317 int num_pages = cdata->_z_size * cdata->_num_views;
9318 for (int z = 0; z < num_pages; ++z) {
9319 // For each page.
9320 unsigned char *p = to._image.p() + z * to._page_size;
9321 nassertv(p <= to._image.p() + to._image.size() + to._page_size);
9322 const unsigned char *q = from._image.p() + z * from._page_size;
9323 nassertv(q <= from._image.p() + from._image.size() + from._page_size);
9324 if (y_size != 1) {
9325 int y;
9326 for (y = 0; y < y_size - 1; y += 2) {
9327 // For each row.
9328 nassertv(p == to._image.p() + z * to._page_size + (y / 2) * to_row_size);
9329 nassertv(q == from._image.p() + z * from._page_size + y * row_size);
9330 if (x_size != 1) {
9331 int x;
9332 for (x = 0; x < x_size - 1; x += 2) {
9333 // For each pixel.
9334 for (int c = 0; c < num_color_components; ++c) {
9335 // For each component.
9336 filter_component(p, q, pixel_size, row_size);
9337 }
9338 if (alpha) {
9339 filter_alpha(p, q, pixel_size, row_size);
9340 }
9341 q += pixel_size;
9342 }
9343 if (x < x_size) {
9344 // Skip the last odd pixel.
9345 q += pixel_size;
9346 }
9347 } else {
9348 // Just one pixel.
9349 for (int c = 0; c < num_color_components; ++c) {
9350 // For each component.
9351 filter_component(p, q, 0, row_size);
9352 }
9353 if (alpha) {
9354 filter_alpha(p, q, 0, row_size);
9355 }
9356 }
9357 q += row_size;
9359 }
9360 if (y < y_size) {
9361 // Skip the last odd row.
9362 q += row_size;
9363 }
9364 } else {
9365 // Just one row.
9366 if (x_size != 1) {
9367 int x;
9368 for (x = 0; x < x_size - 1; x += 2) {
9369 // For each pixel.
9370 for (int c = 0; c < num_color_components; ++c) {
9371 // For each component.
9372 filter_component(p, q, pixel_size, 0);
9373 }
9374 if (alpha) {
9375 filter_alpha(p, q, pixel_size, 0);
9376 }
9377 q += pixel_size;
9378 }
9379 if (x < x_size) {
9380 // Skip the last odd pixel.
9381 q += pixel_size;
9382 }
9383 } else {
9384 // Just one pixel.
9385 for (int c = 0; c < num_color_components; ++c) {
9386 // For each component.
9387 filter_component(p, q, 0, 0);
9388 }
9389 if (alpha) {
9390 filter_alpha(p, q, 0, 0);
9391 }
9392 }
9393 }
9394
9395 nassertv(p == to._image.p() + (z + 1) * to._page_size);
9396 nassertv(q == from._image.p() + (z + 1) * from._page_size);
9397 }
9398}
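// [Editorial note] A quick example of the odd-size handling above: filtering a
// 5x3 level yields a max(5>>1,1) x max(3>>1,1) = 2x1 level.  Each output pixel
// averages a 2x2 block of source pixels; the fifth column and the third row
// fall into the "skip the last odd pixel/row" branches, and a 1x1 source level
// degenerates to copying its single pixel through the zero-offset filter calls.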
9399
9400/**
9401 * Generates the next mipmap level from the previous one, treating all the
9402 * pages of the level as a single 3-d block of pixels.
9403 *
9404 * x_size, y_size, and z_size are the size of the previous level. They need
9405 * not be a power of 2, or even a multiple of 2.
9406 *
9407 * Assumes the lock is already held.
9408 */
9409void Texture::
9410do_filter_3d_mipmap_level(const CData *cdata,
9411 Texture::RamImage &to, const Texture::RamImage &from,
9412 int x_size, int y_size, int z_size) const {
9413 Filter3DComponent *filter_component;
9414 Filter3DComponent *filter_alpha;
9415
9416 if (is_srgb(cdata->_format)) {
9417 // We currently only support sRGB mipmap generation for unsigned byte
9418 // textures, due to our use of a lookup table.
9419 nassertv(cdata->_component_type == T_unsigned_byte);
9420
9421 if (has_sse2_sRGB_encode()) {
9422 filter_component = &filter_3d_unsigned_byte_srgb_sse2;
9423 } else {
9424 filter_component = &filter_3d_unsigned_byte_srgb;
9425 }
9426
9427 // Alpha is always linear.
9428 filter_alpha = &filter_3d_unsigned_byte;
9429
9430 } else {
9431 switch (cdata->_component_type) {
9432 case T_unsigned_byte:
9433 filter_component = &filter_3d_unsigned_byte;
9434 break;
9435
9436 case T_unsigned_short:
9437 filter_component = &filter_3d_unsigned_short;
9438 break;
9439
9440 case T_float:
9441 filter_component = &filter_3d_float;
9442 break;
9443
9444 default:
9445 gobj_cat.error()
9446 << "Unable to generate mipmaps for 3D texture with component type "
9447 << cdata->_component_type << "!\n";
9448 return;
9449 }
9450 filter_alpha = filter_component;
9451 }
9452
9453 size_t pixel_size = cdata->_num_components * cdata->_component_width;
9454 size_t row_size = (size_t)x_size * pixel_size;
9455 size_t page_size = (size_t)y_size * row_size;
9456 size_t view_size = (size_t)z_size * page_size;
9457
9458 int to_x_size = max(x_size >> 1, 1);
9459 int to_y_size = max(y_size >> 1, 1);
9460 int to_z_size = max(z_size >> 1, 1);
9461
9462 size_t to_row_size = (size_t)to_x_size * pixel_size;
9463 size_t to_page_size = (size_t)to_y_size * to_row_size;
9464 size_t to_view_size = (size_t)to_z_size * to_page_size;
9465 to._page_size = to_page_size;
9466 to._image = PTA_uchar::empty_array(to_page_size * to_z_size * cdata->_num_views, get_class_type());
9467
9468 bool alpha = has_alpha(cdata->_format);
9469 int num_color_components = cdata->_num_components;
9470 if (alpha) {
9471 --num_color_components;
9472 }
9473
9474 for (int view = 0; view < cdata->_num_views; ++view) {
9475 unsigned char *start_to = to._image.p() + view * to_view_size;
9476 const unsigned char *start_from = from._image.p() + view * view_size;
9477 nassertv(start_to + to_view_size <= to._image.p() + to._image.size());
9478 nassertv(start_from + view_size <= from._image.p() + from._image.size());
9479 unsigned char *p = start_to;
9480 const unsigned char *q = start_from;
9481 if (z_size != 1) {
9482 int z;
9483 for (z = 0; z < z_size - 1; z += 2) {
9484 // For each level.
9485 nassertv(p == start_to + (z / 2) * to_page_size);
9486 nassertv(q == start_from + z * page_size);
9487 if (y_size != 1) {
9488 int y;
9489 for (y = 0; y < y_size - 1; y += 2) {
9490 // For each row.
9491 nassertv(p == start_to + (z / 2) * to_page_size + (y / 2) * to_row_size);
9492 nassertv(q == start_from + z * page_size + y * row_size);
9493 if (x_size != 1) {
9494 int x;
9495 for (x = 0; x < x_size - 1; x += 2) {
9496 // For each pixel.
9497 for (int c = 0; c < num_color_components; ++c) {
9498 // For each component.
9499 filter_component(p, q, pixel_size, row_size, page_size);
9500 }
9501 if (alpha) {
9502 filter_alpha(p, q, pixel_size, row_size, page_size);
9503 }
9504 q += pixel_size;
9505 }
9506 if (x < x_size) {
9507 // Skip the last odd pixel.
9508 q += pixel_size;
9509 }
9510 } else {
9511 // Just one pixel.
9512 for (int c = 0; c < num_color_components; ++c) {
9513 // For each component.
9514 filter_component(p, q, 0, row_size, page_size);
9515 }
9516 if (alpha) {
9517 filter_alpha(p, q, 0, row_size, page_size);
9518 }
9519 }
9520 q += row_size;
9522 }
9523 if (y < y_size) {
9524 // Skip the last odd row.
9525 q += row_size;
9526 }
9527 } else {
9528 // Just one row.
9529 if (x_size != 1) {
9530 int x;
9531 for (x = 0; x < x_size - 1; x += 2) {
9532 // For each pixel.
9533 for (int c = 0; c < num_color_components; ++c) {
9534 // For each component.
9535 filter_component(p, q, pixel_size, 0, page_size);
9536 }
9537 if (alpha) {
9538 filter_alpha(p, q, pixel_size, 0, page_size);
9539 }
9540 q += pixel_size;
9541 }
9542 if (x < x_size) {
9543 // Skip the last odd pixel.
9544 q += pixel_size;
9545 }
9546 } else {
9547 // Just one pixel.
9548 for (int c = 0; c < num_color_components; ++c) {
9549 // For each component.
9550 filter_component(p, q, 0, 0, page_size);
9551 }
9552 if (alpha) {
9553 filter_alpha(p, q, 0, 0, page_size);
9554 }
9555 }
9556 }
9557 q += page_size;
9558 }
9559 if (z < z_size) {
9560 // Skip the last odd page.
9561 q += page_size;
9562 }
9563 } else {
9564 // Just one page.
9565 if (y_size != 1) {
9566 int y;
9567 for (y = 0; y < y_size - 1; y += 2) {
9568 // For each row.
9569 nassertv(p == start_to + (y / 2) * to_row_size);
9570 nassertv(q == start_from + y * row_size);
9571 if (x_size != 1) {
9572 int x;
9573 for (x = 0; x < x_size - 1; x += 2) {
9574 // For each pixel.
9575 for (int c = 0; c < num_color_components; ++c) {
9576 // For each component.
9577 filter_component(p, q, pixel_size, row_size, 0);
9578 }
9579 if (alpha) {
9580 filter_alpha(p, q, pixel_size, row_size, 0);
9581 }
9582 q += pixel_size;
9583 }
9584 if (x < x_size) {
9585 // Skip the last odd pixel.
9586 q += pixel_size;
9587 }
9588 } else {
9589 // Just one pixel.
9590 for (int c = 0; c < num_color_components; ++c) {
9591 // For each component.
9592 filter_component(p, q, 0, row_size, 0);
9593 }
9594 if (alpha) {
9595 filter_alpha(p, q, 0, row_size, 0);
9596 }
9597 }
9598 q += row_size;
9600 }
9601 if (y < y_size) {
9602 // Skip the last odd row.
9603 q += row_size;
9604 }
9605 } else {
9606 // Just one row.
9607 if (x_size != 1) {
9608 int x;
9609 for (x = 0; x < x_size - 1; x += 2) {
9610 // For each pixel.
9611 for (int c = 0; c < num_color_components; ++c) {
9612 // For each component.
9613 filter_component(p, q, pixel_size, 0, 0);
9614 }
9615 if (alpha) {
9616 filter_alpha(p, q, pixel_size, 0, 0);
9617 }
9618 q += pixel_size;
9619 }
9620 if (x < x_size) {
9621 // Skip the last odd pixel.
9622 q += pixel_size;
9623 }
9624 } else {
9625 // Just one pixel.
9626 for (int c = 0; c < num_color_components; ++c) {
9627 // For each component.
9628 filter_component(p, q, 0, 0, 0);
9629 }
9630 if (alpha) {
9631 filter_alpha(p, q, 0, 0, 0);
9632 }
9633 }
9634 }
9635 }
9636
9637 nassertv(p == start_to + to_z_size * to_page_size);
9638 nassertv(q == start_from + z_size * page_size);
9639 }
9640}
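// [Editorial note] The 3-D case halves all three axes at once, so e.g. a
// 16x16x7 level produces an 8x8x3 level.  Each output texel averages a 2x2x2
// block of eight source texels via the filter_3d_* helpers below, with odd
// trailing columns, rows and pages skipped just as in the 2-D case.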
9641
9642/**
9643 * Averages a 2x2 block of pixel components into a single pixel component, for
9644 * producing the next mipmap level. Increments p and q to the next component.
9645 */
9646void Texture::
9647filter_2d_unsigned_byte(unsigned char *&p, const unsigned char *&q,
9648 size_t pixel_size, size_t row_size) {
9649 unsigned int result = ((unsigned int)q[0] +
9650 (unsigned int)q[pixel_size] +
9651 (unsigned int)q[row_size] +
9652 (unsigned int)q[pixel_size + row_size]) >> 2;
9653 *p = (unsigned char)result;
9654 ++p;
9655 ++q;
9656}
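// [Editorial note] For example, averaging the byte values 10, 20, 30 and 41
// gives (10 + 20 + 30 + 41) >> 2 = 25; the shift truncates the exact mean of
// 25.25, so these integer filters round down by at most 3/4 of a level.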
9657
9658/**
9659 * Averages a 2x2 block of pixel components into a single pixel component, for
9660 * producing the next mipmap level. Increments p and q to the next component.
9661 */
9662void Texture::
9663filter_2d_unsigned_byte_srgb(unsigned char *&p, const unsigned char *&q,
9664 size_t pixel_size, size_t row_size) {
9665 float result = (decode_sRGB_float(q[0]) +
9666 decode_sRGB_float(q[pixel_size]) +
9667 decode_sRGB_float(q[row_size]) +
9668 decode_sRGB_float(q[pixel_size + row_size]));
9669
9670 *p = encode_sRGB_uchar(result * 0.25f);
9671 ++p;
9672 ++q;
9673}
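// [Editorial note] Averaging in decoded (linear) space is what makes the sRGB
// variants worthwhile: a 2x2 block containing two black (0) and two white (255)
// sRGB pixels averages to linear 0.5, which re-encodes to roughly sRGB 188,
// whereas averaging the raw byte values would give 127 and visibly darken the
// mipmap.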
9674
9675/**
9676 * Averages a 2x2 block of pixel components into a single pixel component, for
9677 * producing the next mipmap level. Increments p and q to the next component.
9678 */
9679void Texture::
9680filter_2d_unsigned_byte_srgb_sse2(unsigned char *&p, const unsigned char *&q,
9681 size_t pixel_size, size_t row_size) {
9682 float result = (decode_sRGB_float(q[0]) +
9683 decode_sRGB_float(q[pixel_size]) +
9684 decode_sRGB_float(q[row_size]) +
9685 decode_sRGB_float(q[pixel_size + row_size]));
9686
9687 *p = encode_sRGB_uchar_sse2(result * 0.25f);
9688 ++p;
9689 ++q;
9690}
9691
9692/**
9693 * Averages a 2x2 block of pixel components into a single pixel component, for
9694 * producing the next mipmap level. Increments p and q to the next component.
9695 */
9696void Texture::
9697filter_2d_unsigned_short(unsigned char *&p, const unsigned char *&q,
9698 size_t pixel_size, size_t row_size) {
9699 unsigned int result = ((unsigned int)*(unsigned short *)&q[0] +
9700 (unsigned int)*(unsigned short *)&q[pixel_size] +
9701 (unsigned int)*(unsigned short *)&q[row_size] +
9702 (unsigned int)*(unsigned short *)&q[pixel_size + row_size]) >> 2;
9703 store_unscaled_short(p, result);
9704 q += 2;
9705}
9706
9707/**
9708 * Averages a 2x2 block of pixel components into a single pixel component, for
9709 * producing the next mipmap level. Increments p and q to the next component.
9710 */
9711void Texture::
9712filter_2d_float(unsigned char *&p, const unsigned char *&q,
9713 size_t pixel_size, size_t row_size) {
9714 *(float *)p = (*(float *)&q[0] +
9715 *(float *)&q[pixel_size] +
9716 *(float *)&q[row_size] +
9717 *(float *)&q[pixel_size + row_size]) / 4.0f;
9718 p += 4;
9719 q += 4;
9720}
9721
9722/**
9723 * Averages a 2x2x2 block of pixel components into a single pixel component,
9724 * for producing the next mipmap level. Increments p and q to the next
9725 * component.
9726 */
9727void Texture::
9728filter_3d_unsigned_byte(unsigned char *&p, const unsigned char *&q,
9729 size_t pixel_size, size_t row_size, size_t page_size) {
9730 unsigned int result = ((unsigned int)q[0] +
9731 (unsigned int)q[pixel_size] +
9732 (unsigned int)q[row_size] +
9733 (unsigned int)q[pixel_size + row_size] +
9734 (unsigned int)q[page_size] +
9735 (unsigned int)q[pixel_size + page_size] +
9736 (unsigned int)q[row_size + page_size] +
9737 (unsigned int)q[pixel_size + row_size + page_size]) >> 3;
9738 *p = (unsigned char)result;
9739 ++p;
9740 ++q;
9741}
9742
9743/**
9744 * Averages a 2x2x2 block of pixel components into a single pixel component,
9745 * for producing the next mipmap level. Increments p and q to the next
9746 * component.
9747 */
9748void Texture::
9749filter_3d_unsigned_byte_srgb(unsigned char *&p, const unsigned char *&q,
9750 size_t pixel_size, size_t row_size, size_t page_size) {
9751 float result = (decode_sRGB_float(q[0]) +
9752 decode_sRGB_float(q[pixel_size]) +
9753 decode_sRGB_float(q[row_size]) +
9754 decode_sRGB_float(q[pixel_size + row_size]) +
9755 decode_sRGB_float(q[page_size]) +
9756 decode_sRGB_float(q[pixel_size + page_size]) +
9757 decode_sRGB_float(q[row_size + page_size]) +
9758 decode_sRGB_float(q[pixel_size + row_size + page_size]));
9759
9760 *p = encode_sRGB_uchar(result * 0.125f);
9761 ++p;
9762 ++q;
9763}
9764
9765/**
9766 * Averages a 2x2x2 block of pixel components into a single pixel component,
9767 * for producing the next mipmap level. Increments p and q to the next
9768 * component.
9769 */
9770void Texture::
9771filter_3d_unsigned_byte_srgb_sse2(unsigned char *&p, const unsigned char *&q,
9772 size_t pixel_size, size_t row_size, size_t page_size) {
9773 float result = (decode_sRGB_float(q[0]) +
9774 decode_sRGB_float(q[pixel_size]) +
9775 decode_sRGB_float(q[row_size]) +
9776 decode_sRGB_float(q[pixel_size + row_size]) +
9777 decode_sRGB_float(q[page_size]) +
9778 decode_sRGB_float(q[pixel_size + page_size]) +
9779 decode_sRGB_float(q[row_size + page_size]) +
9780 decode_sRGB_float(q[pixel_size + row_size + page_size]));
9781
9782 *p = encode_sRGB_uchar_sse2(result * 0.125f);
9783 ++p;
9784 ++q;
9785}
9786
9787/**
9788 * Averages a 2x2x2 block of pixel components into a single pixel component,
9789 * for producing the next mipmap level. Increments p and q to the next
9790 * component.
9791 */
9792void Texture::
9793filter_3d_unsigned_short(unsigned char *&p, const unsigned char *&q,
9794 size_t pixel_size, size_t row_size,
9795 size_t page_size) {
9796 unsigned int result = ((unsigned int)*(unsigned short *)&q[0] +
9797 (unsigned int)*(unsigned short *)&q[pixel_size] +
9798 (unsigned int)*(unsigned short *)&q[row_size] +
9799 (unsigned int)*(unsigned short *)&q[pixel_size + row_size] +
9800 (unsigned int)*(unsigned short *)&q[page_size] +
9801 (unsigned int)*(unsigned short *)&q[pixel_size + page_size] +
9802 (unsigned int)*(unsigned short *)&q[row_size + page_size] +
9803 (unsigned int)*(unsigned short *)&q[pixel_size + row_size + page_size]) >> 3;
9804 store_unscaled_short(p, result);
9805 q += 2;
9806}
9807
9808/**
9809 * Averages a 2x2x2 block of pixel components into a single pixel component,
9810 * for producing the next mipmap level. Increments p and q to the next
9811 * component.
9812 */
9813void Texture::
9814filter_3d_float(unsigned char *&p, const unsigned char *&q,
9815 size_t pixel_size, size_t row_size, size_t page_size) {
9816 *(float *)p = (*(float *)&q[0] +
9817 *(float *)&q[pixel_size] +
9818 *(float *)&q[row_size] +
9819 *(float *)&q[pixel_size + row_size] +
9820 *(float *)&q[page_size] +
9821 *(float *)&q[pixel_size + page_size] +
9822 *(float *)&q[row_size + page_size] +
9823 *(float *)&q[pixel_size + row_size + page_size]) / 8.0f;
9824 p += 4;
9825 q += 4;
9826}
9827
9828/**
9829 * Invokes the squish library to compress the RAM image(s).
9830 */
9831bool Texture::
9832do_squish(CData *cdata, Texture::CompressionMode compression, int squish_flags) {
9833#ifdef HAVE_SQUISH
9834 if (!do_has_all_ram_mipmap_images(cdata)) {
9835 // If we're about to compress the RAM image, we should ensure that we have
9836 // all of the mipmap levels first.
9837 do_generate_ram_mipmap_images(cdata, false);
9838 }
9839
9840 RamImages compressed_ram_images;
9841 compressed_ram_images.reserve(cdata->_ram_images.size());
9842 for (size_t n = 0; n < cdata->_ram_images.size(); ++n) {
9843 RamImage compressed_image;
9844 int x_size = do_get_expected_mipmap_x_size(cdata, n);
9845 int y_size = do_get_expected_mipmap_y_size(cdata, n);
9846 int num_pages = do_get_expected_mipmap_num_pages(cdata, n);
9847 int page_size = squish::GetStorageRequirements(x_size, y_size, squish_flags);
9848 int cell_size = squish::GetStorageRequirements(4, 4, squish_flags);
9849
9850 compressed_image._page_size = page_size;
9851 compressed_image._image = PTA_uchar::empty_array(page_size * num_pages);
9852 for (int z = 0; z < num_pages; ++z) {
9853 unsigned char *dest_page = compressed_image._image.p() + z * page_size;
9854 unsigned const char *source_page = cdata->_ram_images[n]._image.p() + z * cdata->_ram_images[n]._page_size;
9855 unsigned const char *source_page_end = source_page + cdata->_ram_images[n]._page_size;
9856 // Convert one 4 x 4 cell at a time.
9857 unsigned char *d = dest_page;
9858 for (int y = 0; y < y_size; y += 4) {
9859 for (int x = 0; x < x_size; x += 4) {
9860 unsigned char tb[16 * 4];
9861 int mask = 0;
9862 unsigned char *t = tb;
9863 for (int i = 0; i < 16; ++i) {
9864 int xi = x + i % 4;
9865 int yi = y + i / 4;
9866 unsigned const char *s = source_page + (yi * x_size + xi) * cdata->_num_components;
9867 if (s < source_page_end) {
9868 switch (cdata->_num_components) {
9869 case 1:
9870 t[0] = s[0]; // r
9871 t[1] = s[0]; // g
9872 t[2] = s[0]; // b
9873 t[3] = 255; // a
9874 break;
9875
9876 case 2:
9877 t[0] = s[0]; // r
9878 t[1] = s[0]; // g
9879 t[2] = s[0]; // b
9880 t[3] = s[1]; // a
9881 break;
9882
9883 case 3:
9884 t[0] = s[2]; // r
9885 t[1] = s[1]; // g
9886 t[2] = s[0]; // b
9887 t[3] = 255; // a
9888 break;
9889
9890 case 4:
9891 t[0] = s[2]; // r
9892 t[1] = s[1]; // g
9893 t[2] = s[0]; // b
9894 t[3] = s[3]; // a
9895 break;
9896 }
9897 mask |= (1 << i);
9898 }
9899 t += 4;
9900 }
9901 squish::CompressMasked(tb, mask, d, squish_flags);
9902 d += cell_size;
9904 }
9905 }
9906 }
9907 compressed_ram_images.push_back(compressed_image);
9908 }
9909 cdata->_ram_images.swap(compressed_ram_images);
9910 cdata->_ram_image_compression = compression;
9911 return true;
9912
9913#else // HAVE_SQUISH
9914 return false;
9915
9916#endif // HAVE_SQUISH
9917}
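// [Editorial note] The 4x4 cell packing above matches how the DXT formats are
// stored: squish::GetStorageRequirements(4, 4, flags) is 8 bytes per cell for
// DXT1 and 16 bytes per cell for DXT3/DXT5.  A minimal sketch of how this path
// is normally reached through the public API (the filename is only a
// placeholder; assumes squish support was compiled in):
//
//   Texture *tex = TexturePool::load_texture("maps/example.png");
//   if (tex != nullptr && tex->compress_ram_image(Texture::CM_dxt5)) {
//     // The in-memory mipmap chain is now DXT5-compressed via do_squish().
//   }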
9918
9919/**
9920 * Invokes the squish library to uncompress the RAM image(s).
9921 */
9922bool Texture::
9923do_unsquish(CData *cdata, int squish_flags) {
9924#ifdef HAVE_SQUISH
9925 RamImages uncompressed_ram_images;
9926 uncompressed_ram_images.reserve(cdata->_ram_images.size());
9927 for (size_t n = 0; n < cdata->_ram_images.size(); ++n) {
9928 RamImage uncompressed_image;
9929 int x_size = do_get_expected_mipmap_x_size(cdata, n);
9930 int y_size = do_get_expected_mipmap_y_size(cdata, n);
9931 int num_pages = do_get_expected_mipmap_num_pages(cdata, n);
9932 int page_size = squish::GetStorageRequirements(x_size, y_size, squish_flags);
9933 int cell_size = squish::GetStorageRequirements(4, 4, squish_flags);
9934
9935 uncompressed_image._page_size = do_get_expected_ram_mipmap_page_size(cdata, n);
9936 uncompressed_image._image = PTA_uchar::empty_array(uncompressed_image._page_size * num_pages);
9937 for (int z = 0; z < num_pages; ++z) {
9938 unsigned char *dest_page = uncompressed_image._image.p() + z * uncompressed_image._page_size;
9939 unsigned char *dest_page_end = dest_page + uncompressed_image._page_size;
9940 unsigned const char *source_page = cdata->_ram_images[n]._image.p() + z * page_size;
9941 // Unconvert one 4 x 4 cell at a time.
9942 unsigned const char *s = source_page;
9943 for (int y = 0; y < y_size; y += 4) {
9944 for (int x = 0; x < x_size; x += 4) {
9945 unsigned char tb[16 * 4];
9946 squish::Decompress(tb, s, squish_flags);
9947 s += cell_size;
9948
9949 unsigned char *t = tb;
9950 for (int i = 0; i < 16; ++i) {
9951 int xi = x + i % 4;
9952 int yi = y + i / 4;
9953 unsigned char *d = dest_page + (yi * x_size + xi) * cdata->_num_components;
9954 if (d < dest_page_end) {
9955 switch (cdata->_num_components) {
9956 case 1:
9957 d[0] = t[1]; // g
9958 break;
9959
9960 case 2:
9961 d[0] = t[1]; // g
9962 d[1] = t[3]; // a
9963 break;
9964
9965 case 3:
9966 d[2] = t[0]; // r
9967 d[1] = t[1]; // g
9968 d[0] = t[2]; // b
9969 break;
9970
9971 case 4:
9972 d[2] = t[0]; // r
9973 d[1] = t[1]; // g
9974 d[0] = t[2]; // b
9975 d[3] = t[3]; // a
9976 break;
9977 }
9978 }
9979 t += 4;
9980 }
9981 }
9983 }
9984 }
9985 uncompressed_ram_images.push_back(uncompressed_image);
9986 }
9987 cdata->_ram_images.swap(uncompressed_ram_images);
9988 cdata->_ram_image_compression = CM_off;
9989 return true;
9990
9991#else // HAVE_SQUISH
9992 return false;
9993
9994#endif // HAVE_SQUISH
9995}
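// [Editorial note] For one- and two-channel textures the decompressed gray
// value is read back from the green channel (t[1]); do_squish() replicated the
// gray value into r, g and b, and green is the most faithful channel to recover
// it from, since DXT color endpoints store green with 6 bits of precision
// versus 5 for red and blue.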
9996
9997/**
9998 * Factory method to generate a Texture object
9999 */
10000void Texture::
10001register_with_read_factory() {
10002 BamReader::get_factory()->register_factory(get_class_type(), make_from_bam);
10003}
10004
10005/**
10006 * Function to write the important information in the particular object to a
10007 * Datagram
10008 */
10009void Texture::
10010write_datagram(BamWriter *manager, Datagram &me) {
10011 CDWriter cdata(_cycler, false);
10012
10013 bool has_rawdata = false;
10014 do_write_datagram_header(cdata, manager, me, has_rawdata);
10015 do_write_datagram_body(cdata, manager, me);
10016
10017 // If we are also including the texture's image data, then stuff it in here.
10018 if (has_rawdata) {
10019 do_write_datagram_rawdata(cdata, manager, me);
10020 }
10021}
10022
10023/**
10024 * Called by the BamReader to perform any final actions needed for setting up
10025 * the object after all objects have been read and all pointers have been
10026 * completed.
10027 */
10028void Texture::
10029finalize(BamReader *manager) {
10030 // Unref the pointer that we explicitly reffed in make_from_bam().
10031 unref();
10032
10033 // We should never get back to zero after unreffing our own count, because
10034 // we expect to have been stored in a pointer somewhere. If we do get to
10035 // zero, it's a memory leak; the way to avoid this is to call unref_delete()
10036 // above instead of unref(), but this is dangerous to do from within a
10037 // virtual function.
10038 nassertv(get_ref_count() != 0);
10039}
10040
10041
10042/**
10043 * Writes the header part of the texture to the Datagram. This is the common
10044 * part that is shared by all Texture subclasses, and contains the filename
10045 * and rawdata flags. This method is not virtual because all Texture
10046 * subclasses must write the same data at this step.
10047 *
10048 * This part must be read first before calling do_fillin_body() to determine
10049 * whether to load the Texture from the TexturePool or directly from the bam
10050 * stream.
10051 *
10052 * After this call, has_rawdata will be filled with either true or false,
10053 * according to whether we expect to write the texture rawdata to the bam
10054 * stream following the texture body.
10055 */
10056void Texture::
10057do_write_datagram_header(CData *cdata, BamWriter *manager, Datagram &me, bool &has_rawdata) {
10058 // Write out the texture's raw pixel data if (a) the current Bam Texture
10059 // Mode requires that, or (b) there's no filename, so the file can't be
10060 // loaded up from disk, but the raw pixel data is currently available in
10061 // RAM.
10062
10063 // Otherwise, we just write out the filename, and assume whoever loads the
10064 // bam file later will have access to the image file on disk.
10065 BamWriter::BamTextureMode file_texture_mode = manager->get_file_texture_mode();
10066 has_rawdata = (file_texture_mode == BamWriter::BTM_rawdata ||
10067 (cdata->_filename.empty() && do_has_bam_rawdata(cdata)));
10068 if (has_rawdata && !do_has_bam_rawdata(cdata)) {
10069 do_get_bam_rawdata(cdata);
10070 if (!do_has_bam_rawdata(cdata)) {
10071 // No image data after all.
10072 has_rawdata = false;
10073 }
10074 }
10075
10076 bool has_bam_dir = !manager->get_filename().empty();
10077 Filename bam_dir = manager->get_filename().get_dirname();
10078 Filename filename = cdata->_filename;
10079 Filename alpha_filename = cdata->_alpha_filename;
10080
10081 VirtualFileSystem *vfs = VirtualFileSystem::get_global_ptr();
10082
10083 switch (file_texture_mode) {
10084 case BamWriter::BTM_unchanged:
10085 case BamWriter::BTM_rawdata:
10086 break;
10087
10088 case BamWriter::BTM_fullpath:
10089 filename = cdata->_fullpath;
10090 alpha_filename = cdata->_alpha_fullpath;
10091 break;
10092
10093 case BamWriter::BTM_relative:
10094 filename = cdata->_fullpath;
10095 alpha_filename = cdata->_alpha_fullpath;
10096 bam_dir.make_absolute(vfs->get_cwd());
10097 if (!has_bam_dir || !filename.make_relative_to(bam_dir, true)) {
10098 filename.find_on_searchpath(get_model_path());
10099 }
10100 if (gobj_cat.is_debug()) {
10101 gobj_cat.debug()
10102 << "Texture file " << cdata->_fullpath
10103 << " found as " << filename << "\n";
10104 }
10105 if (!has_bam_dir || !alpha_filename.make_relative_to(bam_dir, true)) {
10106 alpha_filename.find_on_searchpath(get_model_path());
10107 }
10108 if (gobj_cat.is_debug()) {
10109 gobj_cat.debug()
10110 << "Alpha image " << cdata->_alpha_fullpath
10111 << " found as " << alpha_filename << "\n";
10112 }
10113 break;
10114
10115 case BamWriter::BTM_basename:
10116 filename = cdata->_fullpath.get_basename();
10117 alpha_filename = cdata->_alpha_fullpath.get_basename();
10118 break;
10119
10120 default:
10121 gobj_cat.error()
10122 << "Unsupported bam-texture-mode: " << (int)file_texture_mode << "\n";
10123 }
10124
10125 if (filename.empty()) {
10126 if (do_has_bam_rawdata(cdata) || cdata->_has_clear_color) {
10127 // If we don't have a filename, we have to store rawdata anyway.
10128 has_rawdata = true;
10129 }
10130 }
10131
10132 me.add_string(get_name());
10133 me.add_string(filename);
10134 me.add_string(alpha_filename);
10135 me.add_uint8(cdata->_primary_file_num_channels);
10136 me.add_uint8(cdata->_alpha_file_channel);
10137 me.add_bool(has_rawdata);
10138
10139 if (manager->get_file_minor_ver() < 25 &&
10140 cdata->_texture_type == TT_cube_map) {
10141 // Between Panda3D releases 1.7.2 and 1.8.0 (bam versions 6.24 and 6.25),
10142 // we added TT_2d_texture_array, shifting the definition for TT_cube_map.
10143 me.add_uint8(TT_2d_texture_array);
10144 } else {
10145 me.add_uint8(cdata->_texture_type);
10146 }
10147
10148 if (manager->get_file_minor_ver() >= 32) {
10149 me.add_bool(cdata->_has_read_mipmaps);
10150 }
10151}
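// [Editorial note] The header written above is, in order: name, filename,
// alpha_filename, primary_file_num_channels, alpha_file_channel, the
// has_rawdata flag, the texture type (remapped for pre-6.25 bams, where the
// numeric value now used by TT_2d_texture_array still meant TT_cube_map), and,
// for bam 6.32 and later, has_read_mipmaps.  make_this_from_bam() below reads
// these fields back in exactly this order.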
10152
10153/**
10154 * Writes the body part of the texture to the Datagram. This is generally all
10155 * of the texture parameters except for the header and the rawdata.
10156 */
10157void Texture::
10158do_write_datagram_body(CData *cdata, BamWriter *manager, Datagram &me) {
10159 if (manager->get_file_minor_ver() >= 36) {
10160 cdata->_default_sampler.write_datagram(me);
10161 } else {
10162 const SamplerState &s = cdata->_default_sampler;
10163 me.add_uint8(s.get_wrap_u());
10164 me.add_uint8(s.get_wrap_v());
10165 me.add_uint8(s.get_wrap_w());
10166 me.add_uint8(s.get_minfilter());
10167 me.add_uint8(s.get_magfilter());
10168 me.add_int16(s.get_anisotropic_degree());
10169 s.get_border_color().write_datagram(me);
10170 }
10171
10172 me.add_uint8(cdata->_compression);
10173 me.add_uint8(cdata->_quality_level);
10174
10175 me.add_uint8(cdata->_format);
10176 me.add_uint8(cdata->_num_components);
10177
10178 if (cdata->_texture_type == TT_buffer_texture) {
10179 me.add_uint8(cdata->_usage_hint);
10180 }
10181
10182 if (manager->get_file_minor_ver() >= 28) {
10183 me.add_uint8(cdata->_auto_texture_scale);
10184 }
10185 me.add_uint32(cdata->_orig_file_x_size);
10186 me.add_uint32(cdata->_orig_file_y_size);
10187
10188 bool has_simple_ram_image = !cdata->_simple_ram_image._image.empty();
10189 me.add_bool(has_simple_ram_image);
10190
10191 // Write out the simple image too, so it will be available later.
10192 if (has_simple_ram_image) {
10193 me.add_uint32(cdata->_simple_x_size);
10194 me.add_uint32(cdata->_simple_y_size);
10195 me.add_int32(cdata->_simple_image_date_generated);
10196 me.add_uint32(cdata->_simple_ram_image._image.size());
10197 me.append_data(cdata->_simple_ram_image._image, cdata->_simple_ram_image._image.size());
10198 }
10199
10200 if (manager->get_file_minor_ver() >= 45) {
10201 me.add_bool(cdata->_has_clear_color);
10202 if (cdata->_has_clear_color) {
10203 cdata->_clear_color.write_datagram(me);
10204 }
10205 }
10206}
10207
10208/**
10209 * Writes the rawdata part of the texture to the Datagram.
10210 */
10211void Texture::
10212do_write_datagram_rawdata(CData *cdata, BamWriter *manager, Datagram &me) {
10213 me.add_uint32(cdata->_x_size);
10214 me.add_uint32(cdata->_y_size);
10215 me.add_uint32(cdata->_z_size);
10216
10217 if (manager->get_file_minor_ver() >= 30) {
10218 me.add_uint32(cdata->_pad_x_size);
10219 me.add_uint32(cdata->_pad_y_size);
10220 me.add_uint32(cdata->_pad_z_size);
10221 }
10222
10223 if (manager->get_file_minor_ver() >= 26) {
10224 me.add_uint32(cdata->_num_views);
10225 }
10226 me.add_uint8(cdata->_component_type);
10227 me.add_uint8(cdata->_component_width);
10228 me.add_uint8(cdata->_ram_image_compression);
10229
10230 if (cdata->_ram_images.empty() && cdata->_has_clear_color &&
10231 manager->get_file_minor_ver() < 45) {
10232 // For older .bam versions that don't support clear colors, make up a RAM
10233 // image.
10234 int image_size = do_get_expected_ram_image_size(cdata);
10235 me.add_uint8(1);
10236 me.add_uint32(do_get_expected_ram_page_size(cdata));
10237 me.add_uint32(image_size);
10238
10239 // Fill the image with the clear color.
10240 unsigned char pixel[16];
10241 const int pixel_size = do_get_clear_data(cdata, pixel);
10242 nassertv(pixel_size > 0);
10243
10244 for (int i = 0; i < image_size; i += pixel_size) {
10245 me.append_data(pixel, pixel_size);
10246 }
10247 } else {
10248 me.add_uint8(cdata->_ram_images.size());
10249 for (size_t n = 0; n < cdata->_ram_images.size(); ++n) {
10250 me.add_uint32(cdata->_ram_images[n]._page_size);
10251 me.add_uint32(cdata->_ram_images[n]._image.size());
10252 me.append_data(cdata->_ram_images[n]._image, cdata->_ram_images[n]._image.size());
10253 }
10254 }
10255}
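// [Editorial note] The rawdata block layout written above is: x/y/z size,
// optional pad sizes (bam >= 6.30), optional num_views (bam >= 6.26), component
// type, component width, RAM image compression mode, the mipmap image count,
// and then each image's page size, byte count and raw bytes.  For pre-6.45
// bams a texture that has only a clear color gets a single synthesized RAM
// image filled with that color, since those readers cannot represent clear
// colors directly.  do_fillin_rawdata() below reads the same layout back.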
10256
10257/**
10258 * Factory method to generate a Texture object
10259 */
10260TypedWritable *Texture::
10261make_from_bam(const FactoryParams &params) {
10262 PT(Texture) dummy = new Texture;
10263 return dummy->make_this_from_bam(params);
10264}
10265
10266/**
10267 * Called by make_from_bam() once the particular subclass of Texture is known.
10268 * This is called on a newly-constructed Texture object of the appropriate
10269 * subclass. It will return either the same Texture object (e.g. this), or a
10270 * different Texture object loaded via the TexturePool, as appropriate.
10271 */
10272TypedWritable *Texture::
10273make_this_from_bam(const FactoryParams &params) {
10274 // The process of making a texture is slightly different than making other
10275 // TypedWritable objects. That is because all creation of Textures should
10276 // be done through calls to TexturePool, which ensures that any loads of the
10277 // same filename refer to the same memory.
10278
10279 DatagramIterator scan;
10280 BamReader *manager;
10281
10282 parse_params(params, scan, manager);
10283
10284 // Get the header information--the filenames and texture type--so we can
10285 // look up the file on disk first.
10286 string name = scan.get_string();
10287 Filename filename = scan.get_string();
10288 Filename alpha_filename = scan.get_string();
10289
10290 int primary_file_num_channels = scan.get_uint8();
10291 int alpha_file_channel = scan.get_uint8();
10292 bool has_rawdata = scan.get_bool();
10293 TextureType texture_type = (TextureType)scan.get_uint8();
10294 if (manager->get_file_minor_ver() < 25) {
10295 // Between Panda3D releases 1.7.2 and 1.8.0 (bam versions 6.24 and 6.25),
10296 // we added TT_2d_texture_array, shifting the definition for TT_cube_map.
10297 if (texture_type == TT_2d_texture_array) {
10298 texture_type = TT_cube_map;
10299 }
10300 }
10301 bool has_read_mipmaps = false;
10302 if (manager->get_file_minor_ver() >= 32) {
10303 has_read_mipmaps = scan.get_bool();
10304 }
10305
10306 Texture *me = nullptr;
10307 if (has_rawdata) {
10308 // If the raw image data is included, then just load the texture directly
10309 // from the stream, and return it. In this case we return the "this"
10310 // pointer, since it's a newly-created Texture object of the appropriate
10311 // type.
10312 me = this;
10313 me->set_name(name);
10314 CDWriter cdata_me(me->_cycler, true);
10315 cdata_me->_filename = filename;
10316 cdata_me->_alpha_filename = alpha_filename;
10317 cdata_me->_primary_file_num_channels = primary_file_num_channels;
10318 cdata_me->_alpha_file_channel = alpha_file_channel;
10319 cdata_me->_texture_type = texture_type;
10320 cdata_me->_has_read_mipmaps = has_read_mipmaps;
10321
10322 // Read the texture attributes directly from the bam stream.
10323 me->do_fillin_body(cdata_me, scan, manager);
10324 me->do_fillin_rawdata(cdata_me, scan, manager);
10325
10326 // To manage the reference count, explicitly ref it now, then unref it in
10327 // the finalize callback.
10328 me->ref();
10329 manager->register_finalize(me);
10330
10331 } else {
10332 // The raw image data isn't included, so we'll be loading the Texture via
10333 // the TexturePool. In this case we use the "this" pointer as a temporary
10334 // object to read all of the attributes from the bam stream.
10335 Texture *dummy = this;
10336 AutoTextureScale auto_texture_scale = ATS_unspecified;
10337 bool has_simple_ram_image = false;
10338 {
10339 CDWriter cdata_dummy(dummy->_cycler, true);
10340 dummy->do_fillin_body(cdata_dummy, scan, manager);
10341 auto_texture_scale = cdata_dummy->_auto_texture_scale;
10342 has_simple_ram_image = !cdata_dummy->_simple_ram_image._image.empty();
10343 }
10344
10345 if (filename.empty()) {
10346 // This texture has no filename; since we don't have an image to load,
10347 // we can't actually create the texture.
10348 gobj_cat.info()
10349 << "Cannot create texture '" << name << "' with no filename.\n";
10350
10351 } else {
10352 // This texture does have a filename, so try to load it from disk.
10353 VirtualFileSystem *vfs = VirtualFileSystem::get_global_ptr();
10354 if (!manager->get_filename().empty()) {
10355 // If texture filename was given relative to the bam filename, expand
10356 // it now.
10357 Filename bam_dir = manager->get_filename().get_dirname();
10358 vfs->resolve_filename(filename, bam_dir);
10359 if (!alpha_filename.empty()) {
10360 vfs->resolve_filename(alpha_filename, bam_dir);
10361 }
10362 }
10363
10364 LoaderOptions options = manager->get_loader_options();
10365 if (dummy->uses_mipmaps()) {
10366 options.set_texture_flags(options.get_texture_flags() | LoaderOptions::TF_generate_mipmaps);
10367 }
10368 options.set_auto_texture_scale(auto_texture_scale);
10369
10370 switch (texture_type) {
10371 case TT_buffer_texture:
10372 case TT_1d_texture:
10373 case TT_2d_texture:
10374 case TT_1d_texture_array:
10375 // If we don't want to preload textures, and we already have a simple
10376 // RAM image (or don't need one), we don't need to load it from disk.
10377 // We do check for it in the texture pool first, though, in case it has
10378 // already been loaded.
10379 if ((options.get_texture_flags() & LoaderOptions::TF_preload) == 0 &&
10380 (has_simple_ram_image || (options.get_texture_flags() & LoaderOptions::TF_preload_simple) == 0)) {
10381 if (alpha_filename.empty()) {
10382 me = TexturePool::get_texture(filename, primary_file_num_channels,
10383 has_read_mipmaps);
10384 } else {
10385 me = TexturePool::get_texture(filename, alpha_filename,
10386 primary_file_num_channels,
10387 alpha_file_channel,
10388 has_read_mipmaps);
10389 }
10390 if (me != nullptr && me->get_texture_type() == texture_type) {
10391 // We can use this.
10392 break;
10393 }
10394
10395 // We don't have a texture, but we didn't need to preload it, so we
10396 // can just use this one. We just need to know where we can find it
10397 // when we do need to reload it.
10398 Filename fullpath = filename;
10399 Filename alpha_fullpath = alpha_filename;
10400 const DSearchPath &model_path = get_model_path();
10401 if (vfs->resolve_filename(fullpath, model_path) &&
10402 (alpha_fullpath.empty() || vfs->resolve_filename(alpha_fullpath, model_path))) {
10403 me = dummy;
10404 me->set_name(name);
10405
10406 {
10407 CDWriter cdata_me(me->_cycler, true);
10408 cdata_me->_filename = filename;
10409 cdata_me->_alpha_filename = alpha_filename;
10410 cdata_me->_fullpath = fullpath;
10411 cdata_me->_alpha_fullpath = alpha_fullpath;
10412 cdata_me->_primary_file_num_channels = primary_file_num_channels;
10413 cdata_me->_alpha_file_channel = alpha_file_channel;
10414 cdata_me->_texture_type = texture_type;
10415 cdata_me->_loaded_from_image = true;
10416 cdata_me->_has_read_mipmaps = has_read_mipmaps;
10417 }
10418
10419 // To manage the reference count, explicitly ref it now, then unref
10420 // it in the finalize callback.
10421 me->ref();
10422 manager->register_finalize(me);
10423
10424 // Do add it to the cache now, so that future uses of this same
10425 // texture are unified.
10426 TexturePool::add_texture(me);
10427 return me;
10428 }
10429 }
10430 if (alpha_filename.empty()) {
10431 me = TexturePool::load_texture(filename, primary_file_num_channels,
10432 has_read_mipmaps, options);
10433 } else {
10434 me = TexturePool::load_texture(filename, alpha_filename,
10435 primary_file_num_channels,
10436 alpha_file_channel,
10437 has_read_mipmaps, options);
10438 }
10439 break;
10440
10441 case TT_3d_texture:
10442 me = TexturePool::load_3d_texture(filename, has_read_mipmaps, options);
10443 break;
10444
10445 case TT_2d_texture_array:
10446 case TT_cube_map_array:
10447 me = TexturePool::load_2d_texture_array(filename, has_read_mipmaps, options);
10448 break;
10449
10450 case TT_cube_map:
10451 me = TexturePool::load_cube_map(filename, has_read_mipmaps, options);
10452 break;
10453 }
10454 }
10455
10456 if (me != nullptr) {
10457 me->set_name(name);
10458 CDWriter cdata_me(me->_cycler, true);
10459 me->do_fillin_from(cdata_me, dummy);
10460
10461 // Since in this case me was loaded from the TexturePool, there's no
10462 // need to explicitly manage the reference count. TexturePool will hold
10463 // it safely.
10464 }
10465 }
10466
10467 return me;
10468}
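// [Editorial note] In short, make_this_from_bam() takes one of three paths:
// (1) rawdata present -- fill this newly constructed object directly from the
// stream; (2) no rawdata, preloading disabled and an acceptable simple image --
// keep this object as a lazy reference to the on-disk file and add it to the
// TexturePool so later loads unify with it; (3) otherwise -- load the real
// texture through the TexturePool and copy the bam parameters onto it with
// do_fillin_from().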
10469
10470/**
10471 * Reads in the part of the Texture that was written with
10472 * do_write_datagram_body().
10473 */
10474void Texture::
10475do_fillin_body(CData *cdata, DatagramIterator &scan, BamReader *manager) {
10476 cdata->_default_sampler.read_datagram(scan, manager);
10477
10478 if (manager->get_file_minor_ver() >= 1) {
10479 cdata->_compression = (CompressionMode)scan.get_uint8();
10480 }
10481 if (manager->get_file_minor_ver() >= 16) {
10482 cdata->_quality_level = (QualityLevel)scan.get_uint8();
10483 }
10484
10485 cdata->_format = (Format)scan.get_uint8();
10486 cdata->_num_components = scan.get_uint8();
10487
10488 if (cdata->_texture_type == TT_buffer_texture) {
10489 cdata->_usage_hint = (GeomEnums::UsageHint)scan.get_uint8();
10490 }
10491
10492 cdata->inc_properties_modified();
10493
10494 cdata->_auto_texture_scale = ATS_unspecified;
10495 if (manager->get_file_minor_ver() >= 28) {
10496 cdata->_auto_texture_scale = (AutoTextureScale)scan.get_uint8();
10497 }
10498
10499 bool has_simple_ram_image = false;
10500 if (manager->get_file_minor_ver() >= 18) {
10501 cdata->_orig_file_x_size = scan.get_uint32();
10502 cdata->_orig_file_y_size = scan.get_uint32();
10503
10504 has_simple_ram_image = scan.get_bool();
10505 }
10506
10507 if (has_simple_ram_image) {
10508 cdata->_simple_x_size = scan.get_uint32();
10509 cdata->_simple_y_size = scan.get_uint32();
10510 cdata->_simple_image_date_generated = scan.get_int32();
10511
10512 size_t u_size = scan.get_uint32();
10513
10514 // Protect against large allocation.
10515 if (u_size > scan.get_remaining_size()) {
10516 gobj_cat.error()
10517 << "simple RAM image extends past end of datagram, is texture corrupt?\n";
10518 return;
10519 }
10520
10521 PTA_uchar image = PTA_uchar::empty_array(u_size, get_class_type());
10522 scan.extract_bytes(image.p(), u_size);
10523
10524 cdata->_simple_ram_image._image = image;
10525 cdata->_simple_ram_image._page_size = u_size;
10526 cdata->inc_simple_image_modified();
10527 }
10528
10529 if (manager->get_file_minor_ver() >= 45) {
10530 cdata->_has_clear_color = scan.get_bool();
10531 if (cdata->_has_clear_color) {
10532 cdata->_clear_color.read_datagram(scan);
10533 }
10534 }
10535}
10536
10537/**
10538 * Reads in the part of the Texture that was written with
10539 * do_write_datagram_rawdata().
10540 */
10541void Texture::
10542do_fillin_rawdata(CData *cdata, DatagramIterator &scan, BamReader *manager) {
10543 cdata->_x_size = scan.get_uint32();
10544 cdata->_y_size = scan.get_uint32();
10545 cdata->_z_size = scan.get_uint32();
10546
10547 if (manager->get_file_minor_ver() >= 30) {
10548 cdata->_pad_x_size = scan.get_uint32();
10549 cdata->_pad_y_size = scan.get_uint32();
10550 cdata->_pad_z_size = scan.get_uint32();
10551 } else {
10552 do_set_pad_size(cdata, 0, 0, 0);
10553 }
10554
10555 cdata->_num_views = 1;
10556 if (manager->get_file_minor_ver() >= 26) {
10557 cdata->_num_views = scan.get_uint32();
10558 }
10559 cdata->_component_type = (ComponentType)scan.get_uint8();
10560 cdata->_component_width = scan.get_uint8();
10561 cdata->_ram_image_compression = CM_off;
10562 if (manager->get_file_minor_ver() >= 1) {
10563 cdata->_ram_image_compression = (CompressionMode)scan.get_uint8();
10564 }
10565
10566 int num_ram_images = 1;
10567 if (manager->get_file_minor_ver() >= 3) {
10568 num_ram_images = scan.get_uint8();
10569 }
10570
10571 cdata->_ram_images.clear();
10572 cdata->_ram_images.reserve(num_ram_images);
10573 for (int n = 0; n < num_ram_images; ++n) {
10574 cdata->_ram_images.push_back(RamImage());
10575 cdata->_ram_images[n]._page_size = get_expected_ram_page_size();
10576 if (manager->get_file_minor_ver() >= 1) {
10577 cdata->_ram_images[n]._page_size = scan.get_uint32();
10578 }
10579
10580 // fill the cdata->_image buffer with image data
10581 size_t u_size = scan.get_uint32();
10582
10583 // Protect against large allocation.
10584 if (u_size > scan.get_remaining_size()) {
10585 gobj_cat.error()
10586 << "RAM image " << n << " extends past end of datagram, is texture corrupt?\n";
10587 return;
10588 }
10589
10590 PTA_uchar image = PTA_uchar::empty_array(u_size, get_class_type());
10591 scan.extract_bytes(image.p(), u_size);
10592
10593 cdata->_ram_images[n]._image = image;
10594 }
10595 cdata->_loaded_from_image = true;
10596 cdata->inc_image_modified();
10597}
10598
10599/**
10600 * Called in make_from_bam(), this method properly copies the attributes from
10601 * the bam stream (as stored in dummy) into this texture, updating the
10602 * modified flags appropriately.
10603 */
10604void Texture::
10605do_fillin_from(CData *cdata, const Texture *dummy) {
10606 // Use the setters instead of setting these directly, so we can correctly
10607 // avoid incrementing cdata->_properties_modified if none of these actually
10608 // change. (Otherwise, we'd have to reload the texture to the GSG every
10609 // time we loaded a new bam file that references the texture, since each bam
10610 // file reference passes through this function.)
10611
10612 CDReader cdata_dummy(dummy->_cycler);
10613
10614 do_set_wrap_u(cdata, cdata_dummy->_default_sampler.get_wrap_u());
10615 do_set_wrap_v(cdata, cdata_dummy->_default_sampler.get_wrap_v());
10616 do_set_wrap_w(cdata, cdata_dummy->_default_sampler.get_wrap_w());
10617 do_set_border_color(cdata, cdata_dummy->_default_sampler.get_border_color());
10618
10619 if (cdata_dummy->_default_sampler.get_minfilter() != SamplerState::FT_default) {
10620 do_set_minfilter(cdata, cdata_dummy->_default_sampler.get_minfilter());
10621 }
10622 if (cdata_dummy->_default_sampler.get_magfilter() != SamplerState::FT_default) {
10623 do_set_magfilter(cdata, cdata_dummy->_default_sampler.get_magfilter());
10624 }
10625 if (cdata_dummy->_default_sampler.get_anisotropic_degree() != 0) {
10626 do_set_anisotropic_degree(cdata, cdata_dummy->_default_sampler.get_anisotropic_degree());
10627 }
10628 if (cdata_dummy->_compression != CM_default) {
10629 do_set_compression(cdata, cdata_dummy->_compression);
10630 }
10631 if (cdata_dummy->_quality_level != QL_default) {
10632 do_set_quality_level(cdata, cdata_dummy->_quality_level);
10633 }
10634
10635 Format format = cdata_dummy->_format;
10636 int num_components = cdata_dummy->_num_components;
10637
10638 if (num_components == cdata->_num_components) {
10639 // Only reset the format if the number of components hasn't changed, since
10640 // if the number of components has changed our texture no longer matches
10641 // what it was when the bam was written.
10642 do_set_format(cdata, format);
10643 }
10644
10645 if (!cdata_dummy->_simple_ram_image._image.empty()) {
10646 // Only replace the simple ram image if it was generated more recently
10647 // than the one we already have.
10648 if (cdata->_simple_ram_image._image.empty() ||
10649 cdata_dummy->_simple_image_date_generated > cdata->_simple_image_date_generated) {
10650 do_set_simple_ram_image(cdata,
10651 cdata_dummy->_simple_ram_image._image,
10652 cdata_dummy->_simple_x_size,
10653 cdata_dummy->_simple_y_size);
10654 cdata->_simple_image_date_generated = cdata_dummy->_simple_image_date_generated;
10655 }
10656 }
10657}
10658
10659/**
10660 *
10661 */
10662Texture::CData::
10663CData() {
10664 _primary_file_num_channels = 0;
10665 _alpha_file_channel = 0;
10666 _keep_ram_image = true;
10667 _compression = CM_default;
10668 _auto_texture_scale = ATS_unspecified;
10669 _ram_image_compression = CM_off;
10670 _render_to_texture = false;
10671 _match_framebuffer_format = false;
10672 _post_load_store_cache = false;
10673 _quality_level = QL_default;
10674
10675 _texture_type = TT_2d_texture;
10676 _x_size = 0;
10677 _y_size = 1;
10678 _z_size = 1;
10679 _num_views = 1;
10680
10681 // We will override the format in a moment (in the Texture constructor), but
10682 // set it to something else first to avoid the check in do_set_format
10683 // depending on an uninitialized value.
10684 _format = F_rgba;
10685
10686 // Only used for buffer textures.
10687 _usage_hint = GeomEnums::UH_unspecified;
10688
10689 _pad_x_size = 0;
10690 _pad_y_size = 0;
10691 _pad_z_size = 0;
10692
10693 _orig_file_x_size = 0;
10694 _orig_file_y_size = 0;
10695
10696 _loaded_from_image = false;
10697 _loaded_from_txo = false;
10698 _has_read_pages = false;
10699 _has_read_mipmaps = false;
10700 _num_mipmap_levels_read = 0;
10701
10702 _simple_x_size = 0;
10703 _simple_y_size = 0;
10704 _simple_ram_image._page_size = 0;
10705
10706 _has_clear_color = false;
10707}
10708
10709/**
10710 *
10711 */
10712Texture::CData::
10713CData(const Texture::CData &copy) {
10714 _num_mipmap_levels_read = 0;
10715
10716 do_assign(&copy);
10717
10718 _properties_modified = copy._properties_modified;
10719 _image_modified = copy._image_modified;
10720 _simple_image_modified = copy._simple_image_modified;
10721}
10722
10723/**
10724 *
10725 */
10726CycleData *Texture::CData::
10727make_copy() const {
10728 return new CData(*this);
10729}
10730
10731/**
10732 *
10733 */
10734void Texture::CData::
10735do_assign(const Texture::CData *copy) {
10736 _filename = copy->_filename;
10737 _alpha_filename = copy->_alpha_filename;
10738 if (!copy->_fullpath.empty()) {
10739 // Since the fullpath is often empty on a file loaded directly from a txo,
10740 // we only assign the fullpath if it is not empty.
10741 _fullpath = copy->_fullpath;
10742 _alpha_fullpath = copy->_alpha_fullpath;
10743 }
10744 _primary_file_num_channels = copy->_primary_file_num_channels;
10745 _alpha_file_channel = copy->_alpha_file_channel;
10746 _x_size = copy->_x_size;
10747 _y_size = copy->_y_size;
10748 _z_size = copy->_z_size;
10749 _num_views = copy->_num_views;
10750 _pad_x_size = copy->_pad_x_size;
10751 _pad_y_size = copy->_pad_y_size;
10752 _pad_z_size = copy->_pad_z_size;
10753 _orig_file_x_size = copy->_orig_file_x_size;
10754 _orig_file_y_size = copy->_orig_file_y_size;
10755 _num_components = copy->_num_components;
10756 _component_width = copy->_component_width;
10757 _texture_type = copy->_texture_type;
10758 _format = copy->_format;
10759 _component_type = copy->_component_type;
10760 _loaded_from_image = copy->_loaded_from_image;
10761 _loaded_from_txo = copy->_loaded_from_txo;
10762 _has_read_pages = copy->_has_read_pages;
10763 _has_read_mipmaps = copy->_has_read_mipmaps;
10764 _num_mipmap_levels_read = copy->_num_mipmap_levels_read;
10765 _default_sampler = copy->_default_sampler;
10766 _keep_ram_image = copy->_keep_ram_image;
10767 _compression = copy->_compression;
10768 _match_framebuffer_format = copy->_match_framebuffer_format;
10769 _quality_level = copy->_quality_level;
10770 _auto_texture_scale = copy->_auto_texture_scale;
10771 _ram_image_compression = copy->_ram_image_compression;
10772 _ram_images = copy->_ram_images;
10773 _simple_x_size = copy->_simple_x_size;
10774 _simple_y_size = copy->_simple_y_size;
10775 _simple_ram_image = copy->_simple_ram_image;
10776}
10777
10778/**
10779 * Writes the contents of this object to the datagram for shipping out to a
10780 * Bam file.
10781 */
10782void Texture::CData::
10783write_datagram(BamWriter *manager, Datagram &dg) const {
10784}
10785
10786/**
10787 * Receives an array of pointers, one for each time manager->read_pointer()
10788 * was called in fillin(). Returns the number of pointers processed.
10789 */
10790int Texture::CData::
10791complete_pointers(TypedWritable **p_list, BamReader *manager) {
10792 return 0;
10793}
10794
10795/**
10796 * This internal function is called by make_from_bam to read in all of the
10797 * relevant data from the BamFile for the new Geom.
10798 */
10799void Texture::CData::
10800fillin(DatagramIterator &scan, BamReader *manager) {
10801}
10802
10803/**
10804 *
10805 */
10806ostream &
10807operator << (ostream &out, Texture::TextureType tt) {
10808 return out << Texture::format_texture_type(tt);
10809}
10810
10811/**
10812 *
10813 */
10814ostream &
10815operator << (ostream &out, Texture::ComponentType ct) {
10816 return out << Texture::format_component_type(ct);
10817}
10818
10819/**
10820 *
10821 */
10822ostream &
10823operator << (ostream &out, Texture::Format f) {
10824 return out << Texture::format_format(f);
10825}
10826
10827/**
10828 *
10829 */
10830ostream &
10831operator << (ostream &out, Texture::CompressionMode cm) {
10832 return out << Texture::format_compression_mode(cm);
10833}
10834
10835/**
10836 *
10837 */
10838ostream &
10839operator << (ostream &out, Texture::QualityLevel tql) {
10840 return out << Texture::format_quality_level(tql);
10841}
10842
10843/**
10844 *
10845 */
10846istream &
10847operator >> (istream &in, Texture::QualityLevel &tql) {
10848 string word;
10849 in >> word;
10850
10851 tql = Texture::string_quality_level(word);
10852 return in;
10853}
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
void parse_params(const FactoryParams &params, DatagramIterator &scan, BamReader *&manager)
Takes in a FactoryParams, passed from a WritableFactory into any TypedWritable's make function,...
Definition: bamReader.I:275
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
This class represents a thread-safe handle to a promised future result of an asynchronous operation,...
Definition: asyncFuture.h:61
An instance of this class is written to the front of a Bam or Txo file to make the file a cached inst...
void add_dependent_file(const Filename &pathname)
Adds the indicated file to the list of files that will be loaded to generate the data in this record.
get_data
Returns a pointer to the data stored in the record, or NULL if there is no data.
set_data
Stores a new data object on the record.
This class maintains a cache of Bam and/or Txo objects generated from model files and texture images ...
Definition: bamCache.h:42
get_cache_textures
Returns whether texture files (e.g.
Definition: bamCache.h:90
bool store(BamCacheRecord *record)
Flushes a cache entry to disk.
Definition: bamCache.cxx:194
static BamCache * get_global_ptr()
Returns a pointer to the global BamCache object, which is used automatically by the ModelPool and Tex...
Definition: bamCache.I:223
get_cache_compressed_textures
Returns whether compressed texture files will be stored in the cache, as compressed txo files.
Definition: bamCache.h:92
This is the fundamental interface for extracting binary objects from a Bam file, as generated by a Ba...
Definition: bamReader.h:110
void register_finalize(TypedWritable *whom)
Should be called by an object reading itself from the Bam file to indicate that this particular objec...
Definition: bamReader.cxx:808
bool resolve()
This may be called at any time during processing of the Bam file to resolve all the known pointers so...
Definition: bamReader.cxx:325
bool init()
Initializes the BamReader prior to reading any objects from its source.
Definition: bamReader.cxx:85
get_filename
If a BAM is a file, then the BamReader should contain the name of the file.
Definition: bamReader.h:155
TypedWritable * read_object()
Reads a single object from the Bam file.
Definition: bamReader.cxx:224
get_loader_options
Returns the LoaderOptions passed to the loader when the model was requested, if any.
Definition: bamReader.h:156
int get_file_minor_ver() const
Returns the minor version number of the Bam file currently being read.
Definition: bamReader.I:83
static WritableFactory * get_factory()
Returns the global WritableFactory for generating TypedWritable objects.
Definition: bamReader.I:177
This is the fundamental interface for writing binary objects to a Bam file, to be extracted later by ...
Definition: bamWriter.h:63
get_file_texture_mode
Returns the BamTextureMode preference indicated by the Bam file currently being written.
Definition: bamWriter.h:95
get_filename
If a BAM is a file, then the BamWriter should contain the name of the file.
Definition: bamWriter.h:92
int get_file_minor_ver() const
Returns the minor version number of the Bam file currently being written.
Definition: bamWriter.I:59
get_active
Returns the active flag associated with this object.
Definition: bufferContext.h:55
get_resident
Returns the resident flag associated with this object.
Definition: bufferContext.h:56
get_data_size_bytes
Returns the number of bytes previously reported for the data object.
Definition: bufferContext.h:53
void notify_all()
Informs all of the other threads who are currently blocked on wait() that the relevant condition has ...
void wait()
Waits on the condition.
This class specializes ConfigVariable as an enumerated type.
int get_word(size_t n) const
Returns the variable's nth value.
std::string get_unique_value(size_t n) const
Returns the nth unique value of the variable.
size_t get_num_unique_values() const
Returns the number of unique values in the variable.
PointerToArray< Element > cast_non_const() const
Casts away the constness of the CPTA(Element), and returns an equivalent PTA(Element).
This collects together the pieces of data that are accumulated for each node while walking the scene ...
This object performs a depth-first traversal of the scene graph, with optional view-frustum culling,...
Definition: cullTraverser.h:45
This template class calls PipelineCycler::read() in the constructor and PipelineCycler::release_read(...
This template class calls PipelineCycler::read_unlocked(), and then provides a transparent read-only ...
This template class calls PipelineCycler::write() in the constructor and PipelineCycler::release_writ...
A single page of data maintained by a PipelineCycler.
Definition: cycleData.h:50
This class stores a list of directories that can be searched, in order, to locate a particular file.
Definition: dSearchPath.h:28
This class can be used to read a binary file that consists of an arbitrary header followed by a numbe...
bool read_header(std::string &header, size_t num_bytes)
Reads a sequence of bytes from the beginning of the datagram file.
bool open(const FileReference *file)
Opens the indicated filename for reading.
A class to retrieve the individual data elements previously stored in a Datagram.
uint8_t get_uint8()
Extracts an unsigned 8-bit integer.
vector_uchar extract_bytes(size_t size)
Extracts the indicated number of bytes in the datagram and returns them as a vector of bytes.
uint32_t get_uint32()
Extracts an unsigned 32-bit integer.
bool get_bool()
Extracts a boolean value.
std::string get_string()
Extracts a variable-length string.
int32_t get_int32()
Extracts a signed 32-bit integer.
size_t get_remaining_size() const
Returns the number of bytes left in the datagram.
This class can be used to write a binary file that consists of an arbitrary header followed by a numb...
bool open(const FileReference *file)
Opens the indicated filename for writing.
bool write_header(const std::string &header)
Writes a sequence of bytes to the beginning of the datagram file.
An ordered list of data elements, formatted in memory for transmission over a socket or writing to a ...
Definition: datagram.h:38
void add_uint32(uint32_t value)
Adds an unsigned 32-bit integer to the datagram.
Definition: datagram.I:94
void add_int16(int16_t value)
Adds a signed 16-bit integer to the datagram.
Definition: datagram.I:58
void add_int32(int32_t value)
Adds a signed 32-bit integer to the datagram.
Definition: datagram.I:67
void add_uint8(uint8_t value)
Adds an unsigned 8-bit integer to the datagram.
Definition: datagram.I:50
void add_bool(bool value)
Adds a boolean value to the datagram.
Definition: datagram.I:34
void append_data(const void *data, size_t size)
Appends some more raw data to the end of the datagram.
Definition: datagram.cxx:129
void add_string(const std::string &str)
Adds a variable-length string to the datagram.
Definition: datagram.I:219
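A minimal round-trip sketch, restricted to the Datagram and DatagramIterator calls listed above: values are packed into a Datagram and must be extracted again in exactly the same order:

#include "datagram.h"
#include "datagramIterator.h"

void datagram_round_trip() {
  Datagram dg;
  dg.add_uint8(2);                          // e.g. a small version number
  dg.add_int32(-1024);
  dg.add_bool(true);
  dg.add_string("grass.png");

  DatagramIterator scan(dg);
  uint8_t version = scan.get_uint8();       // values come back in the order written
  int32_t offset = scan.get_int32();
  bool flag = scan.get_bool();
  std::string name = scan.get_string();
  size_t left = scan.get_remaining_size();  // 0 once everything has been read
}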
An instance of this class is passed to the Factory when requesting it to do its business and construc...
Definition: factoryParams.h:36
void register_factory(TypeHandle handle, CreateFunc *func, void *user_data=nullptr)
Registers a new kind of thing the Factory will be able to create.
Definition: factory.I:73
The name of a file, such as a texture file or an Egg file.
Definition: filename.h:39
std::string get_basename() const
Returns the basename part of the filename.
Definition: filename.I:367
Filename get_filename_index(int index) const
If the pattern flag is set for this Filename and the filename string actually includes a sequence of ...
Definition: filename.cxx:836
bool has_hash() const
Returns true if the filename is indicated to be a filename pattern (that is, set_pattern(true) was ca...
Definition: filename.I:531
void set_basename_wo_extension(const std::string &s)
Replaces the basename part of the filename, without the file extension.
Definition: filename.cxx:783
int find_on_searchpath(const DSearchPath &searchpath)
Performs the reverse of the resolve_filename() operation: assuming that the current filename is fully...
Definition: filename.cxx:1689
bool make_relative_to(Filename directory, bool allow_backups=true)
Adjusts this filename, which must be a fully-specified pathname beginning with a slash,...
Definition: filename.cxx:1640
std::string get_basename_wo_extension() const
Returns the basename part of the filename, without the file extension.
Definition: filename.I:386
void make_absolute()
Converts the filename to a fully-qualified pathname from the root (if it is a relative pathname),...
Definition: filename.cxx:968
static Filename pattern_filename(const std::string &filename)
Constructs a filename that represents a sequence of numbered files.
Definition: filename.I:160
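A small sketch of the pattern-filename calls listed above; the image name is hypothetical. The hash mark in the pattern is replaced by the page index when a specific filename is requested:

#include "filename.h"

void expand_pattern() {
  Filename pattern = Filename::pattern_filename("cube_#.png");
  if (pattern.has_hash()) {
    Filename page0 = pattern.get_filename_index(0);  // e.g. "cube_0.png"
    Filename page1 = pattern.get_filename_index(1);  // e.g. "cube_1.png"
  }
}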
This class can be used to test for string matches against standard Unix-shell filename globbing conv...
Definition: globPattern.h:32
bool matches(const std::string &candidate) const
Returns true if the candidate string matches the pattern, false otherwise.
Definition: globPattern.I:122
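For example, a GlobPattern built from a Unix-shell wildcard can be tested against candidate names (a minimal sketch using only the call listed above):

#include "globPattern.h"

void glob_example() {
  GlobPattern glob("*.png");
  bool hit  = glob.matches("grass.png");   // true
  bool miss = glob.matches("grass.jpg");   // false
}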
This is a base class for the GraphicsStateGuardian class, which is itself a base class for the variou...
static GraphicsStateGuardianBase * get_default_gsg()
Returns a pointer to the "default" GSG.
Encodes a string name in a hash table, mapping it to a pointer.
Definition: internalName.h:38
get_name
Returns the complete name represented by the InternalName and all of its parents.
Definition: internalName.h:61
Specifies parameters that may be passed to the loader.
Definition: loaderOptions.h:23
set_auto_texture_scale
Set this flag to ATS_none, ATS_up, ATS_down, or ATS_pad to control how a texture is scaled from disk ...
Definition: loaderOptions.h:69
get_auto_texture_scale
See set_auto_texture_scale().
Definition: loaderOptions.h:69
get_texture_num_views
See set_texture_num_views().
Definition: loaderOptions.h:64
void unlock()
Alias for release() to match C++11 semantics.
Definition: mutexDirect.I:39
void lock()
Alias for acquire() to match C++11 semantics.
Definition: mutexDirect.I:19
A lightweight C++ object whose constructor calls acquire() and whose destructor calls release() on a ...
Definition: mutexHolder.h:25
A base class for all things which can have a name.
Definition: namable.h:26
bool has_name() const
Returns true if the Namable has a nonempty name set, false if the name is empty.
Definition: namable.I:44
static PNMFileTypeRegistry * get_global_ptr()
Returns a pointer to the global PNMFileTypeRegistry object.
PNMFileType * get_type_from_extension(const std::string &filename) const
Tries to determine what the PNMFileType is likely to be for a particular image file based on its exte...
This is the base class of a family of classes that represent particular image file types that PNMImag...
Definition: pnmFileType.h:32
get_maxval
Returns the maximum channel value allowable for any pixel in this image; for instance,...
int get_x_size() const
Returns the number of pixels in the X direction.
PNMReader * make_reader(const Filename &filename, PNMFileType *type=nullptr, bool report_unknown_type=true) const
Returns a newly-allocated PNMReader of the suitable type for reading from the indicated image filenam...
static bool is_grayscale(ColorType color_type)
This static variant of is_grayscale() returns true if the indicated image type represents a grayscale...
get_num_channels
Returns the number of channels in the image.
static bool has_alpha(ColorType color_type)
This static variant of has_alpha() returns true if the indicated image type includes an alpha channel...
int get_y_size() const
Returns the number of pixels in the Y direction.
get_type
If the file type is known (e.g.
The name of this class derives from the fact that we originally implemented it as a layer on top of t...
Definition: pnmImage.h:58
void clear()
Frees all memory allocated for the image, and clears all its parameters (size, color,...
Definition: pnmImage.cxx:48
void set_read_size(int x_size, int y_size)
Specifies the size to which we'd like to scale the image upon reading it.
Definition: pnmImage.I:288
xelval get_channel_val(int x, int y, int channel) const
Returns the nth component color at the indicated pixel.
Definition: pnmImage.cxx:837
void set_blue(int x, int y, float b)
Sets the blue component color only at the indicated pixel.
Definition: pnmImage.I:836
void alpha_fill(float alpha=0.0)
Sets the entire alpha channel to the given level.
Definition: pnmImage.I:272
xelval get_green_val(int x, int y) const
Returns the green component color at the indicated pixel.
Definition: pnmImage.I:462
void set_green(int x, int y, float g)
Sets the green component color only at the indicated pixel.
Definition: pnmImage.I:827
float get_alpha(int x, int y) const
Returns the alpha component color at the indicated pixel.
Definition: pnmImage.I:809
float get_gray(int x, int y) const
Returns the gray component color at the indicated pixel.
Definition: pnmImage.I:799
void quick_filter_from(const PNMImage &copy, int xborder=0, int yborder=0)
Resizes from the given image, with a fixed radius of 0.5.
void fill(float red, float green, float blue)
Sets the entire image (except the alpha channel) to the given color.
Definition: pnmImage.I:246
void set_num_channels(int num_channels)
Changes the number of channels associated with the image.
Definition: pnmImage.I:353
xelval get_alpha_val(int x, int y) const
Returns the alpha component color at the indicated pixel.
Definition: pnmImage.I:494
void set_red(int x, int y, float r)
Sets the red component color only at the indicated pixel.
Definition: pnmImage.I:818
void copy_header_from(const PNMImageHeader &header)
Copies just the header information into this image.
Definition: pnmImage.cxx:200
void take_from(PNMImage &orig)
Move the contents of the other image into this one, and empty the other image.
Definition: pnmImage.cxx:224
bool is_valid() const
Returns true if the image has been read in or correctly initialized with a height and width.
Definition: pnmImage.I:342
xelval get_blue_val(int x, int y) const
Returns the blue component color at the indicated pixel.
Definition: pnmImage.I:472
bool read(const Filename &filename, PNMFileType *type=nullptr, bool report_unknown_type=true)
Reads the indicated image filename.
Definition: pnmImage.cxx:278
xel * get_array()
Directly access the underlying PNMImage array.
Definition: pnmImage.I:1098
xelval get_red_val(int x, int y) const
Returns the red component color at the indicated pixel.
Definition: pnmImage.I:452
int get_read_y_size() const
Returns the requested y_size of the image if set_read_size() has been called, or the image y_size oth...
Definition: pnmImage.I:324
xelval get_gray_val(int x, int y) const
Returns the gray component color at the indicated pixel.
Definition: pnmImage.I:484
void set_alpha(int x, int y, float a)
Sets the alpha component color only at the indicated pixel.
Definition: pnmImage.I:859
ColorSpace get_color_space() const
Returns the color space in which the image is encoded.
Definition: pnmImage.I:332
void add_alpha()
Adds an alpha channel to the image, if it does not already have one.
Definition: pnmImage.I:363
xelval * get_alpha_array()
Directly access the underlying PNMImage array of alpha values.
Definition: pnmImage.I:1115
bool write(const Filename &filename, PNMFileType *type=nullptr) const
Writes the image to the indicated filename.
Definition: pnmImage.cxx:385
int get_read_x_size() const
Returns the requested x_size of the image if set_read_size() has been called, or the image x_size oth...
Definition: pnmImage.I:315
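A minimal sketch combining the PNMImage calls listed above (the paths are hypothetical): read an image, make sure it carries an alpha channel, touch a couple of pixels, and write it back out:

#include "pnmImage.h"

void touch_image() {
  PNMImage image;
  if (!image.read(Filename("maps/grass.png")) || !image.is_valid()) {
    return;
  }
  int w = image.get_x_size();
  int h = image.get_y_size();
  if (image.get_num_channels() < 4) {
    image.add_alpha();
    image.alpha_fill(1.0f);              // fully opaque
  }
  xelval r = image.get_red_val(0, 0);    // integer channel value, up to get_maxval()
  image.set_blue(w - 1, h - 1, 0.5f);    // float value in the 0-1 range
  image.write(Filename("maps/grass_out.png"));
}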
This is an abstract base class that defines the interface for reading image files of various types.
Definition: pnmReader.h:27
virtual bool is_floating_point()
Returns true if this PNMFileType represents a floating-point image type, false if it is a normal,...
Definition: pnmReader.cxx:71
A lightweight class that represents a single element that may be timed and/or counted via stats.
A lightweight class that can be used to automatically start and stop a PStatCollector around a sectio...
Definition: pStatTimer.h:30
Defines a pfm file, a 2-d table of floating-point numbers, either 3-component or 1-component,...
Definition: pfmFile.h:31
bool read(const Filename &fullpath)
Reads the PFM data from the indicated file, returning true on success, false on failure.
Definition: pfmFile.cxx:121
bool write(const Filename &fullpath)
Writes the PFM data to the indicated file, returning true on success, false on failure.
Definition: pfmFile.cxx:204
bool store(PNMImage &pnmimage) const
Copies the data to the indicated PNMImage, converting to RGB values.
Definition: pfmFile.cxx:360
void set_channel(int x, int y, int c, PN_float32 value)
Replaces the cth channel of the point value at the indicated point.
Definition: pfmFile.I:63
bool load(const PNMImage &pnmimage)
Fills the PfmFile with the data from the indicated PNMImage, converted to floating-point values.
Definition: pfmFile.cxx:287
PN_float32 get_channel(int x, int y, int c) const
Returns the cth channel of the point value at the indicated point.
Definition: pfmFile.I:52
void clear()
Eliminates all data in the file.
Definition: pfmFile.cxx:77
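A minimal sketch of the PfmFile calls listed above (hypothetical paths): read a floating-point height field, scale one sample, convert it to a PNMImage for inspection, and write the result:

#include "pfmFile.h"
#include "pnmImage.h"

void scale_height_sample() {
  PfmFile pfm;
  if (!pfm.read(Filename("terrain/height.pfm"))) {
    return;
  }
  PN_float32 h = pfm.get_channel(10, 10, 0);
  pfm.set_channel(10, 10, 0, h * 2.0f);
  PNMImage preview;
  pfm.store(preview);                                // convert to RGB for inspection
  pfm.write(Filename("terrain/height_scaled.pfm"));
}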
A table of objects that are saved within the graphics context for reference by handle later.
bool is_texture_queued(const Texture *tex) const
Returns true if the texture has been queued on this GSG, false otherwise.
TextureContext * prepare_texture_now(Texture *tex, int view, GraphicsStateGuardianBase *gsg)
Immediately creates a new TextureContext for the indicated texture and returns it.
bool dequeue_texture(Texture *tex)
Removes a texture from the queued list of textures to be prepared.
void release_texture(TextureContext *tc)
Indicates that a texture context, created by a previous call to prepare_texture(),...
void ref() const
Explicitly increments the reference count.
void local_object()
This function should be called, once, immediately after creating a new instance of some ReferenceCoun...
get_ref_count
Returns the current reference count.
virtual bool unref() const
Explicitly decrements the reference count.
Represents a set of settings that indicate how a texture is sampled.
Definition: samplerState.h:36
get_minfilter
Returns the filter mode of the texture for minification.
Definition: samplerState.h:115
get_wrap_v
Returns the wrap mode of the texture in the V direction.
Definition: samplerState.h:113
get_anisotropic_degree
Returns the degree of anisotropic filtering that should be applied to the texture.
Definition: samplerState.h:119
get_magfilter
Returns the filter mode of the texture for magnification.
Definition: samplerState.h:116
get_wrap_w
Returns the wrap mode of the texture in the W direction.
Definition: samplerState.h:114
get_wrap_u
Returns the wrap mode of the texture in the U direction.
Definition: samplerState.h:112
get_border_color
Returns the solid color of the texture's border.
Definition: samplerState.h:121
A class to read sequential binary data directly from an istream.
Definition: streamReader.h:28
This is a special class object that holds all the information returned by a particular GSG to indicat...
bool was_image_modified() const
Returns true if the texture image has been modified since the last time mark_loaded() was called.
An instance of this object is returned by Texture::peek().
Definition: texturePeeker.h:27
static Texture * load_texture(const Filename &filename, int primary_file_num_channels=0, bool read_mipmaps=false, const LoaderOptions &options=LoaderOptions())
Loads the given filename up into a texture, if it has not already been loaded, and returns the new te...
Definition: texturePool.I:70
static Texture * get_texture(const Filename &filename, int primary_file_num_channels=0, bool read_mipmaps=false)
Returns the texture that has already been previously loaded, or NULL otherwise.
Definition: texturePool.I:41
static Texture * load_2d_texture_array(const Filename &filename_pattern, bool read_mipmaps=false, const LoaderOptions &options=LoaderOptions())
Loads a 2-D texture array that is specified with a series of n pages, all numbered in sequence,...
Definition: texturePool.I:124
static void add_texture(Texture *texture)
Adds the indicated already-loaded texture to the pool.
Definition: texturePool.I:177
static Texture * load_cube_map(const Filename &filename_pattern, bool read_mipmaps=false, const LoaderOptions &options=LoaderOptions())
Loads a cube map texture that is specified with a series of 6 pages, numbered 0 through 5.
Definition: texturePool.I:141
static Texture * load_3d_texture(const Filename &filename_pattern, bool read_mipmaps=false, const LoaderOptions &options=LoaderOptions())
Loads a 3-D texture that is specified with a series of n pages, all numbered in sequence,...
Definition: texturePool.I:107
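A minimal sketch of the TexturePool entry points listed above; the filenames are hypothetical. load_texture() returns the already-cached texture when possible, and a cube map is assembled from six numbered pages:

#include "texturePool.h"

void load_some_textures() {
  // Loads the texture, or returns the one already in the pool.
  Texture *noise = TexturePool::load_texture(Filename("maps/noise.png"));
  // Returns only a texture that has already been loaded, or NULL.
  Texture *maybe = TexturePool::get_texture(Filename("maps/noise.png"));
  // The hash mark is replaced by page numbers 0 through 5.
  Texture *sky = TexturePool::load_cube_map(Filename("maps/sky_#.png"));
}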
Represents a texture object, which is typically a single 2-d image but may also represent a 1-d or 3-...
Definition: texture.h:71
CPTA_uchar get_ram_image_as(const std::string &requested_format)
Returns the uncompressed system-RAM image data associated with the texture.
Definition: texture.cxx:7398
static TextureType string_texture_type(const std::string &str)
Returns the TextureType corresponding to the indicated string word.
Definition: texture.cxx:2104
virtual void ensure_loader_type(const Filename &filename)
May be called prior to calling read_txo() or any bam-related Texture-creating callback,...
Definition: texture.cxx:2837
bool write_txo(std::ostream &out, const std::string &filename="") const
Writes the texture to a Panda texture object.
Definition: texture.cxx:929
TextureContext * prepare_now(int view, PreparedGraphicsObjects *prepared_objects, GraphicsStateGuardianBase *gsg)
Creates a context for the texture on the particular GSG, if it does not already exist.
Definition: texture.cxx:1982
static std::string format_component_type(ComponentType ct)
Returns the indicated ComponentType converted to a string word.
Definition: texture.cxx:2130
Texture(const std::string &name=std::string())
Constructs an empty texture.
Definition: texture.cxx:375
bool get_resident(PreparedGraphicsObjects *prepared_objects) const
Returns true if this Texture is reported to be resident within graphics memory for the indicated GSG.
Definition: texture.cxx:1547
Texture * load_related(const InternalName *suffix) const
Loads a texture whose filename is derived by concatenating a suffix to the filename of this texture.
Definition: texture.cxx:974
static CompressionMode string_compression_mode(const std::string &str)
Returns the CompressionMode value associated with the given string representation.
Definition: texture.cxx:2463
PTA_uchar new_simple_ram_image(int x_size, int y_size)
Creates an empty array for the simple ram image of the indicated size, and returns a modifiable point...
Definition: texture.cxx:1304
static bool is_specific(CompressionMode compression)
Returns true if the indicated compression mode is one of the specific compression types,...
Definition: texture.cxx:2617
bool has_ram_image() const
Returns true if the Texture has its image contents available in main RAM, false if it exists only in ...
Definition: texture.I:1242
static std::string format_quality_level(QualityLevel tql)
Returns the indicated QualityLevel converted to a string word.
Definition: texture.cxx:2506
size_t estimate_texture_memory() const
Estimates the amount of texture memory that will be consumed by loading this texture.
Definition: texture.cxx:676
bool read(const Filename &fullpath, const LoaderOptions &options=LoaderOptions())
Reads the named filename into the texture.
Definition: texture.cxx:552
void consider_rescale(PNMImage &pnmimage)
Asks the PNMImage to change its scale when it reads the image, according to the whims of the Config....
Definition: texture.cxx:2040
get_texture_type
Returns the overall interpretation of the texture.
Definition: texture.h:365
bool write(const Filename &fullpath)
Writes the texture to the named filename.
Definition: texture.I:298
static bool has_binary_alpha(Format format)
Returns true if the indicated format includes a binary alpha only, false otherwise.
Definition: texture.cxx:2664
void * get_ram_mipmap_pointer(int n) const
Similar to get_ram_mipmap_image(); however, in this case the void pointer for the given ram image is...
Definition: texture.cxx:1229
static std::string format_compression_mode(CompressionMode cm)
Returns the indicated CompressionMode converted to a string word.
Definition: texture.cxx:2421
get_aux_data
Returns a record previously recorded via set_aux_data().
Definition: texture.h:552
static bool is_srgb(Format format)
Returns true if the indicated format is in the sRGB color space, false otherwise.
Definition: texture.cxx:2679
void set_orig_file_size(int x, int y, int z=1)
Specifies the size of the texture as it exists in its original disk file, before any Panda scaling.
Definition: texture.cxx:1962
bool get_active(PreparedGraphicsObjects *prepared_objects) const
Returns true if this Texture was rendered in the most recent frame within the indicated GSG.
Definition: texture.cxx:1520
get_keep_ram_image
Returns the flag that indicates whether this Texture is eligible to have its main RAM copy of the tex...
Definition: texture.h:472
bool read_dds(std::istream &in, const std::string &filename="", bool header_only=false)
Reads the texture from a DDS file object.
Definition: texture.cxx:944
void generate_normalization_cube_map(int size)
Generates a special cube map image in the texture that can be used to apply bump mapping effects: for...
Definition: texture.cxx:425
bool has_compression() const
Returns true if the texture indicates it wants to be compressed, either with CM_on or higher,...
Definition: texture.I:1102
static QualityLevel string_quality_level(const std::string &str)
Returns the QualityLevel value associated with the given string representation.
Definition: texture.cxx:2526
void generate_alpha_scale_map()
Generates a special 256x1 1-d texture that can be used to apply an arbitrary alpha scale to objects b...
Definition: texture.cxx:527
bool read_txo(std::istream &in, const std::string &filename="")
Reads the texture from a Panda texture object.
Definition: texture.cxx:846
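A minimal round-trip sketch of the write_txo()/read_txo() pair listed in this section, using ordinary std::fstream streams and a hypothetical cache path:

#include "texture.h"
#include <fstream>

void copy_texture_via_txo(Texture *tex) {
  std::ofstream out("cache/grass.txo", std::ios::binary);
  if (out && tex->write_txo(out, "grass.txo")) {
    out.close();
    PT(Texture) copy = new Texture("grass-copy");
    std::ifstream in("cache/grass.txo", std::ios::binary);
    if (in) {
      copy->read_txo(in, "grass.txo");
    }
  }
}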
static ComponentType string_component_type(const std::string &str)
Returns the ComponentType corresponding to the indicated string word.
Definition: texture.cxx:2159
static void register_with_read_factory()
Registers the factory function with the BamReader so that Texture objects can be generated from bam files.
Definition: texture.cxx:10001
static bool adjust_size(int &x_size, int &y_size, const std::string &name, bool for_padding, AutoTextureScale auto_texture_scale=ATS_unspecified)
Computes the proper size of the texture, based on the original size, the filename,...
Definition: texture.cxx:2727
virtual void finalize(BamReader *manager)
Called by the BamReader to perform any final actions needed for setting up the object after all objec...
Definition: texture.cxx:10029
static int up_to_power_2(int value)
Returns the smallest power of 2 greater than or equal to value.
Definition: texture.cxx:2009
static AutoTextureScale get_textures_power_2()
Returns the global power-of-2 texture-scaling mode, one of ATS_none, ATS_up, or ATS_down, which controls the scaling of textures in general.
Definition: texture.I:1863
get_auto_texture_scale
Returns the power-of-2 texture-scaling mode that will be applied to this particular texture when it i...
Definition: texture.h:532
void set_ram_mipmap_pointer_from_int(long long pointer, int n, int page_size)
Accepts a raw pointer cast as an int, which is then passed to set_ram_mipmap_pointer(); see the docum...
Definition: texture.cxx:1270
virtual void write_datagram(BamWriter *manager, Datagram &me)
Function to write the important information in the particular object to a Datagram.
Definition: texture.cxx:10010
static int down_to_power_2(int value)
Returns the largest power of 2 less than or equal to value.
Definition: texture.cxx:2021
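For example, the power-of-2 helpers listed just above behave like this (a minimal sketch; adjust_size() also consults the configured texture-scaling mode, so its result depends on the running configuration):

#include "texture.h"

void size_examples() {
  int up   = Texture::up_to_power_2(300);    // 512
  int down = Texture::down_to_power_2(300);  // 256
  int xs = 300, ys = 200;
  // May rewrite xs and ys in place; returns whether an adjustment was made.
  bool adjusted = Texture::adjust_size(xs, ys, "example", false);
}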
bool release(PreparedGraphicsObjects *prepared_objects)
Frees the texture context only on the indicated object, if it exists there.
Definition: texture.cxx:1574
virtual bool has_cull_callback() const
Should be overridden by derived classes to return true if cull_callback() has been defined.
Definition: texture.cxx:2575
bool uses_mipmaps() const
Returns true if the minfilter settings on this texture indicate the use of mipmapping,...
Definition: texture.I:1127
static std::string format_texture_type(TextureType tt)
Returns the indicated TextureType converted to a string word.
Definition: texture.cxx:2078
has_simple_ram_image
Returns true if the Texture has a "simple" image available in main RAM.
Definition: texture.h:517
static bool is_integer(Format format)
Returns true if the indicated format is an integer format, false otherwise.
Definition: texture.cxx:2696
PTA_uchar modify_simple_ram_image()
Returns a modifiable pointer to the internal "simple" texture image.
Definition: texture.cxx:1293
void clear_ram_mipmap_image(int n)
Discards the current system-RAM image for the nth mipmap level.
Definition: texture.cxx:1278
bool was_image_modified(PreparedGraphicsObjects *prepared_objects) const
Returns true if the texture needs to be re-loaded onto the indicated GSG, either because its image da...
Definition: texture.cxx:1461
bool read_ktx(std::istream &in, const std::string &filename="", bool header_only=false)
Reads the texture from a KTX file object.
Definition: texture.cxx:961
size_t get_data_size_bytes(PreparedGraphicsObjects *prepared_objects) const
Returns the number of bytes which the texture is reported to consume within graphics memory,...
Definition: texture.cxx:1493
get_expected_ram_page_size
Returns the number of bytes that should be used per each Z page of the 3-d texture.
Definition: texture.h:449
virtual bool cull_callback(CullTraverser *trav, const CullTraverserData &data) const
If has_cull_callback() returns true, this function will be called during the cull traversal to perfor...
Definition: texture.cxx:2589
bool is_prepared(PreparedGraphicsObjects *prepared_objects) const
Returns true if the texture has already been prepared or enqueued for preparation on the indicated GS...
Definition: texture.cxx:1445
void set_ram_image_as(CPTA_uchar image, const std::string &provided_format)
Replaces the current system-RAM image with the new data, converting it first if necessary from the in...
Definition: texture.cxx:1026
void set_ram_mipmap_pointer(int n, void *image, size_t page_size=0)
Sets an explicit void pointer as the texture's mipmap image for the indicated level.
Definition: texture.cxx:1248
set_aux_data
Records an arbitrary object in the Texture, associated with a specified key.
Definition: texture.h:552
void texture_uploaded()
This method is called by the GraphicsEngine at the beginning of the frame *after* a texture has been ...
Definition: texture.cxx:2552
void set_size_padded(int x=1, int y=1, int z=1)
Changes the size of the texture, padding if necessary, and setting the pad region as well.
Definition: texture.cxx:1933
static bool has_alpha(Format format)
Returns true if the indicated format includes alpha, false otherwise.
Definition: texture.cxx:2633
get_num_loadable_ram_mipmap_images
Returns the number of contiguous mipmap levels that exist in RAM, up until the first gap in the seque...
Definition: texture.h:502
void generate_simple_ram_image()
Computes the "simple" ram image by loading the main RAM image, if it is not already available,...
Definition: texture.cxx:1325
static Format string_format(const std::string &str)
Returns the Format corresponding to the indicated string word.
Definition: texture.cxx:2303
clear_aux_data
Removes a record previously recorded via set_aux_data().
Definition: texture.h:552
int release_all()
Frees the context allocated on all objects for which the texture has been declared.
Definition: texture.cxx:1600
CPTA_uchar get_ram_mipmap_image(int n) const
Returns the system-RAM image data associated with the nth mipmap level, if present.
Definition: texture.cxx:1215
static std::string format_format(Format f)
Returns the indicated Format converted to a string word.
Definition: texture.cxx:2189
is_cacheable
Returns true if there is enough information in this Texture object to write it to the bam cache succe...
Definition: texture.h:473
static bool is_unsigned(ComponentType ctype)
Returns true if the indicated component type is unsigned, false otherwise.
Definition: texture.cxx:2605
A thread; that is, a lightweight process.
Definition: thread.h:46
static void consider_yield()
Possibly suspends the current thread for the rest of the current epoch, if it has run for enough this...
Definition: thread.I:212
get_current_thread
Returns a pointer to the currently-executing Thread object.
Definition: thread.h:109
TypeHandle is the identifier used to differentiate C++ class types.
Definition: typeHandle.h:81
bool is_exact_type(TypeHandle handle) const
Returns true if the current object is the indicated type exactly.
Definition: typedObject.I:38
bool is_of_type(TypeHandle handle) const
Returns true if the current object is or derives from the indicated type.
Definition: typedObject.I:28
A base class for things which need to inherit from both TypedObject and from ReferenceCount.
Base class for objects that can be written to and read from Bam files.
Definition: typedWritable.h:35
A hierarchy of directories and files that appears to be one continuous file system,...
static void close_write_file(std::ostream *stream)
Closes a file opened by a previous call to open_write_file().
Filename get_cwd() const
Returns the current directory name.
bool exists(const Filename &filename) const
Convenience function; returns true if the named file exists.
bool resolve_filename(Filename &filename, const DSearchPath &searchpath, const std::string &default_extension=std::string()) const
Searches the given search path for the filename.
std::ostream * open_write_file(const Filename &filename, bool auto_wrap, bool truncate)
Convenience function; returns a newly allocated ostream if the file exists and can be written,...
static void close_read_file(std::istream *stream)
Closes a file opened by a previous call to open_read_file().
PointerTo< VirtualFile > get_file(const Filename &filename, bool status_only=false) const
Looks up the file by the indicated name in the file system.
static VirtualFileSystem * get_global_ptr()
Returns the default global VirtualFileSystem.
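A minimal sketch, restricted to the VirtualFileSystem calls listed above and a hypothetical output path, of writing a file through the virtual file system and confirming it exists:

#include "virtualFileSystem.h"

void write_through_vfs() {
  VirtualFileSystem *vfs = VirtualFileSystem::get_global_ptr();
  Filename fn("output/report.txt");
  std::ostream *out = vfs->open_write_file(fn, false, true);  // auto_wrap=false, truncate=true
  if (out != nullptr) {
    (*out) << "hello from the vfs\n";
    VirtualFileSystem::close_write_file(out);
  }
  bool there = vfs->exists(fn);
}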
The abstract base class for a file or directory within the VirtualFileSystem.
Definition: virtualFile.h:35
This is our own Panda specialization on the default STL map.
Definition: pmap.h:49
This is our own Panda specialization on the default STL vector.
Definition: pvector.h:42
BEGIN_PUBLISH EXPCL_PANDA_PNMIMAGE float decode_sRGB_float(unsigned char val)
Decodes the sRGB-encoded unsigned char value to a linearized float in the range 0-1.
Definition: convert_srgb.I:18
EXPCL_PANDA_PNMIMAGE unsigned char encode_sRGB_uchar(unsigned char val)
Encodes the linearized unsigned char value to an sRGB-encoded unsigned char value.
Definition: convert_srgb.I:80
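For example, the sRGB conversion helpers listed above map between encoded 8-bit values and linear intensities (a minimal sketch; the numeric comment is approximate):

#include "convert_srgb.h"

void srgb_example() {
  // Mid-gray in sRGB (128) is roughly 0.22 in linear light.
  float linear = decode_sRGB_float((unsigned char)128);
  // A linear 8-bit value re-encoded into the sRGB curve.
  unsigned char encoded = encode_sRGB_uchar((unsigned char)128);
}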
std::ostream & indent(std::ostream &out, int indent_level)
A handy function for doing text formatting.
Definition: indent.cxx:20
int get_lowest_on_bit(unsigned short x)
Returns the index of the lowest 1 bit in the word.
Definition: pbitops.I:175
int get_next_higher_bit(unsigned short x)
Returns the smallest power of 2 greater than x.
Definition: pbitops.I:328
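For example, the bit helpers listed above can be exercised like this (a minimal sketch; the expected results in the comments follow the descriptions given):

#include "pbitops.h"

void bit_examples() {
  int lowest = get_lowest_on_bit((unsigned short)0x0050);  // lowest 1 bit is bit 4
  int next   = get_next_higher_bit((unsigned short)100);   // smallest power of 2 greater than 100, i.e. 128
}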
string upcase(const string &s)
Returns the input string with all lowercase letters converted to uppercase.
void release_read(const CycleData *pointer) const
Releases a pointer previously obtained via a call to read().
CycleDataType * write_upstream(bool force_to_0, Thread *current_thread)
See PipelineCyclerBase::write_upstream().
CycleDataType * elevate_read_upstream(const CycleDataType *pointer, bool force_to_0, Thread *current_thread)
See PipelineCyclerBase::elevate_read_upstream().
const CycleDataType * read(Thread *current_thread) const
See PipelineCyclerBase::read().
static PT(Texture) make_from_txo(std::istream &in, const std::string &filename="")
Constructs a new Texture object from the txo file.
Definition: texture.cxx:860