Panda3D
texture.cxx
1/**
2 * PANDA 3D SOFTWARE
3 * Copyright (c) Carnegie Mellon University. All rights reserved.
4 *
5 * All use of this software is subject to the terms of the revised BSD
6 * license. You should have received a copy of this license along
7 * with this source code in a file named "LICENSE."
8 *
9 * @file texture.cxx
10 * @author mike
11 * @date 1997-01-09
12 * @author fperazzi, PandaSE
13 * @date 2010-04-29
14 */
15
16#include "pandabase.h"
17#include "texture.h"
18#include "config_gobj.h"
19#include "config_putil.h"
20#include "texturePool.h"
21#include "textureContext.h"
22#include "bamCache.h"
23#include "bamCacheRecord.h"
24#include "datagram.h"
25#include "datagramIterator.h"
26#include "bamReader.h"
27#include "bamWriter.h"
28#include "string_utils.h"
30#include "pnmImage.h"
31#include "pnmReader.h"
32#include "pfmFile.h"
33#include "pnmFileTypeRegistry.h"
34#include "virtualFileSystem.h"
35#include "datagramInputFile.h"
36#include "datagramOutputFile.h"
37#include "bam.h"
38#include "zStream.h"
39#include "indent.h"
40#include "cmath.h"
41#include "pStatTimer.h"
42#include "pbitops.h"
43#include "streamReader.h"
44#include "texturePeeker.h"
45#include "convert_srgb.h"
46
47#ifdef HAVE_SQUISH
48#include <squish.h>
49#endif // HAVE_SQUISH
50
51#include <stddef.h>
52
53using std::endl;
54using std::istream;
55using std::max;
56using std::min;
57using std::ostream;
58using std::string;
59using std::swap;
60
62("texture-quality-level", Texture::QL_normal,
63 PRC_DESC("This specifies a global quality level for all textures. You "
64 "may specify either fastest, normal, or best. This actually "
65 "affects the meaning of Texture::set_quality_level(QL_default), "
66 "so it may be overridden on a per-texture basis. This generally "
67 "only has an effect when using the tinydisplay software renderer; "
68 "it has little or no effect on normal, hardware-accelerated "
69 "renderers. See Texture::set_quality_level()."));
70
71PStatCollector Texture::_texture_read_pcollector("*:Texture:Read");
72TypeHandle Texture::_type_handle;
73TypeHandle Texture::CData::_type_handle;
74AutoTextureScale Texture::_textures_power_2 = ATS_unspecified;
75
76// Stuff to read and write DDS files.
77
78// little-endian, of course
79#define DDS_MAGIC 0x20534444
80
81
82// DDS_header.dwFlags
83#define DDSD_CAPS 0x00000001
84#define DDSD_HEIGHT 0x00000002
85#define DDSD_WIDTH 0x00000004
86#define DDSD_PITCH 0x00000008
87#define DDSD_PIXELFORMAT 0x00001000
88#define DDSD_MIPMAPCOUNT 0x00020000
89#define DDSD_LINEARSIZE 0x00080000
90#define DDSD_DEPTH 0x00800000
91
92// DDS_header.sPixelFormat.dwFlags
93#define DDPF_ALPHAPIXELS 0x00000001
94#define DDPF_FOURCC 0x00000004
95#define DDPF_INDEXED 0x00000020
96#define DDPF_RGB 0x00000040
97
98// DDS_header.sCaps.dwCaps1
99#define DDSCAPS_COMPLEX 0x00000008
100#define DDSCAPS_TEXTURE 0x00001000
101#define DDSCAPS_MIPMAP 0x00400000
102
103// DDS_header.sCaps.dwCaps2
104#define DDSCAPS2_CUBEMAP 0x00000200
105#define DDSCAPS2_CUBEMAP_POSITIVEX 0x00000400
106#define DDSCAPS2_CUBEMAP_NEGATIVEX 0x00000800
107#define DDSCAPS2_CUBEMAP_POSITIVEY 0x00001000
108#define DDSCAPS2_CUBEMAP_NEGATIVEY 0x00002000
109#define DDSCAPS2_CUBEMAP_POSITIVEZ 0x00004000
110#define DDSCAPS2_CUBEMAP_NEGATIVEZ 0x00008000
111#define DDSCAPS2_VOLUME 0x00200000
112
113struct DDSPixelFormat {
114 unsigned int pf_size;
115 unsigned int pf_flags;
116 unsigned int four_cc;
117 unsigned int rgb_bitcount;
118 unsigned int r_mask;
119 unsigned int g_mask;
120 unsigned int b_mask;
121 unsigned int a_mask;
122};
123
124struct DDSCaps2 {
125 unsigned int caps1;
126 unsigned int caps2;
127 unsigned int ddsx;
128};
129
130struct DDSHeader {
131 unsigned int dds_magic;
132 unsigned int dds_size;
133 unsigned int dds_flags;
134 unsigned int height;
135 unsigned int width;
136 unsigned int pitch;
137 unsigned int depth;
138 unsigned int num_levels;
139
140 DDSPixelFormat pf;
141 DDSCaps2 caps;
142};
143
144// Stuff to read KTX files.
145enum KTXType {
146 KTX_BYTE = 0x1400,
147 KTX_UNSIGNED_BYTE = 0x1401,
148 KTX_SHORT = 0x1402,
149 KTX_UNSIGNED_SHORT = 0x1403,
150 KTX_INT = 0x1404,
151 KTX_UNSIGNED_INT = 0x1405,
152 KTX_FLOAT = 0x1406,
153 KTX_HALF_FLOAT = 0x140B,
154 KTX_UNSIGNED_BYTE_3_3_2 = 0x8032,
155 KTX_UNSIGNED_SHORT_4_4_4_4 = 0x8033,
156 KTX_UNSIGNED_SHORT_5_5_5_1 = 0x8034,
157 KTX_UNSIGNED_INT_8_8_8_8 = 0x8035,
158 KTX_UNSIGNED_INT_10_10_10_2 = 0x8036,
159 KTX_UNSIGNED_BYTE_2_3_3_REV = 0x8362,
160 KTX_UNSIGNED_SHORT_5_6_5 = 0x8363,
161 KTX_UNSIGNED_SHORT_5_6_5_REV = 0x8364,
162 KTX_UNSIGNED_SHORT_4_4_4_4_REV = 0x8365,
163 KTX_UNSIGNED_SHORT_1_5_5_5_REV = 0x8366,
164 KTX_UNSIGNED_INT_8_8_8_8_REV = 0x8367,
165 KTX_UNSIGNED_INT_2_10_10_10_REV = 0x8368,
166 KTX_UNSIGNED_INT_24_8 = 0x84FA,
167 KTX_UNSIGNED_INT_10F_11F_11F_REV = 0x8C3B,
168 KTX_UNSIGNED_INT_5_9_9_9_REV = 0x8C3E,
169 KTX_FLOAT_32_UNSIGNED_INT_24_8_REV = 0x8DAD,
170};
171
172enum KTXFormat {
173 KTX_ALPHA = 0x1906,
174 KTX_ALPHA12 = 0x803D,
175 KTX_ALPHA16 = 0x803E,
176 KTX_ALPHA16_SNORM = 0x9018,
177 KTX_ALPHA4 = 0x803B,
178 KTX_ALPHA8 = 0x803C,
179 KTX_ALPHA8_SNORM = 0x9014,
180 KTX_ALPHA_SNORM = 0x9010,
181 KTX_BGR = 0x80E0,
182 KTX_BGR_INTEGER = 0x8D9A,
183 KTX_BGRA = 0x80E1,
184 KTX_BGRA_INTEGER = 0x8D9B,
185 KTX_BLUE = 0x1905,
186 KTX_BLUE_INTEGER = 0x8D96,
187 KTX_COLOR_INDEX = 0x1900,
188 KTX_DEPTH24_STENCIL8 = 0x88F0,
189 KTX_DEPTH32F_STENCIL8 = 0x8CAD,
190 KTX_DEPTH_COMPONENT = 0x1902,
191 KTX_DEPTH_COMPONENT16 = 0x81A5,
192 KTX_DEPTH_COMPONENT24 = 0x81A6,
193 KTX_DEPTH_COMPONENT32 = 0x81A7,
194 KTX_DEPTH_COMPONENT32F = 0x8CAC,
195 KTX_DEPTH_STENCIL = 0x84F9,
196 KTX_GREEN = 0x1904,
197 KTX_GREEN_INTEGER = 0x8D95,
198 KTX_INTENSITY = 0x8049,
199 KTX_INTENSITY12 = 0x804C,
200 KTX_INTENSITY16 = 0x804D,
201 KTX_INTENSITY16_SNORM = 0x901B,
202 KTX_INTENSITY4 = 0x804A,
203 KTX_INTENSITY8 = 0x804B,
204 KTX_INTENSITY8_SNORM = 0x9017,
205 KTX_INTENSITY_SNORM = 0x9013,
206 KTX_LUMINANCE = 0x1909,
207 KTX_LUMINANCE12 = 0x8041,
208 KTX_LUMINANCE12_ALPHA12 = 0x8047,
209 KTX_LUMINANCE12_ALPHA4 = 0x8046,
210 KTX_LUMINANCE16 = 0x8042,
211 KTX_LUMINANCE16_ALPHA16 = 0x8048,
212 KTX_LUMINANCE16_ALPHA16_SNORM = 0x901A,
213 KTX_LUMINANCE16_SNORM = 0x9019,
214 KTX_LUMINANCE4 = 0x803F,
215 KTX_LUMINANCE4_ALPHA4 = 0x8043,
216 KTX_LUMINANCE6_ALPHA2 = 0x8044,
217 KTX_LUMINANCE8 = 0x8040,
218 KTX_LUMINANCE8_ALPHA8 = 0x8045,
219 KTX_LUMINANCE8_ALPHA8_SNORM = 0x9016,
220 KTX_LUMINANCE8_SNORM = 0x9015,
221 KTX_LUMINANCE_ALPHA = 0x190A,
222 KTX_LUMINANCE_ALPHA_SNORM = 0x9012,
223 KTX_LUMINANCE_SNORM = 0x9011,
224 KTX_R11F_G11F_B10F = 0x8C3A,
225 KTX_R16 = 0x822A,
226 KTX_R16_SNORM = 0x8F98,
227 KTX_R16F = 0x822D,
228 KTX_R16I = 0x8233,
229 KTX_R16UI = 0x8234,
230 KTX_R32F = 0x822E,
231 KTX_R32I = 0x8235,
232 KTX_R32UI = 0x8236,
233 KTX_R3_G3_B2 = 0x2A10,
234 KTX_R8 = 0x8229,
235 KTX_R8_SNORM = 0x8F94,
236 KTX_R8I = 0x8231,
237 KTX_R8UI = 0x8232,
238 KTX_RED = 0x1903,
239 KTX_RED_INTEGER = 0x8D94,
240 KTX_RED_SNORM = 0x8F90,
241 KTX_RG = 0x8227,
242 KTX_RG16 = 0x822C,
243 KTX_RG16_SNORM = 0x8F99,
244 KTX_RG16F = 0x822F,
245 KTX_RG16I = 0x8239,
246 KTX_RG16UI = 0x823A,
247 KTX_RG32F = 0x8230,
248 KTX_RG32I = 0x823B,
249 KTX_RG32UI = 0x823C,
250 KTX_RG8 = 0x822B,
251 KTX_RG8_SNORM = 0x8F95,
252 KTX_RG8I = 0x8237,
253 KTX_RG8UI = 0x8238,
254 KTX_RG_INTEGER = 0x8228,
255 KTX_RG_SNORM = 0x8F91,
256 KTX_RGB = 0x1907,
257 KTX_RGB10 = 0x8052,
258 KTX_RGB10_A2 = 0x8059,
259 KTX_RGB12 = 0x8053,
260 KTX_RGB16 = 0x8054,
261 KTX_RGB16_SNORM = 0x8F9A,
262 KTX_RGB16F = 0x881B,
263 KTX_RGB16I = 0x8D89,
264 KTX_RGB16UI = 0x8D77,
265 KTX_RGB2 = 0x804E,
266 KTX_RGB32F = 0x8815,
267 KTX_RGB32I = 0x8D83,
268 KTX_RGB32UI = 0x8D71,
269 KTX_RGB4 = 0x804F,
270 KTX_RGB5 = 0x8050,
271 KTX_RGB5_A1 = 0x8057,
272 KTX_RGB8 = 0x8051,
273 KTX_RGB8_SNORM = 0x8F96,
274 KTX_RGB8I = 0x8D8F,
275 KTX_RGB8UI = 0x8D7D,
276 KTX_RGB9_E5 = 0x8C3D,
277 KTX_RGB_INTEGER = 0x8D98,
278 KTX_RGB_SNORM = 0x8F92,
279 KTX_RGBA = 0x1908,
280 KTX_RGBA12 = 0x805A,
281 KTX_RGBA16 = 0x805B,
282 KTX_RGBA16_SNORM = 0x8F9B,
283 KTX_RGBA16F = 0x881A,
284 KTX_RGBA16I = 0x8D88,
285 KTX_RGBA16UI = 0x8D76,
286 KTX_RGBA2 = 0x8055,
287 KTX_RGBA32F = 0x8814,
288 KTX_RGBA32I = 0x8D82,
289 KTX_RGBA32UI = 0x8D70,
290 KTX_RGBA4 = 0x8056,
291 KTX_RGBA8 = 0x8058,
292 KTX_RGBA8_SNORM = 0x8F97,
293 KTX_RGBA8I = 0x8D8E,
294 KTX_RGBA8UI = 0x8D7C,
295 KTX_RGBA_INTEGER = 0x8D99,
296 KTX_RGBA_SNORM = 0x8F93,
297 KTX_SLUMINANCE = 0x8C46,
298 KTX_SLUMINANCE8 = 0x8C47,
299 KTX_SLUMINANCE8_ALPHA8 = 0x8C45,
300 KTX_SLUMINANCE_ALPHA = 0x8C44,
301 KTX_SRGB = 0x8C40,
302 KTX_SRGB8 = 0x8C41,
303 KTX_SRGB8_ALPHA8 = 0x8C43,
304 KTX_SRGB_ALPHA = 0x8C42,
305 KTX_STENCIL_INDEX = 0x1901,
306 KTX_STENCIL_INDEX1 = 0x8D46,
307 KTX_STENCIL_INDEX16 = 0x8D49,
308 KTX_STENCIL_INDEX4 = 0x8D47,
309 KTX_STENCIL_INDEX8 = 0x8D48,
310};
311
312enum KTXCompressedFormat {
313 KTX_COMPRESSED_LUMINANCE_ALPHA_LATC2 = 0x8C72,
314 KTX_COMPRESSED_LUMINANCE_LATC1 = 0x8C70,
315 KTX_COMPRESSED_R11_EAC = 0x9270,
316 KTX_COMPRESSED_RED = 0x8225,
317 KTX_COMPRESSED_RED_RGTC1 = 0x8DBB,
318 KTX_COMPRESSED_RG = 0x8226,
319 KTX_COMPRESSED_RG11_EAC = 0x9272,
320 KTX_COMPRESSED_RG_RGTC2 = 0x8DBD,
321 KTX_COMPRESSED_RGB = 0x84ED,
322 KTX_COMPRESSED_RGB8_ETC2 = 0x9274,
323 KTX_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2 = 0x9276,
324 KTX_COMPRESSED_RGB_BPTC_SIGNED_FLOAT = 0x8E8E,
325 KTX_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT = 0x8E8F,
326 KTX_COMPRESSED_RGB_FXT1_3DFX = 0x86B0,
327 KTX_COMPRESSED_RGB_PVRTC_2BPPV1_IMG = 0x8C01,
328 KTX_COMPRESSED_RGB_PVRTC_4BPPV1_IMG = 0x8C00,
329 KTX_COMPRESSED_RGB_S3TC_DXT1 = 0x83F0,
330 KTX_COMPRESSED_RGBA = 0x84EE,
331 KTX_COMPRESSED_RGBA8_ETC2_EAC = 0x9278,
332 KTX_COMPRESSED_RGBA_BPTC_UNORM = 0x8E8C,
333 KTX_COMPRESSED_RGBA_FXT1_3DFX = 0x86B1,
334 KTX_COMPRESSED_RGBA_PVRTC_2BPPV1_IMG = 0x8C03,
335 KTX_COMPRESSED_RGBA_PVRTC_2BPPV2_IMG = 0x9137,
336 KTX_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG = 0x8C02,
337 KTX_COMPRESSED_RGBA_PVRTC_4BPPV2_IMG = 0x9138,
338 KTX_COMPRESSED_RGBA_S3TC_DXT1 = 0x83F1,
339 KTX_COMPRESSED_RGBA_S3TC_DXT3 = 0x83F2,
340 KTX_COMPRESSED_RGBA_S3TC_DXT5 = 0x83F3,
341 KTX_COMPRESSED_SIGNED_LUMINANCE_ALPHA_LATC2 = 0x8C73,
342 KTX_COMPRESSED_SIGNED_LUMINANCE_LATC1 = 0x8C71,
343 KTX_COMPRESSED_SIGNED_R11_EAC = 0x9271,
344 KTX_COMPRESSED_SIGNED_RED_RGTC1 = 0x8DBC,
345 KTX_COMPRESSED_SIGNED_RG11_EAC = 0x9273,
346 KTX_COMPRESSED_SIGNED_RG_RGTC2 = 0x8DBE,
347 KTX_COMPRESSED_SLUMINANCE = 0x8C4A,
348 KTX_COMPRESSED_SLUMINANCE_ALPHA = 0x8C4B,
349 KTX_COMPRESSED_SRGB = 0x8C48,
350 KTX_COMPRESSED_SRGB8_ALPHA8_ETC2_EAC = 0x9279,
351 KTX_COMPRESSED_SRGB8_ETC2 = 0x9275,
352 KTX_COMPRESSED_SRGB8_PUNCHTHROUGH_ALPHA1_ETC2 = 0x9277,
353 KTX_COMPRESSED_SRGB_ALPHA = 0x8C49,
354 KTX_COMPRESSED_SRGB_ALPHA_BPTC_UNORM = 0x8E8D,
355 KTX_COMPRESSED_SRGB_ALPHA_PVRTC_2BPPV1 = 0x8A56,
356 KTX_COMPRESSED_SRGB_ALPHA_PVRTC_2BPPV2 = 0x93F0,
357 KTX_COMPRESSED_SRGB_ALPHA_PVRTC_4BPPV1 = 0x8A57,
358 KTX_COMPRESSED_SRGB_ALPHA_PVRTC_4BPPV2 = 0x93F1,
359 KTX_COMPRESSED_SRGB_ALPHA_S3TC_DXT1 = 0x8C4D,
360 KTX_COMPRESSED_SRGB_ALPHA_S3TC_DXT3 = 0x8C4E,
361 KTX_COMPRESSED_SRGB_ALPHA_S3TC_DXT5 = 0x8C4F,
362 KTX_COMPRESSED_SRGB_PVRTC_2BPPV1 = 0x8A54,
363 KTX_COMPRESSED_SRGB_PVRTC_4BPPV1 = 0x8A55,
364 KTX_COMPRESSED_SRGB_S3TC_DXT1 = 0x8C4C,
365 KTX_ETC1_RGB8 = 0x8D64,
366 KTX_ETC1_SRGB8 = 0x88EE,
367};
368
369/**
370 * Constructs an empty texture. The default is to set up the texture as an
371 * empty 2-d texture; follow up with one of the variants of setup_texture() if
372 * this is not what you want.
373 */
374Texture::
375Texture(const string &name) :
376 Namable(name),
377 _lock(name),
378 _cvar(_lock)
379{
380 _reloading = false;
381
382 CDWriter cdata(_cycler, true);
383 cdata->inc_properties_modified();
384}
385
386/**
387 * Use Texture::make_copy() to make a duplicate copy of an existing Texture.
388 */
389Texture::
390Texture(const Texture &copy) :
391 Namable(copy),
392 _cycler(copy._cycler),
393 _lock(copy.get_name()),
394 _cvar(_lock)
395{
396 _reloading = false;
397}
398
399/**
400 * Use Texture::make_copy() to make a duplicate copy of an existing Texture.
401 */
402void Texture::
403operator = (const Texture &copy) {
404 Namable::operator = (copy);
405 _cycler = copy._cycler;
406}
407
408/**
409 *
410 */
411Texture::
412~Texture() {
413 release_all();
414 nassertv(!_reloading);
415}
416
417/**
418 * Generates a special cube map image in the texture that can be used to apply
419 * bump mapping effects: for each texel in the cube map that is indexed by the
420 * 3-d texture coordinates (x, y, z), the resulting value is the normalized
421 * vector (x, y, z) (compressed from -1..1 into 0..1).
422 */
423void Texture::
424generate_normalization_cube_map(int size) {
425 CDWriter cdata(_cycler, true);
426 do_setup_texture(cdata, TT_cube_map, size, size, 6, T_unsigned_byte, F_rgb);
427 PTA_uchar image = do_make_ram_image(cdata);
428 cdata->_keep_ram_image = true;
429
430 cdata->inc_image_modified();
431 cdata->inc_properties_modified();
432
433 PN_stdfloat half_size = (PN_stdfloat)size * 0.5f;
434 PN_stdfloat center = half_size - 0.5f;
435
436 LMatrix4 scale
437 (127.5f, 0.0f, 0.0f, 0.0f,
438 0.0f, 127.5f, 0.0f, 0.0f,
439 0.0f, 0.0f, 127.5f, 0.0f,
440 127.5f, 127.5f, 127.5f, 1.0f);
441
442 unsigned char *p = image;
443 int xi, yi;
444
445 // Page 0: positive X.
446 for (yi = 0; yi < size; ++yi) {
447 for (xi = 0; xi < size; ++xi) {
448 LVector3 vec(half_size, center - yi, center - xi);
449 vec.normalize();
450 vec = scale.xform_point(vec);
451
452 *p++ = (unsigned char)vec[2];
453 *p++ = (unsigned char)vec[1];
454 *p++ = (unsigned char)vec[0];
455 }
456 }
457
458 // Page 1: negative X.
459 for (yi = 0; yi < size; ++yi) {
460 for (xi = 0; xi < size; ++xi) {
461 LVector3 vec(-half_size, center - yi, xi - center);
462 vec.normalize();
463 vec = scale.xform_point(vec);
464 *p++ = (unsigned char)vec[2];
465 *p++ = (unsigned char)vec[1];
466 *p++ = (unsigned char)vec[0];
467 }
468 }
469
470 // Page 2: positive Y.
471 for (yi = 0; yi < size; ++yi) {
472 for (xi = 0; xi < size; ++xi) {
473 LVector3 vec(xi - center, half_size, yi - center);
474 vec.normalize();
475 vec = scale.xform_point(vec);
476 *p++ = (unsigned char)vec[2];
477 *p++ = (unsigned char)vec[1];
478 *p++ = (unsigned char)vec[0];
479 }
480 }
481
482 // Page 3: negative Y.
483 for (yi = 0; yi < size; ++yi) {
484 for (xi = 0; xi < size; ++xi) {
485 LVector3 vec(xi - center, -half_size, center - yi);
486 vec.normalize();
487 vec = scale.xform_point(vec);
488 *p++ = (unsigned char)vec[2];
489 *p++ = (unsigned char)vec[1];
490 *p++ = (unsigned char)vec[0];
491 }
492 }
493
494 // Page 4: positive Z.
495 for (yi = 0; yi < size; ++yi) {
496 for (xi = 0; xi < size; ++xi) {
497 LVector3 vec(xi - center, center - yi, half_size);
498 vec.normalize();
499 vec = scale.xform_point(vec);
500 *p++ = (unsigned char)vec[2];
501 *p++ = (unsigned char)vec[1];
502 *p++ = (unsigned char)vec[0];
503 }
504 }
505
506 // Page 5: negative Z.
507 for (yi = 0; yi < size; ++yi) {
508 for (xi = 0; xi < size; ++xi) {
509 LVector3 vec(center - xi, center - yi, -half_size);
510 vec.normalize();
511 vec = scale.xform_point(vec);
512 *p++ = (unsigned char)vec[2];
513 *p++ = (unsigned char)vec[1];
514 *p++ = (unsigned char)vec[0];
515 }
516 }
517}
518
519/**
520 * Generates a special 256x1 1-d texture that can be used to apply an
521 * arbitrary alpha scale to objects by judicious use of the texture matrix. The
522 * texture is a gradient, with an alpha of 0 on the left (U = 0), and 255 on
523 * the right (U = 1).
524 */
525void Texture::
526generate_alpha_scale_map() {
527 CDWriter cdata(_cycler, true);
528 do_setup_texture(cdata, TT_1d_texture, 256, 1, 1, T_unsigned_byte, F_alpha);
529 cdata->_default_sampler.set_wrap_u(SamplerState::WM_clamp);
530 cdata->_default_sampler.set_minfilter(SamplerState::FT_nearest);
531 cdata->_default_sampler.set_magfilter(SamplerState::FT_nearest);
532
533 cdata->_compression = CM_off;
534
535 cdata->inc_image_modified();
536 cdata->inc_properties_modified();
537
538 PTA_uchar image = do_make_ram_image(cdata);
539 cdata->_keep_ram_image = true;
540
541 unsigned char *p = image;
542 for (int xi = 0; xi < 256; ++xi) {
543 *p++ = xi;
544 }
545}
546
547/**
548 * Reads the named filename into the texture.
549 */
550bool Texture::
551read(const Filename &fullpath, const LoaderOptions &options) {
552 CDWriter cdata(_cycler, true);
553 do_clear(cdata);
554 cdata->inc_properties_modified();
555 cdata->inc_image_modified();
556 return do_read(cdata, fullpath, Filename(), 0, 0, 0, 0, false, false,
557 options, nullptr);
558}
559
560/**
561 * Combine a 3-component image with a grayscale image to get a 4-component
562 * image.
563 *
564 * See the description of the full-parameter read() method for the meaning of
565 * the primary_file_num_channels and alpha_file_channel parameters.
566 */
567bool Texture::
568read(const Filename &fullpath, const Filename &alpha_fullpath,
569 int primary_file_num_channels, int alpha_file_channel,
570 const LoaderOptions &options) {
571 CDWriter cdata(_cycler, true);
572 do_clear(cdata);
573 cdata->inc_properties_modified();
574 cdata->inc_image_modified();
575 return do_read(cdata, fullpath, alpha_fullpath, primary_file_num_channels,
576 alpha_file_channel, 0, 0, false, false,
577 options, nullptr);
578}
579
580/**
581 * Reads a single file into a single page or mipmap level, or automatically
582 * reads a series of files into a series of pages and/or mipmap levels.
583 *
584 * See the description of the full-parameter read() method for the meaning of
585 * the various parameters.
586 */
587bool Texture::
588read(const Filename &fullpath, int z, int n,
589 bool read_pages, bool read_mipmaps,
590 const LoaderOptions &options) {
591 CDWriter cdata(_cycler, true);
592 cdata->inc_properties_modified();
593 cdata->inc_image_modified();
594 return do_read(cdata, fullpath, Filename(), 0, 0, z, n, read_pages, read_mipmaps,
595 options, nullptr);
596}
597
598/**
599 * Reads the texture from the indicated filename. If
600 * primary_file_num_channels is not 0, it specifies the number of components
601 * to downgrade the image to if it is greater than this number.
602 *
603 * If the filename has the extension .txo, this implicitly reads a texture
604 * object instead of a filename (which replaces all of the texture
605 * properties). In this case, all the rest of the parameters are ignored, and
606 * the filename should not contain any hash marks; just the one named file
607 * will be read, since a single .txo file can contain all pages and mipmaps
608 * necessary to define a texture.
609 *
610 * If alpha_fullpath is not empty, it specifies the name of a file from which
611 * to retrieve the alpha. In this case, alpha_file_channel represents the
612 * numeric channel of this image file to use as the resulting texture's alpha
613 * channel; usually, this is 0 to indicate the grayscale combination of r, g,
614 * b; or it may be a one-based channel number, e.g. 1 for the red channel, 2
615 * for the green channel, and so on.
616 *
617 * If read_pages is false, then z indicates the page number into which this
618 * image will be assigned. Normally this is 0 for the first (or only) page of
619 * the texture. 3-D textures have one page for each level of depth, and cube
620 * map textures always have six pages.
621 *
622 * If read_pages is true, multiple images will be read at once, one for each
623 * page of a cube map or a 3-D texture. In this case, the filename should
624 * contain a sequence of one or more hash marks ("#") which will be filled in
625 * with the z value of each page, zero-based. In this case, the z parameter
626 * indicates the maximum z value that will be loaded, or 0 to load all
627 * filenames that exist.
628 *
629 * If read_mipmaps is false, then n indicates the mipmap level to which this
630 * image will be assigned. Normally this is 0 for the base texture image, but
631 * it is possible to load custom mipmap levels into the later images. After
632 * the base texture image is loaded (thus defining the size of the texture),
633 * you can call get_expected_num_mipmap_levels() to determine the maximum
634 * sensible value for n.
635 *
636 * If read_mipmaps is true, multiple images will be read as above, but this
637 * time the images represent the different mipmap levels of the texture image.
638 * In this case, the n parameter indicates the maximum n value that will be
639 * loaded, or 0 to load all filenames that exist (up to the expected number of
640 * mipmap levels).
641 *
642 * If both read_pages and read_mipmaps is true, then both sequences will be
643 * read; the filename should contain two sequences of hash marks, separated by
644 * some character such as a hyphen, underscore, or dot. The first hash mark
645 * sequence will be filled in with the mipmap level, while the second hash
646 * mark sequence will be the page index.
647 *
648 * This method implicitly sets keep_ram_image to false.
649 */
650bool Texture::
651read(const Filename &fullpath, const Filename &alpha_fullpath,
652 int primary_file_num_channels, int alpha_file_channel,
653 int z, int n, bool read_pages, bool read_mipmaps,
654 BamCacheRecord *record,
655 const LoaderOptions &options) {
656 CDWriter cdata(_cycler, true);
657 cdata->inc_properties_modified();
658 cdata->inc_image_modified();
659 return do_read(cdata, fullpath, alpha_fullpath, primary_file_num_channels,
660 alpha_file_channel, z, n, read_pages, read_mipmaps,
661 options, record);
662}
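// Illustrative examples of the hash-mark convention documented above (the
// filenames here are hypothetical).  Reading the six pages of a cube map
// stored as "sky_0.png" .. "sky_5.png", and reading pre-generated mipmap
// levels stored as "grid_0.png", "grid_1.png", ...:
//
//   tex->read("sky_#.png", 0, 0, true, false, LoaderOptions());   // read_pages
//   tex->read("grid_#.png", 0, 0, false, true, LoaderOptions());  // read_mipmaps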
663
664/**
665 * Estimates the amount of texture memory that will be consumed by loading
666 * this texture. This returns a value that is not specific to any particular
667 * graphics card or driver; it tries to make a reasonable assumption about how
668 * a driver will load the texture. It does not account for texture
669 * compression or anything fancy. This is mainly useful for debugging and
670 * reporting purposes.
671 *
672 * Returns a value in bytes.
673 */
674size_t Texture::
675estimate_texture_memory() const {
676 CDReader cdata(_cycler);
677 size_t pixels = cdata->_x_size * cdata->_y_size * cdata->_z_size;
678
679 size_t bpp = 0;
680 switch (cdata->_format) {
681 case Texture::F_rgb332:
682 bpp = 1;
683 break;
684
685 case Texture::F_alpha:
686 case Texture::F_red:
687 case Texture::F_green:
688 case Texture::F_blue:
689 case Texture::F_luminance:
690 case Texture::F_sluminance:
691 case Texture::F_r8i:
692 bpp = 1;
693 break;
694
695 case Texture::F_luminance_alpha:
696 case Texture::F_luminance_alphamask:
697 case Texture::F_sluminance_alpha:
698 case Texture::F_rgba4:
699 case Texture::F_rgb5:
700 case Texture::F_rgba5:
701 case Texture::F_rg:
702 bpp = 2;
703 break;
704
705 case Texture::F_rgba:
706 case Texture::F_rgbm:
707 case Texture::F_rgb:
708 case Texture::F_srgb:
709 // Most of the above formats use only 3 bytes per pixel, but they are
710 // likely to be padded out to 4 by the driver.
711 bpp = 4;
712 break;
713
714 case Texture::F_color_index:
715 case Texture::F_rgb8:
716 case Texture::F_rgba8:
717 case Texture::F_srgb_alpha:
718 case Texture::F_rgb8i:
719 case Texture::F_rgba8i:
720 bpp = 4;
721 break;
722
723 case Texture::F_depth_stencil:
724 bpp = 4;
725 break;
726
727 case Texture::F_depth_component:
728 case Texture::F_depth_component16:
729 bpp = 2;
730 break;
731
732 case Texture::F_depth_component24: // Gets padded
733 case Texture::F_depth_component32:
734 bpp = 4;
735 break;
736
737 case Texture::F_rgba12:
738 case Texture::F_rgb12:
739 bpp = 8;
740 break;
741
742 case Texture::F_rgba32:
743 case Texture::F_rgba32i:
744 bpp = 16;
745 break;
746
747 case Texture::F_r16:
748 case Texture::F_r16i:
749 case Texture::F_rg8i:
750 bpp = 2;
751 break;
752 case Texture::F_rg16:
753 case Texture::F_rg16i:
754 bpp = 4;
755 break;
756 case Texture::F_rgb16:
757 case Texture::F_rgb16i:
758 case Texture::F_rgba16:
759 case Texture::F_rgba16i:
760 bpp = 8;
761 break;
762
763 case Texture::F_r32i:
764 case Texture::F_r32:
765 bpp = 4;
766 break;
767
768 case Texture::F_rg32:
769 case Texture::F_rg32i:
770 bpp = 8;
771 break;
772
773 case Texture::F_rgb32:
774 case Texture::F_rgb32i:
775 bpp = 16;
776 break;
777
778 case Texture::F_r11_g11_b10:
779 case Texture::F_rgb9_e5:
780 case Texture::F_rgb10_a2:
781 bpp = 4;
782 break;
783 }
784
785 if (bpp == 0) {
786 bpp = 4;
787 gobj_cat.warning() << "Unhandled format in estimate_texture_memory(): "
788 << cdata->_format << "\n";
789 }
790
791 size_t bytes = pixels * bpp;
792 if (uses_mipmaps()) {
793 bytes = (bytes * 4) / 3;
794 }
795
796 return bytes;
797}
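// A worked example of the estimate above: an uncompressed 1024 x 1024 F_rgba8
// texture is 1024 * 1024 * 4 = 4 MiB; if mipmaps are in use, the estimate
// grows by a third, to roughly 5.33 MiB.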
798
799/**
800 * Records an arbitrary object in the Texture, associated with a specified
801 * key. The object may later be retrieved by calling get_aux_data() with the
802 * same key.
803 *
804 * These data objects are not recorded to a bam or txo file.
805 */
806void Texture::
807set_aux_data(const string &key, TypedReferenceCount *aux_data) {
808 MutexHolder holder(_lock);
809 _aux_data[key] = aux_data;
810}
811
812/**
813 * Removes a record previously recorded via set_aux_data().
814 */
815void Texture::
816clear_aux_data(const string &key) {
817 MutexHolder holder(_lock);
818 _aux_data.erase(key);
819}
820
821/**
822 * Returns a record previously recorded via set_aux_data(). Returns NULL if
823 * there was no record associated with the indicated key.
824 */
825TypedReferenceCount *Texture::
826get_aux_data(const string &key) const {
827 MutexHolder holder(_lock);
828 AuxData::const_iterator di;
829 di = _aux_data.find(key);
830 if (di != _aux_data.end()) {
831 return (*di).second;
832 }
833 return nullptr;
834}
835
836/**
837 * Reads the texture from a Panda texture object. This defines the complete
838 * Texture specification, including the image data as well as all texture
839 * properties. This only works if the txo file contains a static Texture
840 * image, as opposed to a subclass of Texture such as a movie texture.
841 *
842 * Pass a real filename if it is available, or empty string if it is not.
843 */
844bool Texture::
845read_txo(istream &in, const string &filename) {
846 CDWriter cdata(_cycler, true);
847 cdata->inc_properties_modified();
848 cdata->inc_image_modified();
849 return do_read_txo(cdata, in, filename);
850}
851
852/**
853 * Constructs a new Texture object from the txo file. This is similar to
854 * Texture::read_txo(), but it constructs and returns a new object, which
855 * allows it to return a subclass of Texture (for instance, a movie texture).
856 *
857 * Pass a real filename if it is available, or empty string if it is not.
858 */
859PT(Texture) Texture::
860make_from_txo(istream &in, const string &filename) {
861 DatagramInputFile din;
862
863 if (!din.open(in, filename)) {
864 gobj_cat.error()
865 << "Could not read texture object: " << filename << "\n";
866 return nullptr;
867 }
868
869 string head;
870 if (!din.read_header(head, _bam_header.size())) {
871 gobj_cat.error()
872 << filename << " is not a texture object file.\n";
873 return nullptr;
874 }
875
876 if (head != _bam_header) {
877 gobj_cat.error()
878 << filename << " is not a texture object file.\n";
879 return nullptr;
880 }
881
882 BamReader reader(&din);
883 if (!reader.init()) {
884 return nullptr;
885 }
886
887 TypedWritable *object = reader.read_object();
888
889 if (object != nullptr &&
890 object->is_exact_type(BamCacheRecord::get_class_type())) {
891 // Here's a special case: if the first object in the file is a
892 // BamCacheRecord, it's really a cache data file and not a true txo file;
893 // but skip over the cache data record and let the user treat it like an
894 // ordinary txo file.
895 object = reader.read_object();
896 }
897
898 if (object == nullptr) {
899 gobj_cat.error()
900 << "Texture object " << filename << " is empty.\n";
901 return nullptr;
902
903 } else if (!object->is_of_type(Texture::get_class_type())) {
904 gobj_cat.error()
905 << "Texture object " << filename << " contains a "
906 << object->get_type() << ", not a Texture.\n";
907 return nullptr;
908 }
909
910 PT(Texture) other = DCAST(Texture, object);
911 if (!reader.resolve()) {
912 gobj_cat.error()
913 << "Unable to fully resolve texture object file.\n";
914 return nullptr;
915 }
916
917 return other;
918}
919
920/**
921 * Writes the texture to a Panda texture object. This defines the complete
922 * Texture specification, including the image data as well as all texture
923 * properties.
924 *
925 * The filename is just for reference.
926 */
927bool Texture::
928write_txo(ostream &out, const string &filename) const {
929 CDReader cdata(_cycler);
930 return do_write_txo(cdata, out, filename);
931}
932
933/**
934 * Reads the texture from a DDS file object. This is a Microsoft-defined file
935 * format; it is similar in principle to a txo object, in that it is designed
936 * to contain the texture image in a form as similar as possible to its
937 * runtime image, and it can contain mipmaps, pre-compressed textures, and so
938 * on.
939 *
940 * As with read_txo, the filename is just for reference.
941 */
942bool Texture::
943read_dds(istream &in, const string &filename, bool header_only) {
944 CDWriter cdata(_cycler, true);
945 cdata->inc_properties_modified();
946 cdata->inc_image_modified();
947 return do_read_dds(cdata, in, filename, header_only);
948}
949
950/**
951 * Reads the texture from a KTX file object. This is a Khronos-defined file
952 * format; it is similar in principle to a dds object, in that it is designed
953 * to contain the texture image in a form as similar as possible to its
954 * runtime image, and it can contain mipmaps, pre-compressed textures, and so
955 * on.
956 *
957 * As with read_dds, the filename is just for reference.
958 */
959bool Texture::
960read_ktx(istream &in, const string &filename, bool header_only) {
961 CDWriter cdata(_cycler, true);
962 cdata->inc_properties_modified();
963 cdata->inc_image_modified();
964 return do_read_ktx(cdata, in, filename, header_only);
965}
966
967/**
968 * Loads a texture whose filename is derived by concatenating a suffix to the
969 * filename of this texture. May return NULL, for example, if this texture
970 * doesn't have a filename.
971 */
972PT(Texture) Texture::
973load_related(const InternalName *suffix) const {
974 MutexHolder holder(_lock);
975 CDReader cdata(_cycler);
976
977 RelatedTextures::const_iterator ti;
978 ti = _related_textures.find(suffix);
979 if (ti != _related_textures.end()) {
980 return (*ti).second;
981 }
982 if (cdata->_fullpath.empty()) {
983 return nullptr;
984 }
985 Filename main = cdata->_fullpath;
986 main.set_basename_wo_extension(main.get_basename_wo_extension() +
987 suffix->get_name());
988 PT(Texture) res;
989 if (!cdata->_alpha_fullpath.empty()) {
990 Filename alph = cdata->_alpha_fullpath;
991 alph.set_basename_wo_extension(alph.get_basename_wo_extension() +
992 suffix->get_name());
993 VirtualFileSystem *vfs = VirtualFileSystem::get_global_ptr();
994 if (vfs->exists(alph)) {
995 // The alpha variant of the filename, with the suffix, exists. Use it
996 // to load the texture.
997 res = TexturePool::load_texture(main, alph,
998 cdata->_primary_file_num_channels,
999 cdata->_alpha_file_channel, false);
1000 } else {
1001 // If the alpha variant of the filename doesn't exist, just go ahead and
1002 // load the related texture without alpha.
1003 res = TexturePool::load_texture(main);
1004 }
1005
1006 } else {
1007 // No alpha filename--just load the single file. It doesn't necessarily
1008 // have the same number of channels as this one.
1009 res = TexturePool::load_texture(main);
1010 }
1011
1012 // I'm casting away the const-ness of 'this' because this field is only a
1013 // cache.
1014 ((Texture *)this)->_related_textures.insert(RelatedTextures::value_type(suffix, res));
1015 return res;
1016}
1017
1018/**
1019 * Replaces the current system-RAM image with the new data, converting it
1020 * first if necessary from the indicated component-order format. See
1021 * get_ram_image_as() for specifications about the format. This method cannot
1022 * support compressed image data or sub-pages; use set_ram_image() for that.
1023 */
1024void Texture::
1025set_ram_image_as(CPTA_uchar image, const string &supplied_format) {
1026 CDWriter cdata(_cycler, true);
1027
1028 string format = upcase(supplied_format);
1029
1030 // Make sure we can grab something that's uncompressed.
1031 size_t imgsize = (size_t)cdata->_x_size * (size_t)cdata->_y_size *
1032 (size_t)cdata->_z_size * (size_t)cdata->_num_views;
1033 nassertv(image.size() == (size_t)(cdata->_component_width * format.size() * imgsize));
1034
1035 // Check if the format is already what we have internally.
1036 if ((cdata->_num_components == 1 && format.size() == 1) ||
1037 (cdata->_num_components == 2 && format.size() == 2 && format.at(1) == 'A' && format.at(0) != 'A') ||
1038 (cdata->_num_components == 3 && format == "BGR") ||
1039 (cdata->_num_components == 4 && format == "BGRA")) {
1040 // The format string is already our format, so we just need to copy it.
1041 do_set_ram_image(cdata, image);
1042 return;
1043 }
1044
1045 // Create a new empty array that can hold our image.
1046 PTA_uchar newdata = PTA_uchar::empty_array(imgsize * cdata->_num_components * cdata->_component_width, get_class_type());
1047
1048 // These ifs are for optimization of commonly used image types.
1049 if (cdata->_component_width == 1) {
1050 if (format == "RGBA" && cdata->_num_components == 4) {
1051 imgsize *= 4;
1052 for (size_t p = 0; p < imgsize; p += 4) {
1053 newdata[p + 2] = image[p ];
1054 newdata[p + 1] = image[p + 1];
1055 newdata[p ] = image[p + 2];
1056 newdata[p + 3] = image[p + 3];
1057 }
1058 do_set_ram_image(cdata, newdata);
1059 return;
1060 }
1061 if (format == "RGB" && cdata->_num_components == 3) {
1062 imgsize *= 3;
1063 for (size_t p = 0; p < imgsize; p += 3) {
1064 newdata[p + 2] = image[p ];
1065 newdata[p + 1] = image[p + 1];
1066 newdata[p ] = image[p + 2];
1067 }
1068 do_set_ram_image(cdata, newdata);
1069 return;
1070 }
1071 if (format == "A" && cdata->_num_components != 3) {
1072 // We can generally rely on alpha to be the last component.
1073 int component = cdata->_num_components - 1;
1074 for (size_t p = 0; p < imgsize; ++p) {
1075 newdata[p * cdata->_num_components + component] = image[p];
1076 }
1077 do_set_ram_image(cdata, newdata);
1078 return;
1079 }
1080 for (size_t p = 0; p < imgsize; ++p) {
1081 for (uchar s = 0; s < format.size(); ++s) {
1082 signed char component = -1;
1083 if (format.at(s) == 'B' || (cdata->_num_components <= 2 && format.at(s) != 'A')) {
1084 component = 0;
1085 } else if (format.at(s) == 'G') {
1086 component = 1;
1087 } else if (format.at(s) == 'R') {
1088 component = 2;
1089 } else if (format.at(s) == 'A') {
1090 if (cdata->_num_components != 3) {
1091 component = cdata->_num_components - 1;
1092 } else {
1093 // Ignore.
1094 }
1095 } else if (format.at(s) == '0') {
1096 // Ignore.
1097 } else if (format.at(s) == '1') {
1098 // Ignore.
1099 } else {
1100 gobj_cat.error() << "Unexpected component character '"
1101 << format.at(s) << "', expected one of RGBA!\n";
1102 return;
1103 }
1104 if (component >= 0) {
1105 newdata[p * cdata->_num_components + component] = image[p * format.size() + s];
1106 }
1107 }
1108 }
1109 do_set_ram_image(cdata, newdata);
1110 return;
1111 }
1112 for (size_t p = 0; p < imgsize; ++p) {
1113 for (uchar s = 0; s < format.size(); ++s) {
1114 signed char component = -1;
1115 if (format.at(s) == 'B' || (cdata->_num_components <= 2 && format.at(s) != 'A')) {
1116 component = 0;
1117 } else if (format.at(s) == 'G') {
1118 component = 1;
1119 } else if (format.at(s) == 'R') {
1120 component = 2;
1121 } else if (format.at(s) == 'A') {
1122 if (cdata->_num_components != 3) {
1123 component = cdata->_num_components - 1;
1124 } else {
1125 // Ignore.
1126 }
1127 } else if (format.at(s) == '0') {
1128 // Ignore.
1129 } else if (format.at(s) == '1') {
1130 // Ignore.
1131 } else {
1132 gobj_cat.error() << "Unexpected component character '"
1133 << format.at(s) << "', expected one of RGBA!\n";
1134 return;
1135 }
1136 if (component >= 0) {
1137 memcpy((void*)(newdata + (p * cdata->_num_components + component) * cdata->_component_width),
1138 (void*)(image + (p * format.size() + s) * cdata->_component_width),
1139 cdata->_component_width);
1140 }
1141 }
1142 }
1143 do_set_ram_image(cdata, newdata);
1144 return;
1145}
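// A minimal sketch of a call to the method above, assuming "data" holds
// x_size * y_size * z_size * num_views * 4 bytes of 8-bit pixels in RGBA
// component order; the method reorders them into the internal BGRA layout:
//
//   tex->set_ram_image_as(data, "RGBA");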
1146
1147/**
1148 * Returns the flag that indicates whether this Texture is eligible to have
1149 * its main RAM copy of the texture memory dumped when the texture is prepared
1150 * for rendering. See set_keep_ram_image().
1151 */
1152bool Texture::
1153get_keep_ram_image() const {
1154 CDReader cdata(_cycler);
1155 return cdata->_keep_ram_image;
1156}
1157
1158/**
1159 * Returns true if there is enough information in this Texture object to write
1160 * it to the bam cache successfully, false otherwise. For most textures, this
1161 * is the same as has_ram_image().
1162 */
1163bool Texture::
1164is_cacheable() const {
1165 CDReader cdata(_cycler);
1166 return do_has_bam_rawdata(cdata);
1167}
1168
1169/**
1170 * Returns the number of contiguous mipmap levels that exist in RAM, up until
1171 * the first gap in the sequence. It is guaranteed that at least mipmap
1172 * levels [0, get_num_ram_mipmap_images()) exist.
1173 *
1174 * The number returned will never exceed the number of required mipmap images
1175 * based on the size of the texture and its filter mode.
1176 *
1177 * This method is different from get_num_ram_mipmap_images() in that it
1178 * returns only the number of mipmap levels that can actually be usefully
1179 * loaded, regardless of the actual number that may be stored.
1180 */
1181int Texture::
1182get_num_loadable_ram_mipmap_images() const {
1183 CDReader cdata(_cycler);
1184 if (cdata->_ram_images.empty() || cdata->_ram_images[0]._image.empty()) {
1185 // If we don't even have a base image, the answer is none.
1186 return 0;
1187 }
1188 if (!uses_mipmaps()) {
1189 // If we have a base image and don't require mipmapping, the answer is 1.
1190 return 1;
1191 }
1192
1193 // Check that we have enough mipmap levels to meet the size requirements.
1194 int size = max(cdata->_x_size, max(cdata->_y_size, cdata->_z_size));
1195 int n = 0;
1196 int x = 1;
1197 while (x < size) {
1198 x = (x << 1);
1199 ++n;
1200 if (n >= (int)cdata->_ram_images.size() || cdata->_ram_images[n]._image.empty()) {
1201 return n;
1202 }
1203 }
1204
1205 ++n;
1206 return n;
1207}
1208
1209/**
1210 * Returns the system-RAM image data associated with the nth mipmap level, if
1211 * present. Returns NULL if the nth mipmap level is not present.
1212 */
1213CPTA_uchar Texture::
1214get_ram_mipmap_image(int n) const {
1215 CDReader cdata(_cycler);
1216 if (n < (int)cdata->_ram_images.size() && !cdata->_ram_images[n]._image.empty()) {
1217 return cdata->_ram_images[n]._image;
1218 }
1219 return CPTA_uchar(get_class_type());
1220}
1221
1222/**
1223 * Similar to get_ram_mipmap_image(); however, in this case the void pointer
1224 * for the given ram image is returned. This will be NULL unless it has been
1225 * explicitly set.
1226 */
1227void *Texture::
1228get_ram_mipmap_pointer(int n) const {
1229 CDReader cdata(_cycler);
1230 if (n < (int)cdata->_ram_images.size()) {
1231 return cdata->_ram_images[n]._pointer_image;
1232 }
1233 return nullptr;
1234}
1235
1236/**
1237 * Sets an explicit void pointer as the texture's mipmap image for the
1238 * indicated level. This is a special call to direct a texture to reference
1239 * some external image location, for instance from a webcam input.
1240 *
1241 * The texture will henceforth reference this pointer directly, instead of its
1242 * own internal storage; the user is responsible for ensuring the data at this
1243 * address remains allocated and valid, and in the correct format, during the
1244 * lifetime of the texture.
1245 */
1246void Texture::
1247set_ram_mipmap_pointer(int n, void *image, size_t page_size) {
1248 CDWriter cdata(_cycler, true);
1249 nassertv(cdata->_ram_image_compression != CM_off || do_get_expected_ram_mipmap_image_size(cdata, n));
1250
1251 while (n >= (int)cdata->_ram_images.size()) {
1252 cdata->_ram_images.push_back(RamImage());
1253 }
1254
1255 cdata->_ram_images[n]._page_size = page_size;
1256 // _ram_images[n]._image.clear();
1257 cdata->_ram_images[n]._pointer_image = image;
1258 cdata->inc_image_modified();
1259}
1260
1261/**
1262 * Accepts a raw pointer cast as an int, which is then passed to
1263 * set_ram_mipmap_pointer(); see the documentation for that method.
1264 *
1265 * This variant is particularly useful to set an external pointer from a
1266 * language like Python, which doesn't support void pointers directly.
1267 */
1268void Texture::
1269set_ram_mipmap_pointer_from_int(long long pointer, int n, int page_size) {
1270 set_ram_mipmap_pointer(n, (void*)pointer, (size_t)page_size);
1271}
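// This variant is mainly useful from Python, which cannot pass a raw void
// pointer.  An illustrative call, where "addr" is an integer address obtained
// elsewhere and "page_size" matches the external buffer:
//
//   tex.set_ram_mipmap_pointer_from_int(addr, 0, page_size)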
1272
1273/**
1274 * Discards the current system-RAM image for the nth mipmap level.
1275 */
1276void Texture::
1277clear_ram_mipmap_image(int n) {
1278 CDWriter cdata(_cycler, true);
1279 if (n >= (int)cdata->_ram_images.size()) {
1280 return;
1281 }
1282 cdata->_ram_images[n]._page_size = 0;
1283 cdata->_ram_images[n]._image.clear();
1284 cdata->_ram_images[n]._pointer_image = nullptr;
1285}
1286
1287/**
1288 * Returns a modifiable pointer to the internal "simple" texture image. See
1289 * set_simple_ram_image().
1290 */
1291PTA_uchar Texture::
1292modify_simple_ram_image() {
1293 CDWriter cdata(_cycler, true);
1294 cdata->_simple_image_date_generated = (int32_t)time(nullptr);
1295 return cdata->_simple_ram_image._image;
1296}
1297
1298/**
1299 * Creates an empty array for the simple ram image of the indicated size, and
1300 * returns a modifiable pointer to the new array. See set_simple_ram_image().
1301 */
1302PTA_uchar Texture::
1303new_simple_ram_image(int x_size, int y_size) {
1304 CDWriter cdata(_cycler, true);
1305 nassertr(cdata->_texture_type == TT_2d_texture, PTA_uchar());
1306 size_t expected_page_size = (size_t)(x_size * y_size * 4);
1307
1308 cdata->_simple_x_size = x_size;
1309 cdata->_simple_y_size = y_size;
1310 cdata->_simple_ram_image._image = PTA_uchar::empty_array(expected_page_size);
1311 cdata->_simple_ram_image._page_size = expected_page_size;
1312 cdata->_simple_image_date_generated = (int32_t)time(nullptr);
1313 cdata->inc_simple_image_modified();
1314
1315 return cdata->_simple_ram_image._image;
1316}
1317
1318/**
1319 * Computes the "simple" ram image by loading the main RAM image, if it is not
1320 * already available, and reducing it to 16x16 or smaller. This may be an
1321 * expensive operation.
1322 */
1323void Texture::
1324generate_simple_ram_image() {
1325 CDWriter cdata(_cycler, true);
1326
1327 if (cdata->_texture_type != TT_2d_texture ||
1328 cdata->_ram_image_compression != CM_off) {
1329 return;
1330 }
1331
1332 PNMImage pnmimage;
1333 if (!do_store_one(cdata, pnmimage, 0, 0)) {
1334 return;
1335 }
1336
1337 // Start at the suggested size from the config file.
1338 int x_size = simple_image_size.get_word(0);
1339 int y_size = simple_image_size.get_word(1);
1340
1341 // Limit it to no larger than the source image, and also make it a power of
1342 // two.
1343 x_size = down_to_power_2(min(x_size, cdata->_x_size));
1344 y_size = down_to_power_2(min(y_size, cdata->_y_size));
1345
1346 // Generate a reduced image of that size.
1347 PNMImage scaled(x_size, y_size, pnmimage.get_num_channels());
1348 scaled.quick_filter_from(pnmimage);
1349
1350 // Make sure the reduced image has 4 components, by convention.
1351 if (!scaled.has_alpha()) {
1352 scaled.add_alpha();
1353 scaled.alpha_fill(1.0);
1354 }
1355 scaled.set_num_channels(4);
1356
1357 // Now see if we can go even smaller.
1358 bool did_anything;
1359 do {
1360 did_anything = false;
1361
1362 // Try to reduce X.
1363 if (x_size > 1) {
1364 int new_x_size = (x_size >> 1);
1365 PNMImage smaller(new_x_size, y_size, 4);
1366 smaller.quick_filter_from(scaled);
1367 PNMImage bigger(x_size, y_size, 4);
1368 bigger.quick_filter_from(smaller);
1369
1370 if (compare_images(scaled, bigger)) {
1371 scaled.take_from(smaller);
1372 x_size = new_x_size;
1373 did_anything = true;
1374 }
1375 }
1376
1377 // Try to reduce Y.
1378 if (y_size > 1) {
1379 int new_y_size = (y_size >> 1);
1380 PNMImage smaller(x_size, new_y_size, 4);
1381 smaller.quick_filter_from(scaled);
1382 PNMImage bigger(x_size, y_size, 4);
1383 bigger.quick_filter_from(smaller);
1384
1385 if (compare_images(scaled, bigger)) {
1386 scaled.take_from(smaller);
1387 y_size = new_y_size;
1388 did_anything = true;
1389 }
1390 }
1391 } while (did_anything);
1392
1393 size_t expected_page_size = (size_t)(x_size * y_size * 4);
1394 PTA_uchar image = PTA_uchar::empty_array(expected_page_size, get_class_type());
1395 convert_from_pnmimage(image, expected_page_size, x_size, 0, 0, 0, scaled, 4, 1);
1396
1397 do_set_simple_ram_image(cdata, image, x_size, y_size);
1398 cdata->_simple_image_date_generated = (int32_t)time(nullptr);
1399}
1400
1401/**
1402 * Returns a TexturePeeker object that can be used to examine the individual
1403 * texels stored within this Texture by (u, v) coordinate.
1404 *
1405 * If the texture has a ram image resident, that image is used. If it does
1406 * not have a full ram image but does have a simple_ram_image resident, that
1407 * image is used instead. If neither image is resident, the full image is
1408 * reloaded.
1409 *
1410 * Returns NULL if the texture cannot find an image to load, or the texture
1411 * format is incompatible.
1412 */
1413PT(TexturePeeker) Texture::
1414peek() {
1415 CDWriter cdata(_cycler, unlocked_ensure_ram_image(true));
1416
1417 PT(TexturePeeker) peeker = new TexturePeeker(this, cdata);
1418 if (peeker->is_valid()) {
1419 return peeker;
1420 }
1421
1422 return nullptr;
1423}
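// A sketch of typical use of peek(), assuming TexturePeeker::lookup() as
// declared in texturePeeker.h:
//
//   PT(TexturePeeker) peeker = tex->peek();
//   if (peeker != nullptr) {
//     LColor color;
//     peeker->lookup(color, 0.5f, 0.5f);  // sample near the texture center
//   }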
1424
1425/**
1426 * Indicates that the texture should be enqueued to be prepared in the
1427 * indicated prepared_objects at the beginning of the next frame. This will
1428 * ensure the texture is already loaded into texture memory if it is expected
1429 * to be rendered soon.
1430 *
1431 * Use this function instead of prepare_now() to preload textures from a user
1432 * interface standpoint.
1433 */
1434PT(AsyncFuture) Texture::
1435prepare(PreparedGraphicsObjects *prepared_objects) {
1436 return prepared_objects->enqueue_texture_future(this);
1437}
1438
1439/**
1440 * Returns true if the texture has already been prepared or enqueued for
1441 * preparation on the indicated GSG, false otherwise.
1442 */
1443bool Texture::
1444is_prepared(PreparedGraphicsObjects *prepared_objects) const {
1445 MutexHolder holder(_lock);
1446 PreparedViews::const_iterator pvi;
1447 pvi = _prepared_views.find(prepared_objects);
1448 if (pvi != _prepared_views.end()) {
1449 return true;
1450 }
1451 return prepared_objects->is_texture_queued(this);
1452}
1453
1454/**
1455 * Returns true if the texture needs to be re-loaded onto the indicated GSG,
1456 * either because its image data is out-of-date, or because it's not fully
1457 * prepared now.
1458 */
1459bool Texture::
1460was_image_modified(PreparedGraphicsObjects *prepared_objects) const {
1461 MutexHolder holder(_lock);
1462 CDReader cdata(_cycler);
1463
1464 PreparedViews::const_iterator pvi;
1465 pvi = _prepared_views.find(prepared_objects);
1466 if (pvi != _prepared_views.end()) {
1467 const Contexts &contexts = (*pvi).second;
1468 for (int view = 0; view < cdata->_num_views; ++view) {
1469 Contexts::const_iterator ci;
1470 ci = contexts.find(view);
1471 if (ci == contexts.end()) {
1472 return true;
1473 }
1474 TextureContext *tc = (*ci).second;
1475 if (tc->was_image_modified()) {
1476 return true;
1477 }
1478 }
1479 return false;
1480 }
1481 return true;
1482}
1483
1484/**
1485 * Returns the number of bytes which the texture is reported to consume within
1486 * graphics memory, for the indicated GSG. This may return a nonzero value
1487 * even if the texture is not currently resident; you should also check
1488 * get_resident() if you want to know how much space the texture is actually
1489 * consuming right now.
1490 */
1491size_t Texture::
1492get_data_size_bytes(PreparedGraphicsObjects *prepared_objects) const {
1493 MutexHolder holder(_lock);
1494 CDReader cdata(_cycler);
1495
1496 PreparedViews::const_iterator pvi;
1497 size_t total_size = 0;
1498 pvi = _prepared_views.find(prepared_objects);
1499 if (pvi != _prepared_views.end()) {
1500 const Contexts &contexts = (*pvi).second;
1501 for (int view = 0; view < cdata->_num_views; ++view) {
1502 Contexts::const_iterator ci;
1503 ci = contexts.find(view);
1504 if (ci != contexts.end()) {
1505 TextureContext *tc = (*ci).second;
1506 total_size += tc->get_data_size_bytes();
1507 }
1508 }
1509 }
1510
1511 return total_size;
1512}
1513
1514/**
1515 * Returns true if this Texture was rendered in the most recent frame within
1516 * the indicated GSG.
1517 */
1518bool Texture::
1519get_active(PreparedGraphicsObjects *prepared_objects) const {
1520 MutexHolder holder(_lock);
1521 CDReader cdata(_cycler);
1522
1523 PreparedViews::const_iterator pvi;
1524 pvi = _prepared_views.find(prepared_objects);
1525 if (pvi != _prepared_views.end()) {
1526 const Contexts &contexts = (*pvi).second;
1527 for (int view = 0; view < cdata->_num_views; ++view) {
1528 Contexts::const_iterator ci;
1529 ci = contexts.find(view);
1530 if (ci != contexts.end()) {
1531 TextureContext *tc = (*ci).second;
1532 if (tc->get_active()) {
1533 return true;
1534 }
1535 }
1536 }
1537 }
1538 return false;
1539}
1540
1541/**
1542 * Returns true if this Texture is reported to be resident within graphics
1543 * memory for the indicated GSG.
1544 */
1545bool Texture::
1546get_resident(PreparedGraphicsObjects *prepared_objects) const {
1547 MutexHolder holder(_lock);
1548 CDReader cdata(_cycler);
1549
1550 PreparedViews::const_iterator pvi;
1551 pvi = _prepared_views.find(prepared_objects);
1552 if (pvi != _prepared_views.end()) {
1553 const Contexts &contexts = (*pvi).second;
1554 for (int view = 0; view < cdata->_num_views; ++view) {
1555 Contexts::const_iterator ci;
1556 ci = contexts.find(view);
1557 if (ci != contexts.end()) {
1558 TextureContext *tc = (*ci).second;
1559 if (tc->get_resident()) {
1560 return true;
1561 }
1562 }
1563 }
1564 }
1565 return false;
1566}
1567
1568/**
1569 * Frees the texture context only on the indicated object, if it exists there.
1570 * Returns true if it was released, false if it had not been prepared.
1571 */
1572bool Texture::
1573release(PreparedGraphicsObjects *prepared_objects) {
1574 MutexHolder holder(_lock);
1575 PreparedViews::iterator pvi;
1576 pvi = _prepared_views.find(prepared_objects);
1577 if (pvi != _prepared_views.end()) {
1578 Contexts temp;
1579 temp.swap((*pvi).second);
1580 Contexts::iterator ci;
1581 for (ci = temp.begin(); ci != temp.end(); ++ci) {
1582 TextureContext *tc = (*ci).second;
1583 if (tc != nullptr) {
1584 prepared_objects->release_texture(tc);
1585 }
1586 }
1587 _prepared_views.erase(pvi);
1588 }
1589
1590 // Maybe it wasn't prepared yet, but it's about to be.
1591 return prepared_objects->dequeue_texture(this);
1592}
1593
1594/**
1595 * Frees the context allocated on all objects for which the texture has been
1596 * declared. Returns the number of contexts which have been freed.
1597 */
1598int Texture::
1599release_all() {
1600 MutexHolder holder(_lock);
1601
1602 // We have to traverse a copy of the _prepared_views list, because the
1603 // PreparedGraphicsObjects object will call clear_prepared() in response to
1604 // each release_texture(), and we don't want to be modifying the
1605 // _prepared_views list while we're traversing it.
1606 PreparedViews temp;
1607 temp.swap(_prepared_views);
1608 int num_freed = (int)temp.size();
1609
1610 PreparedViews::iterator pvi;
1611 for (pvi = temp.begin(); pvi != temp.end(); ++pvi) {
1612 PreparedGraphicsObjects *prepared_objects = (*pvi).first;
1613 Contexts temp;
1614 temp.swap((*pvi).second);
1615 Contexts::iterator ci;
1616 for (ci = temp.begin(); ci != temp.end(); ++ci) {
1617 TextureContext *tc = (*ci).second;
1618 if (tc != nullptr) {
1619 prepared_objects->release_texture(tc);
1620 }
1621 }
1622 }
1623
1624 return num_freed;
1625}
1626
1627/**
1628 * Not to be confused with write(Filename), this method simply describes the
1629 * texture properties.
1630 */
1631void Texture::
1632write(ostream &out, int indent_level) const {
1633 CDReader cdata(_cycler);
1634 indent(out, indent_level)
1635 << cdata->_texture_type << " " << get_name();
1636 if (!cdata->_filename.empty()) {
1637 out << " (from " << cdata->_filename << ")";
1638 }
1639 out << "\n";
1640
1641 indent(out, indent_level + 2);
1642
1643 switch (cdata->_texture_type) {
1644 case TT_1d_texture:
1645 out << "1-d, " << cdata->_x_size;
1646 break;
1647
1648 case TT_2d_texture:
1649 out << "2-d, " << cdata->_x_size << " x " << cdata->_y_size;
1650 break;
1651
1652 case TT_3d_texture:
1653 out << "3-d, " << cdata->_x_size << " x " << cdata->_y_size << " x " << cdata->_z_size;
1654 break;
1655
1656 case TT_2d_texture_array:
1657 out << "2-d array, " << cdata->_x_size << " x " << cdata->_y_size << " x " << cdata->_z_size;
1658 break;
1659
1660 case TT_cube_map:
1661 out << "cube map, " << cdata->_x_size << " x " << cdata->_y_size;
1662 break;
1663
1664 case TT_cube_map_array:
1665 out << "cube map array, " << cdata->_x_size << " x " << cdata->_y_size << " x " << cdata->_z_size;
1666 break;
1667
1668 case TT_buffer_texture:
1669 out << "buffer, " << cdata->_x_size;
1670 break;
1671
1672 case TT_1d_texture_array:
1673 out << "1-d array, " << cdata->_x_size << " x " << cdata->_y_size;
1674 break;
1675 }
1676
1677 if (cdata->_num_views > 1) {
1678 out << " (x " << cdata->_num_views << " views)";
1679 }
1680
1681 out << " pixels, each " << cdata->_num_components;
1682
1683 switch (cdata->_component_type) {
1684 case T_unsigned_byte:
1685 case T_byte:
1686 out << " bytes";
1687 break;
1688
1689 case T_unsigned_short:
1690 case T_short:
1691 out << " shorts";
1692 break;
1693
1694 case T_half_float:
1695 out << " half";
1696 case T_float:
1697 out << " floats";
1698 break;
1699
1700 case T_unsigned_int_24_8:
1701 case T_int:
1702 case T_unsigned_int:
1703 out << " ints";
1704 break;
1705
1706 default:
1707 break;
1708 }
1709
1710 out << ", ";
1711 switch (cdata->_format) {
1712 case F_color_index:
1713 out << "color_index";
1714 break;
1715 case F_depth_stencil:
1716 out << "depth_stencil";
1717 break;
1718 case F_depth_component:
1719 out << "depth_component";
1720 break;
1721 case F_depth_component16:
1722 out << "depth_component16";
1723 break;
1724 case F_depth_component24:
1725 out << "depth_component24";
1726 break;
1727 case F_depth_component32:
1728 out << "depth_component32";
1729 break;
1730
1731 case F_rgba:
1732 out << "rgba";
1733 break;
1734 case F_rgbm:
1735 out << "rgbm";
1736 break;
1737 case F_rgba32:
1738 out << "rgba32";
1739 break;
1740 case F_rgba16:
1741 out << "rgba16";
1742 break;
1743 case F_rgba12:
1744 out << "rgba12";
1745 break;
1746 case F_rgba8:
1747 out << "rgba8";
1748 break;
1749 case F_rgba4:
1750 out << "rgba4";
1751 break;
1752
1753 case F_rgb:
1754 out << "rgb";
1755 break;
1756 case F_rgb12:
1757 out << "rgb12";
1758 break;
1759 case F_rgb8:
1760 out << "rgb8";
1761 break;
1762 case F_rgb5:
1763 out << "rgb5";
1764 break;
1765 case F_rgba5:
1766 out << "rgba5";
1767 break;
1768 case F_rgb332:
1769 out << "rgb332";
1770 break;
1771
1772 case F_red:
1773 out << "red";
1774 break;
1775 case F_green:
1776 out << "green";
1777 break;
1778 case F_blue:
1779 out << "blue";
1780 break;
1781 case F_alpha:
1782 out << "alpha";
1783 break;
1784 case F_luminance:
1785 out << "luminance";
1786 break;
1787 case F_luminance_alpha:
1788 out << "luminance_alpha";
1789 break;
1790 case F_luminance_alphamask:
1791 out << "luminance_alphamask";
1792 break;
1793
1794 case F_r16:
1795 out << "r16";
1796 break;
1797 case F_rg16:
1798 out << "rg16";
1799 break;
1800 case F_rgb16:
1801 out << "rgb16";
1802 break;
1803
1804 case F_srgb:
1805 out << "srgb";
1806 break;
1807 case F_srgb_alpha:
1808 out << "srgb_alpha";
1809 break;
1810 case F_sluminance:
1811 out << "sluminance";
1812 break;
1813 case F_sluminance_alpha:
1814 out << "sluminance_alpha";
1815 break;
1816
1817 case F_r32i:
1818 out << "r32i";
1819 break;
1820
1821 case F_r32:
1822 out << "r32";
1823 break;
1824 case F_rg32:
1825 out << "rg32";
1826 break;
1827 case F_rgb32:
1828 out << "rgb32";
1829 break;
1830
1831 case F_r8i:
1832 out << "r8i";
1833 break;
1834 case F_rg8i:
1835 out << "rg8i";
1836 break;
1837 case F_rgb8i:
1838 out << "rgb8i";
1839 break;
1840 case F_rgba8i:
1841 out << "rgba8i";
1842 break;
1843 case F_r11_g11_b10:
1844 out << "r11_g11_b10";
1845 break;
1846 case F_rgb9_e5:
1847 out << "rgb9_e5";
1848 break;
1849 case F_rgb10_a2:
1850 out << "rgb10_a2";
1851 break;
1852
1853 case F_rg:
1854 out << "rg";
1855 break;
1856
1857 case F_r16i:
1858 out << "r16i";
1859 break;
1860 case F_rg16i:
1861 out << "rg16i";
1862 break;
1863 case F_rgb16i:
1864 out << "rgb16i";
1865 break;
1866 case F_rgba16i:
1867 out << "rgba16i";
1868 break;
1869
1870 case F_rg32i:
1871 out << "rg32i";
1872 break;
1873 case F_rgb32i:
1874 out << "rgb32i";
1875 break;
1876 case F_rgba32i:
1877 out << "rgba32i";
1878 break;
1879 }
1880
1881 if (cdata->_compression != CM_default) {
1882 out << ", compression " << cdata->_compression;
1883 }
1884 out << "\n";
1885
1886 indent(out, indent_level + 2);
1887
1888 cdata->_default_sampler.output(out);
1889
1890 if (do_has_ram_image(cdata)) {
1891 indent(out, indent_level + 2)
1892 << do_get_ram_image_size(cdata) << " bytes in ram, compression "
1893 << cdata->_ram_image_compression << "\n";
1894
1895 if (cdata->_ram_images.size() > 1) {
1896 int count = 0;
1897 size_t total_size = 0;
1898 for (size_t n = 1; n < cdata->_ram_images.size(); ++n) {
1899 if (!cdata->_ram_images[n]._image.empty()) {
1900 ++count;
1901 total_size += cdata->_ram_images[n]._image.size();
1902 } else {
1903 // Stop at the first gap.
1904 break;
1905 }
1906 }
1907 indent(out, indent_level + 2)
1908 << count
1909 << " mipmap levels also present in ram (" << total_size
1910 << " bytes).\n";
1911 }
1912
1913 } else {
1914 indent(out, indent_level + 2)
1915 << "no ram image\n";
1916 }
1917
1918 if (!cdata->_simple_ram_image._image.empty()) {
1919 indent(out, indent_level + 2)
1920 << "simple image: " << cdata->_simple_x_size << " x "
1921 << cdata->_simple_y_size << ", "
1922 << cdata->_simple_ram_image._image.size() << " bytes\n";
1923 }
1924}
1925
1926
1927/**
1928 * Changes the size of the texture, padding if necessary, and setting the pad
1929 * region as well.
1930 */
1931void Texture::
1932set_size_padded(int x, int y, int z) {
1933 CDWriter cdata(_cycler, true);
1934 if (do_get_auto_texture_scale(cdata) != ATS_none) {
1935 do_set_x_size(cdata, up_to_power_2(x));
1936 do_set_y_size(cdata, up_to_power_2(y));
1937
1938 if (cdata->_texture_type == TT_3d_texture) {
1939 // Only pad 3D textures. It does not make sense to do so for cube maps
1940 // or 2D texture arrays.
1941 do_set_z_size(cdata, up_to_power_2(z));
1942 } else {
1943 do_set_z_size(cdata, z);
1944 }
1945 } else {
1946 do_set_x_size(cdata, x);
1947 do_set_y_size(cdata, y);
1948 do_set_z_size(cdata, z);
1949 }
1950 do_set_pad_size(cdata,
1951 cdata->_x_size - x,
1952 cdata->_y_size - y,
1953 cdata->_z_size - z);
1954}
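// [Editor's note] Usage sketch, not part of the original source. With the
// auto-texture-scale mode forced to ATS_pad, a 640x480 request is stored in a
// 1024x512 canvas and the unused margin is reported as the pad region:
//
//   PT(Texture) tex = new Texture("example");  // hypothetical texture
//   tex->setup_2d_texture();
//   tex->set_auto_texture_scale(ATS_pad);
//   tex->set_size_padded(640, 480, 1);
//   // tex->get_x_size() == 1024, tex->get_y_size() == 512
//   // tex->get_pad_x_size() == 384, tex->get_pad_y_size() == 32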
1955
1956/**
1957 * Specifies the size of the texture as it exists in its original disk file,
1958 * before any Panda scaling.
1959 */
1960void Texture::
1961set_orig_file_size(int x, int y, int z) {
1962 CDWriter cdata(_cycler, true);
1963 cdata->_orig_file_x_size = x;
1964 cdata->_orig_file_y_size = y;
1965
1966 nassertv(z == cdata->_z_size);
1967}
1968
1969/**
1970 * Creates a context for the texture on the particular GSG, if it does not
1971 * already exist. Returns the new (or old) TextureContext. This assumes that
1972 * the GraphicsStateGuardian is the currently active rendering context and
1973 * that it is ready to accept new textures. If this is not necessarily the
1974 * case, you should use prepare() instead.
1975 *
1976 * Normally, this is not called directly except by the GraphicsStateGuardian;
1977 * a texture does not need to be explicitly prepared by the user before it may
1978 * be rendered.
1979 */
1980TextureContext *Texture::
1981prepare_now(int view,
1982 PreparedGraphicsObjects *prepared_objects,
1983 GraphicsStateGuardianBase *gsg) {
1984 MutexHolder holder(_lock);
1985 CDReader cdata(_cycler);
1986
1987 // Don't exceed the actual number of views.
1988 view = max(min(view, cdata->_num_views - 1), 0);
1989
1990 // Get the list of TextureContexts (one per view) for this PreparedGraphicsObjects.
1991 Contexts &contexts = _prepared_views[prepared_objects];
1992 Contexts::const_iterator pvi;
1993 pvi = contexts.find(view);
1994 if (pvi != contexts.end()) {
1995 return (*pvi).second;
1996 }
1997
1998 TextureContext *tc = prepared_objects->prepare_texture_now(this, view, gsg);
1999 contexts[view] = tc;
2000
2001 return tc;
2002}
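// [Editor's note] Usage sketch, not part of the original source. Application
// code normally enqueues the upload instead of calling prepare_now() directly;
// "win" is assumed to be a GraphicsOutput with a valid GSG:
//
//   tex->prepare(win->get_gsg()->get_prepared_objects());
//
// The GSG then calls prepare_now() itself once it is ready to accept textures.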
2003
2004/**
2005 * Returns the smallest power of 2 greater than or equal to value.
2006 */
2007int Texture::
2008up_to_power_2(int value) {
2009 if (value <= 1) {
2010 return 1;
2011 }
2012 int bit = get_next_higher_bit(((unsigned int)value) - 1);
2013 return (1 << bit);
2014}
2015
2016/**
2017 * Returns the largest power of 2 less than or equal to value.
2018 */
2019int Texture::
2020down_to_power_2(int value) {
2021 if (value <= 1) {
2022 return 1;
2023 }
2024 int bit = get_next_higher_bit(((unsigned int)value) >> 1);
2025 return (1 << bit);
2026}
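// [Editor's note] Worked values for the two helpers above (added for clarity):
// up_to_power_2(640) == 1024, up_to_power_2(512) == 512,
// down_to_power_2(640) == 512, and both return 1 for any value <= 1.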
2027
2028/**
2029 * Asks the PNMImage to change its scale when it reads the image, according to
2030 * the whims of the Config.prc file.
2031 *
2032 * For most efficient results, this method should be called after
2033 * pnmimage.read_header() has been called, but before pnmimage.read(). This
2034 * method may also be called after pnmimage.read(), i.e. when the pnmimage is
2035 * already loaded; in this case it will rescale the image on the spot. Also
2036 * see rescale_texture().
2037 */
2038void Texture::
2039consider_rescale(PNMImage &pnmimage) {
2040 consider_rescale(pnmimage, get_name(), get_auto_texture_scale());
2041}
2042
2043/**
2044 * Asks the PNMImage to change its scale when it reads the image, according to
2045 * the whims of the Config.prc file.
2046 *
2047 * For most efficient results, this method should be called after
2048 * pnmimage.read_header() has been called, but before pnmimage.read(). This
2049 * method may also be called after pnmimage.read(), i.e. when the pnmimage is
2050 * already loaded; in this case it will rescale the image on the spot. Also
2051 * see rescale_texture().
2052 */
2053void Texture::
2054consider_rescale(PNMImage &pnmimage, const string &name, AutoTextureScale auto_texture_scale) {
2055 int new_x_size = pnmimage.get_x_size();
2056 int new_y_size = pnmimage.get_y_size();
2057 if (adjust_size(new_x_size, new_y_size, name, false, auto_texture_scale)) {
2058 if (pnmimage.is_valid()) {
2059 // The image is already loaded. Rescale on the spot.
2060 PNMImage new_image(new_x_size, new_y_size, pnmimage.get_num_channels(),
2061 pnmimage.get_maxval(), pnmimage.get_type(),
2062 pnmimage.get_color_space());
2063 new_image.quick_filter_from(pnmimage);
2064 pnmimage.take_from(new_image);
2065 } else {
2066 // Rescale while reading. Some image types (e.g. jpeg) can take
2067 // advantage of this.
2068 pnmimage.set_read_size(new_x_size, new_y_size);
2069 }
2070 }
2071}
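// [Editor's note] Sketch of the call order described in the comments above;
// the filename is hypothetical and "tex" is an existing Texture:
//
//   PNMImage img;
//   if (img.read_header("maps/grid.png")) {
//     tex->consider_rescale(img);     // applies texture-scale, textures-power-2, ...
//     img.read("maps/grid.png");      // reads directly at the adjusted size
//   }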
2072
2073/**
2074 * Returns the indicated TextureType converted to a string word.
2075 */
2076string Texture::
2077format_texture_type(TextureType tt) {
2078 switch (tt) {
2079 case TT_1d_texture:
2080 return "1d_texture";
2081 case TT_2d_texture:
2082 return "2d_texture";
2083 case TT_3d_texture:
2084 return "3d_texture";
2085 case TT_2d_texture_array:
2086 return "2d_texture_array";
2087 case TT_cube_map:
2088 return "cube_map";
2089 case TT_cube_map_array:
2090 return "cube_map_array";
2091 case TT_buffer_texture:
2092 return "buffer_texture";
2093 case TT_1d_texture_array:
2094 return "1d_texture_array";
2095 }
2096 return "**invalid**";
2097}
2098
2099/**
2100 * Returns the TextureType corresponding to the indicated string word.
2101 */
2102Texture::TextureType Texture::
2103string_texture_type(const string &str) {
2104 if (cmp_nocase(str, "1d_texture") == 0) {
2105 return TT_1d_texture;
2106 } else if (cmp_nocase(str, "2d_texture") == 0) {
2107 return TT_2d_texture;
2108 } else if (cmp_nocase(str, "3d_texture") == 0) {
2109 return TT_3d_texture;
2110 } else if (cmp_nocase(str, "2d_texture_array") == 0) {
2111 return TT_2d_texture_array;
2112 } else if (cmp_nocase(str, "cube_map") == 0) {
2113 return TT_cube_map;
2114 } else if (cmp_nocase(str, "cube_map_array") == 0) {
2115 return TT_cube_map_array;
2116 } else if (cmp_nocase(str, "buffer_texture") == 0) {
2117 return TT_buffer_texture;
2118 }
2119
2120 gobj_cat->error()
2121 << "Invalid Texture::TextureType value: " << str << "\n";
2122 return TT_2d_texture;
2123}
2124
2125/**
2126 * Returns the indicated ComponentType converted to a string word.
2127 */
2128string Texture::
2129format_component_type(ComponentType ct) {
2130 switch (ct) {
2131 case T_unsigned_byte:
2132 return "unsigned_byte";
2133 case T_unsigned_short:
2134 return "unsigned_short";
2135 case T_float:
2136 return "float";
2137 case T_unsigned_int_24_8:
2138 return "unsigned_int_24_8";
2139 case T_int:
2140 return "int";
2141 case T_byte:
2142 return "byte";
2143 case T_short:
2144 return "short";
2145 case T_half_float:
2146 return "half_float";
2147 case T_unsigned_int:
2148 return "unsigned_int";
2149 }
2150
2151 return "**invalid**";
2152}
2153
2154/**
2155 * Returns the ComponentType corresponding to the indicated string word.
2156 */
2157Texture::ComponentType Texture::
2158string_component_type(const string &str) {
2159 if (cmp_nocase(str, "unsigned_byte") == 0) {
2160 return T_unsigned_byte;
2161 } else if (cmp_nocase(str, "unsigned_short") == 0) {
2162 return T_unsigned_short;
2163 } else if (cmp_nocase(str, "float") == 0) {
2164 return T_float;
2165 } else if (cmp_nocase(str, "unsigned_int_24_8") == 0) {
2166 return T_unsigned_int_24_8;
2167 } else if (cmp_nocase(str, "int") == 0) {
2168 return T_int;
2169 } else if (cmp_nocase(str, "byte") == 0) {
2170 return T_byte;
2171 } else if (cmp_nocase(str, "short") == 0) {
2172 return T_short;
2173 } else if (cmp_nocase(str, "half_float") == 0) {
2174 return T_half_float;
2175 } else if (cmp_nocase(str, "unsigned_int") == 0) {
2176 return T_unsigned_int;
2177 }
2178
2179 gobj_cat->error()
2180 << "Invalid Texture::ComponentType value: " << str << "\n";
2181 return T_unsigned_byte;
2182}
2183
2184/**
2185 * Returns the indicated Format converted to a string word.
2186 */
2187string Texture::
2188format_format(Format format) {
2189 switch (format) {
2190 case F_depth_stencil:
2191 return "depth_stencil";
2192 case F_depth_component:
2193 return "depth_component";
2194 case F_depth_component16:
2195 return "depth_component16";
2196 case F_depth_component24:
2197 return "depth_component24";
2198 case F_depth_component32:
2199 return "depth_component32";
2200 case F_color_index:
2201 return "color_index";
2202 case F_red:
2203 return "red";
2204 case F_green:
2205 return "green";
2206 case F_blue:
2207 return "blue";
2208 case F_alpha:
2209 return "alpha";
2210 case F_rgb:
2211 return "rgb";
2212 case F_rgb5:
2213 return "rgb5";
2214 case F_rgb8:
2215 return "rgb8";
2216 case F_rgb12:
2217 return "rgb12";
2218 case F_rgb332:
2219 return "rgb332";
2220 case F_rgba:
2221 return "rgba";
2222 case F_rgbm:
2223 return "rgbm";
2224 case F_rgba4:
2225 return "rgba4";
2226 case F_rgba5:
2227 return "rgba5";
2228 case F_rgba8:
2229 return "rgba8";
2230 case F_rgba12:
2231 return "rgba12";
2232 case F_luminance:
2233 return "luminance";
2234 case F_luminance_alpha:
2235 return "luminance_alpha";
2236 case F_luminance_alphamask:
2237 return "luminance_alphamask";
2238 case F_rgba16:
2239 return "rgba16";
2240 case F_rgba32:
2241 return "rgba32";
2242 case F_r16:
2243 return "r16";
2244 case F_rg16:
2245 return "rg16";
2246 case F_rgb16:
2247 return "rgb16";
2248 case F_srgb:
2249 return "srgb";
2250 case F_srgb_alpha:
2251 return "srgb_alpha";
2252 case F_sluminance:
2253 return "sluminance";
2254 case F_sluminance_alpha:
2255 return "sluminance_alpha";
2256 case F_r32i:
2257 return "r32i";
2258 case F_r32:
2259 return "r32";
2260 case F_rg32:
2261 return "rg32";
2262 case F_rgb32:
2263 return "rgb32";
2264 case F_r8i:
2265 return "r8i";
2266 case F_rg8i:
2267 return "rg8i";
2268 case F_rgb8i:
2269 return "rgb8i";
2270 case F_rgba8i:
2271 return "rgba8i";
2272 case F_r11_g11_b10:
2273 return "r11g11b10";
2274 case F_rgb9_e5:
2275 return "rgb9_e5";
2276 case F_rgb10_a2:
2277 return "rgb10_a2";
2278 case F_rg:
2279 return "rg";
2280 case F_r16i:
2281 return "r16i";
2282 case F_rg16i:
2283 return "rg16i";
2284 case F_rgb16i:
2285 return "rgb16i";
2286 case F_rgba16i:
2287 return "rgba16i";
2288 case F_rg32i:
2289 return "rg32i";
2290 case F_rgb32i:
2291 return "rgb32i";
2292 case F_rgba32i:
2293 return "rgba32i";
2294 }
2295 return "**invalid**";
2296}
2297
2298/**
2299 * Returns the Format corresponding to the indicated string word.
2300 */
2301Texture::Format Texture::
2302string_format(const string &str) {
2303 if (cmp_nocase(str, "depth_stencil") == 0) {
2304 return F_depth_stencil;
2305 } else if (cmp_nocase(str, "depth_component") == 0) {
2306 return F_depth_component;
2307 } else if (cmp_nocase(str, "depth_component16") == 0 || cmp_nocase(str, "d16") == 0) {
2308 return F_depth_component16;
2309 } else if (cmp_nocase(str, "depth_component24") == 0 || cmp_nocase(str, "d24") == 0) {
2310 return F_depth_component24;
2311 } else if (cmp_nocase(str, "depth_component32") == 0 || cmp_nocase(str, "d32") == 0) {
2312 return F_depth_component32;
2313 } else if (cmp_nocase(str, "color_index") == 0) {
2314 return F_color_index;
2315 } else if (cmp_nocase(str, "red") == 0) {
2316 return F_red;
2317 } else if (cmp_nocase(str, "green") == 0) {
2318 return F_green;
2319 } else if (cmp_nocase(str, "blue") == 0) {
2320 return F_blue;
2321 } else if (cmp_nocase(str, "alpha") == 0) {
2322 return F_alpha;
2323 } else if (cmp_nocase(str, "rgb") == 0) {
2324 return F_rgb;
2325 } else if (cmp_nocase(str, "rgb5") == 0) {
2326 return F_rgb5;
2327 } else if (cmp_nocase(str, "rgb8") == 0 || cmp_nocase(str, "r8g8b8") == 0) {
2328 return F_rgb8;
2329 } else if (cmp_nocase(str, "rgb12") == 0) {
2330 return F_rgb12;
2331 } else if (cmp_nocase(str, "rgb332") == 0 || cmp_nocase(str, "r3g3b2") == 0) {
2332 return F_rgb332;
2333 } else if (cmp_nocase(str, "rgba") == 0) {
2334 return F_rgba;
2335 } else if (cmp_nocase(str, "rgbm") == 0) {
2336 return F_rgbm;
2337 } else if (cmp_nocase(str, "rgba4") == 0) {
2338 return F_rgba4;
2339 } else if (cmp_nocase(str, "rgba5") == 0) {
2340 return F_rgba5;
2341 } else if (cmp_nocase(str, "rgba8") == 0 || cmp_nocase(str, "r8g8b8a8") == 0) {
2342 return F_rgba8;
2343 } else if (cmp_nocase(str, "rgba12") == 0) {
2344 return F_rgba12;
2345 } else if (cmp_nocase(str, "luminance") == 0) {
2346 return F_luminance;
2347 } else if (cmp_nocase(str, "luminance_alpha") == 0) {
2348 return F_luminance_alpha;
2349 } else if (cmp_nocase(str, "luminance_alphamask") == 0) {
2350 return F_luminance_alphamask;
2351 } else if (cmp_nocase(str, "rgba16") == 0 || cmp_nocase(str, "r16g16b16a16") == 0) {
2352 return F_rgba16;
2353 } else if (cmp_nocase(str, "rgba32") == 0 || cmp_nocase(str, "r32g32b32a32") == 0) {
2354 return F_rgba32;
2355 } else if (cmp_nocase(str, "r16") == 0 || cmp_nocase(str, "red16") == 0) {
2356 return F_r16;
2357 } else if (cmp_nocase(str, "r16i") == 0) {
2358 return F_r16i;
2359 } else if (cmp_nocase(str, "rg16") == 0 || cmp_nocase(str, "r16g16") == 0) {
2360 return F_rg16;
2361 } else if (cmp_nocase(str, "rgb16") == 0 || cmp_nocase(str, "r16g16b16") == 0) {
2362 return F_rgb16;
2363 } else if (cmp_nocase(str, "srgb") == 0) {
2364 return F_srgb;
2365 } else if (cmp_nocase(str, "srgb_alpha") == 0) {
2366 return F_srgb_alpha;
2367 } else if (cmp_nocase(str, "sluminance") == 0) {
2368 return F_sluminance;
2369 } else if (cmp_nocase(str, "sluminance_alpha") == 0) {
2370 return F_sluminance_alpha;
2371 } else if (cmp_nocase(str, "r32i") == 0) {
2372 return F_r32i;
2373 } else if (cmp_nocase(str, "r32") == 0 || cmp_nocase(str, "red32") == 0) {
2374 return F_r32;
2375 } else if (cmp_nocase(str, "rg32") == 0 || cmp_nocase(str, "r32g32") == 0) {
2376 return F_rg32;
2377 } else if (cmp_nocase(str, "rgb32") == 0 || cmp_nocase(str, "r32g32b32") == 0) {
2378 return F_rgb32;
2379 } else if (cmp_nocase_uh(str, "r8i") == 0) {
2380 return F_r8i;
2381 } else if (cmp_nocase_uh(str, "rg8i") == 0 || cmp_nocase_uh(str, "r8g8i") == 0) {
2382 return F_rg8i;
2383 } else if (cmp_nocase_uh(str, "rgb8i") == 0 || cmp_nocase_uh(str, "r8g8b8i") == 0) {
2384 return F_rgb8i;
2385 } else if (cmp_nocase_uh(str, "rgba8i") == 0 || cmp_nocase_uh(str, "r8g8b8a8i") == 0) {
2386 return F_rgba8i;
2387 } else if (cmp_nocase(str, "r11g11b10") == 0) {
2388 return F_r11_g11_b10;
2389 } else if (cmp_nocase(str, "rgb9_e5") == 0) {
2390 return F_rgb9_e5;
2391 } else if (cmp_nocase_uh(str, "rgb10_a2") == 0 || cmp_nocase(str, "r10g10b10a2") == 0) {
2392 return F_rgb10_a2;
2393 } else if (cmp_nocase_uh(str, "rg") == 0) {
2394 return F_rg;
2395 } else if (cmp_nocase_uh(str, "r16i") == 0) {
2396 return F_r16i;
2397 } else if (cmp_nocase_uh(str, "rg16i") == 0 || cmp_nocase_uh(str, "r16g16i") == 0) {
2398 return F_rg16i;
2399 } else if (cmp_nocase_uh(str, "rgb16i") == 0 || cmp_nocase_uh(str, "r16g16b16i") == 0) {
2400 return F_rgb16i;
2401 } else if (cmp_nocase_uh(str, "rgba16i") == 0 || cmp_nocase_uh(str, "r16g16b16a16i") == 0) {
2402 return F_rgba16i;
2403 } else if (cmp_nocase_uh(str, "rg32i") == 0 || cmp_nocase_uh(str, "r32g32i") == 0) {
2404 return F_rg32i;
2405 } else if (cmp_nocase_uh(str, "rgb32i") == 0 || cmp_nocase_uh(str, "r32g32b32i") == 0) {
2406 return F_rgb32i;
2407 } else if (cmp_nocase_uh(str, "rgba32i") == 0 || cmp_nocase_uh(str, "r32g32b32a32i") == 0) {
2408 return F_rgba32i;
2409 }
2410
2411 gobj_cat->error()
2412 << "Invalid Texture::Format value: " << str << "\n";
2413 return F_rgba;
2414}
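// [Editor's note] Examples of the aliases accepted above (added for clarity):
// string_format("r8g8b8a8") returns F_rgba8, string_format("d24") returns
// F_depth_component24, and an unrecognized word logs an error and falls back
// to F_rgba.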
2415
2416/**
2417 * Returns the indicated CompressionMode converted to a string word.
2418 */
2419string Texture::
2420format_compression_mode(CompressionMode cm) {
2421 switch (cm) {
2422 case CM_default:
2423 return "default";
2424 case CM_off:
2425 return "off";
2426 case CM_on:
2427 return "on";
2428 case CM_fxt1:
2429 return "fxt1";
2430 case CM_dxt1:
2431 return "dxt1";
2432 case CM_dxt2:
2433 return "dxt2";
2434 case CM_dxt3:
2435 return "dxt3";
2436 case CM_dxt4:
2437 return "dxt4";
2438 case CM_dxt5:
2439 return "dxt5";
2440 case CM_pvr1_2bpp:
2441 return "pvr1_2bpp";
2442 case CM_pvr1_4bpp:
2443 return "pvr1_4bpp";
2444 case CM_rgtc:
2445 return "rgtc";
2446 case CM_etc1:
2447 return "etc1";
2448 case CM_etc2:
2449 return "etc2";
2450 case CM_eac:
2451 return "eac";
2452 }
2453
2454 return "**invalid**";
2455}
2456
2457/**
2458 * Returns the CompressionMode value associated with the given string
2459 * representation.
2460 */
2461Texture::CompressionMode Texture::
2462string_compression_mode(const string &str) {
2463 if (cmp_nocase_uh(str, "default") == 0) {
2464 return CM_default;
2465 } else if (cmp_nocase_uh(str, "off") == 0) {
2466 return CM_off;
2467 } else if (cmp_nocase_uh(str, "on") == 0) {
2468 return CM_on;
2469 } else if (cmp_nocase_uh(str, "fxt1") == 0) {
2470 return CM_fxt1;
2471 } else if (cmp_nocase_uh(str, "dxt1") == 0) {
2472 return CM_dxt1;
2473 } else if (cmp_nocase_uh(str, "dxt2") == 0) {
2474 return CM_dxt2;
2475 } else if (cmp_nocase_uh(str, "dxt3") == 0) {
2476 return CM_dxt3;
2477 } else if (cmp_nocase_uh(str, "dxt4") == 0) {
2478 return CM_dxt4;
2479 } else if (cmp_nocase_uh(str, "dxt5") == 0) {
2480 return CM_dxt5;
2481 } else if (cmp_nocase_uh(str, "pvr1_2bpp") == 0) {
2482 return CM_pvr1_2bpp;
2483 } else if (cmp_nocase_uh(str, "pvr1_4bpp") == 0) {
2484 return CM_pvr1_4bpp;
2485 } else if (cmp_nocase_uh(str, "rgtc") == 0) {
2486 return CM_rgtc;
2487 } else if (cmp_nocase_uh(str, "etc1") == 0) {
2488 return CM_etc1;
2489 } else if (cmp_nocase_uh(str, "etc2") == 0) {
2490 return CM_etc2;
2491 } else if (cmp_nocase_uh(str, "eac") == 0) {
2492 return CM_eac;
2493 }
2494
2495 gobj_cat->error()
2496 << "Invalid Texture::CompressionMode value: " << str << "\n";
2497 return CM_default;
2498}
2499
2500
2501/**
2502 * Returns the indicated QualityLevel converted to a string word.
2503 */
2504string Texture::
2505format_quality_level(QualityLevel ql) {
2506 switch (ql) {
2507 case QL_default:
2508 return "default";
2509 case QL_fastest:
2510 return "fastest";
2511 case QL_normal:
2512 return "normal";
2513 case QL_best:
2514 return "best";
2515 }
2516
2517 return "**invalid**";
2518}
2519
2520/**
2521 * Returns the QualityLevel value associated with the given string
2522 * representation.
2523 */
2524Texture::QualityLevel Texture::
2525string_quality_level(const string &str) {
2526 if (cmp_nocase(str, "default") == 0) {
2527 return QL_default;
2528 } else if (cmp_nocase(str, "fastest") == 0) {
2529 return QL_fastest;
2530 } else if (cmp_nocase(str, "normal") == 0) {
2531 return QL_normal;
2532 } else if (cmp_nocase(str, "best") == 0) {
2533 return QL_best;
2534 }
2535
2536 gobj_cat->error()
2537 << "Invalid Texture::QualityLevel value: " << str << "\n";
2538 return QL_default;
2539}
2540
2541/**
2542 * This method is called by the GraphicsEngine at the beginning of the frame
2543 * *after* a texture has been successfully uploaded to graphics memory. It is
2544 * intended as a callback so the texture can release its RAM image, if
2545 * _keep_ram_image is false.
2546 *
2547 * This is called indirectly when the GSG calls
2548 * GraphicsEngine::texture_uploaded().
2549 */
2550void Texture::
2551texture_uploaded() {
2552 CDLockedReader cdata(_cycler);
2553
2554 if (!keep_texture_ram && !cdata->_keep_ram_image) {
2555 // Once we have prepared the texture, we can generally safely remove the
2556 // pixels from main RAM. The GSG is now responsible for remembering what
2557 // it looks like.
2558
2559 CDWriter cdataw(_cycler, cdata, false);
2560 if (gobj_cat.is_debug()) {
2561 gobj_cat.debug()
2562 << "Dumping RAM for texture " << get_name() << "\n";
2563 }
2564 do_clear_ram_image(cdataw);
2565 }
2566}
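// [Editor's note] Usage note, not part of the original source. To keep the
// pixels in RAM after upload (for instance for later peeking or re-uploads),
// either set "keep-texture-ram 1" in Config.prc or flag the individual texture:
//
//   tex->set_keep_ram_image(true);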
2567
2568/**
2569 * Should be overridden by derived classes to return true if cull_callback()
2570 * has been defined. Otherwise, returns false to indicate cull_callback()
2571 * does not need to be called for this texture during the cull traversal.
2572 */
2573bool Texture::
2574has_cull_callback() const {
2575 return false;
2576}
2577
2578/**
2579 * If has_cull_callback() returns true, this function will be called during
2580 * the cull traversal to perform any additional operations that should be
2581 * performed at cull time.
2582 *
2583 * This is called each time the Texture is discovered applied to a Geom in the
2584 * traversal. It should return true if the Geom is visible, false if it
2585 * should be omitted.
2586 */
2587bool Texture::
2588cull_callback(CullTraverser *, const CullTraverserData &) const {
2589 return true;
2590}
2591
2592/**
2593 * A factory function to make a new Texture, used to pass to the TexturePool.
2594 */
2595PT(Texture) Texture::
2596make_texture() {
2597 return new Texture;
2598}
2599
2600/**
2601 * Returns true if the indicated component type is unsigned, false otherwise.
2602 */
2603bool Texture::
2604is_unsigned(Texture::ComponentType ctype) {
2605 return (ctype == T_unsigned_byte ||
2606 ctype == T_unsigned_short ||
2607 ctype == T_unsigned_int_24_8 ||
2608 ctype == T_unsigned_int);
2609}
2610
2611/**
2612 * Returns true if the indicated compression mode is one of the specific
2613 * compression types, false otherwise.
2614 */
2615bool Texture::
2616is_specific(Texture::CompressionMode compression) {
2617 switch (compression) {
2618 case CM_default:
2619 case CM_off:
2620 case CM_on:
2621 return false;
2622
2623 default:
2624 return true;
2625 }
2626}
2627
2628/**
2629 * Returns true if the indicated format includes alpha, false otherwise.
2630 */
2631bool Texture::
2632has_alpha(Format format) {
2633 switch (format) {
2634 case F_alpha:
2635 case F_rgba:
2636 case F_rgbm:
2637 case F_rgba4:
2638 case F_rgba5:
2639 case F_rgba8:
2640 case F_rgba12:
2641 case F_rgba16:
2642 case F_rgba32:
2643 case F_luminance_alpha:
2644 case F_luminance_alphamask:
2645 case F_srgb_alpha:
2646 case F_sluminance_alpha:
2647 case F_rgba8i:
2648 case F_rgb10_a2:
2649 case F_rgba16i:
2650 case F_rgba32i:
2651 return true;
2652
2653 default:
2654 return false;
2655 }
2656}
2657
2658/**
2659 * Returns true if the indicated format includes a binary alpha only, false
2660 * otherwise.
2661 */
2662bool Texture::
2663has_binary_alpha(Format format) {
2664 switch (format) {
2665 case F_rgbm:
2666 return true;
2667
2668 default:
2669 return false;
2670 }
2671}
2672
2673/**
2674 * Returns true if the indicated format is in the sRGB color space, false
2675 * otherwise.
2676 */
2677bool Texture::
2678is_srgb(Format format) {
2679 switch (format) {
2680 case F_srgb:
2681 case F_srgb_alpha:
2682 case F_sluminance:
2683 case F_sluminance_alpha:
2684 return true;
2685
2686 default:
2687 return false;
2688 }
2689}
2690
2691/**
2692 * Returns true if the indicated format is an integer format, false otherwise.
2693 */
2694bool Texture::
2695is_integer(Format format) {
2696 switch (format) {
2697 case F_r32i:
2698 case F_r8i:
2699 case F_rg8i:
2700 case F_rgb8i:
2701 case F_rgba8i:
2702 case F_r16i:
2703 case F_rg16i:
2704 case F_rgb16i:
2705 case F_rgba16i:
2706 case F_rg32i:
2707 case F_rgb32i:
2708 case F_rgba32i:
2709 return true;
2710
2711 default:
2712 return false;
2713 }
2714}
2715
2716/**
2717 * Computes the proper size of the texture, based on the original size, the
2718 * filename, and the resizing whims of the config file.
2719 *
2720 * x_size and y_size should be loaded with the texture image's original size
2721 * on disk. On return, they will be loaded with the texture's in-memory
2722 * target size. The return value is true if the size has been adjusted, or
2723 * false if it is the same.
2724 */
2725bool Texture::
2726adjust_size(int &x_size, int &y_size, const string &name,
2727 bool for_padding, AutoTextureScale auto_texture_scale) {
2728 bool exclude = false;
2729 int num_excludes = exclude_texture_scale.get_num_unique_values();
2730 for (int i = 0; i < num_excludes && !exclude; ++i) {
2731 GlobPattern pat(exclude_texture_scale.get_unique_value(i));
2732 if (pat.matches(name)) {
2733 exclude = true;
2734 }
2735 }
2736
2737 int new_x_size = x_size;
2738 int new_y_size = y_size;
2739
2740 if (!exclude) {
2741 new_x_size = (int)cfloor(new_x_size * texture_scale + 0.5);
2742 new_y_size = (int)cfloor(new_y_size * texture_scale + 0.5);
2743
2744 // Don't auto-scale below 4 in either dimension. This causes problems for
2745 // DirectX and texture compression.
2746 new_x_size = min(max(new_x_size, (int)texture_scale_limit), x_size);
2747 new_y_size = min(max(new_y_size, (int)texture_scale_limit), y_size);
2748 }
2749
2750 AutoTextureScale ats = auto_texture_scale;
2751 if (ats == ATS_unspecified) {
2752 ats = get_textures_power_2();
2753 }
2754 if (!for_padding && ats == ATS_pad) {
2755 // If we're not calculating the padding size--that is, we're calculating
2756 // the initial scaling size instead--then ignore ATS_pad, and treat it the
2757 // same as ATS_none.
2758 ats = ATS_none;
2759 }
2760
2761 switch (ats) {
2762 case ATS_down:
2763 new_x_size = down_to_power_2(new_x_size);
2764 new_y_size = down_to_power_2(new_y_size);
2765 break;
2766
2767 case ATS_up:
2768 case ATS_pad:
2769 new_x_size = up_to_power_2(new_x_size);
2770 new_y_size = up_to_power_2(new_y_size);
2771 break;
2772
2773 case ATS_none:
2774 case ATS_unspecified:
2775 break;
2776 }
2777
2778 ats = textures_square.get_value();
2779 if (!for_padding && ats == ATS_pad) {
2780 ats = ATS_none;
2781 }
2782 switch (ats) {
2783 case ATS_down:
2784 new_x_size = new_y_size = min(new_x_size, new_y_size);
2785 break;
2786
2787 case ATS_up:
2788 case ATS_pad:
2789 new_x_size = new_y_size = max(new_x_size, new_y_size);
2790 break;
2791
2792 case ATS_none:
2793 case ATS_unspecified:
2794 break;
2795 }
2796
2797 if (!exclude) {
2798 int max_dimension = max_texture_dimension;
2799
2800 if (max_dimension < 0) {
2801 GraphicsStateGuardianBase *gsg = GraphicsStateGuardianBase::get_default_gsg();
2802 if (gsg != nullptr) {
2803 max_dimension = gsg->get_max_texture_dimension();
2804 }
2805 }
2806
2807 if (max_dimension > 0) {
2808 new_x_size = min(new_x_size, (int)max_dimension);
2809 new_y_size = min(new_y_size, (int)max_dimension);
2810 }
2811 }
2812
2813 if (x_size != new_x_size || y_size != new_y_size) {
2814 x_size = new_x_size;
2815 y_size = new_y_size;
2816 return true;
2817 }
2818
2819 return false;
2820}
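// [Editor's note] Worked example, assuming "texture-scale 0.5" in Config.prc,
// no exclude-texture-scale match, and otherwise default settings:
//
//   int x = 300, y = 200;
//   bool changed = Texture::adjust_size(x, y, "bricks.png", false, ATS_down);
//   // 300x200 is scaled to 150x100, then rounded down to powers of 2:
//   // changed == true, x == 128, y == 64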
2821
2822/**
2823 * May be called prior to calling read_txo() or any bam-related Texture-
2824 * creating callback, to ensure that the proper dynamic libraries for a
2825 * Texture of the current class type, and the indicated filename, have
2826 * already been loaded.
2827 *
2828 * This is a low-level function that should not normally need to be called
2829 * directly by the user.
2830 *
2831 * Note that for best results you must first create a Texture object of the
2832 * appropriate class type for your filename, for instance with
2833 * TexturePool::make_texture().
2834 */
2835void Texture::
2836ensure_loader_type(const Filename &filename) {
2837 // For a plain Texture type, this doesn't need to do anything.
2838}
2839
2840/**
2841 * Called by TextureContext to give the Texture a chance to mark itself dirty
2842 * before rendering, if necessary.
2843 */
2844void Texture::
2845reconsider_dirty() {
2846}
2847
2848/**
2849 * Works like adjust_size, but also considers the texture class. Movie
2850 * textures, for instance, always pad outwards, regardless of textures-
2851 * power-2.
2852 */
2853bool Texture::
2854do_adjust_this_size(const CData *cdata, int &x_size, int &y_size, const string &name,
2855 bool for_padding) const {
2856 return adjust_size(x_size, y_size, name, for_padding, cdata->_auto_texture_scale);
2857}
2858
2859/**
2860 * The internal implementation of the various read() methods.
2861 */
2862bool Texture::
2863do_read(CData *cdata, const Filename &fullpath, const Filename &alpha_fullpath,
2864 int primary_file_num_channels, int alpha_file_channel,
2865 int z, int n, bool read_pages, bool read_mipmaps,
2866 const LoaderOptions &options, BamCacheRecord *record) {
2867 PStatTimer timer(_texture_read_pcollector);
2868
2869 if (options.get_auto_texture_scale() != ATS_unspecified) {
2870 cdata->_auto_texture_scale = options.get_auto_texture_scale();
2871 }
2872
2873 bool header_only = ((options.get_texture_flags() & (LoaderOptions::TF_preload | LoaderOptions::TF_preload_simple)) == 0);
2874 if (record != nullptr) {
2875 header_only = false;
2876 }
2877
2878 if ((z == 0 || read_pages) && (n == 0 || read_mipmaps)) {
2879 // When we re-read the page 0 of the base image, we clear everything and
2880 // start over.
2881 do_clear_ram_image(cdata);
2882 }
2883
2884 if (is_txo_filename(fullpath)) {
2885 if (record != nullptr) {
2886 record->add_dependent_file(fullpath);
2887 }
2888 return do_read_txo_file(cdata, fullpath);
2889 }
2890
2891 if (is_dds_filename(fullpath)) {
2892 if (record != nullptr) {
2893 record->add_dependent_file(fullpath);
2894 }
2895 return do_read_dds_file(cdata, fullpath, header_only);
2896 }
2897
2898 if (is_ktx_filename(fullpath)) {
2899 if (record != nullptr) {
2900 record->add_dependent_file(fullpath);
2901 }
2902 return do_read_ktx_file(cdata, fullpath, header_only);
2903 }
2904
2905 // If read_pages or read_mipmaps is specified, then z and n actually
2906 // indicate z_size and n_size, respectively--the numerical limits on which
2907 // to search for filenames.
2908 int z_size = z;
2909 int n_size = n;
2910
2911 // Certain texture types have an implicit z_size. If z_size is omitted,
2912 // choose an appropriate default based on the texture type.
2913 if (z_size == 0) {
2914 switch (cdata->_texture_type) {
2915 case TT_1d_texture:
2916 case TT_2d_texture:
2917 case TT_buffer_texture:
2918 z_size = 1;
2919 break;
2920
2921 case TT_cube_map:
2922 z_size = 6;
2923 break;
2924
2925 default:
2926 break;
2927 }
2928 }
2929
2930 int num_views = 0;
2931 if (options.get_texture_flags() & LoaderOptions::TF_multiview) {
2932 // We'll be loading a multiview texture.
2933 read_pages = true;
2934 if (options.get_texture_num_views() != 0) {
2935 num_views = options.get_texture_num_views();
2936 do_set_num_views(cdata, num_views);
2937 }
2938 }
2939
2940 VirtualFileSystem *vfs = VirtualFileSystem::get_global_ptr();
2941
2942 if (read_pages && read_mipmaps) {
2943 // Read a sequence of pages * mipmap levels.
2944 Filename fullpath_pattern = Filename::pattern_filename(fullpath);
2945 Filename alpha_fullpath_pattern = Filename::pattern_filename(alpha_fullpath);
2946 do_set_z_size(cdata, z_size);
2947
2948 n = 0;
2949 while (true) {
2950 // For mipmap level 0, the total number of pages might be determined by
2951 // the number of files we find. After mipmap level 0, though, the
2952 // number of pages is predetermined.
2953 if (n != 0) {
2954 z_size = do_get_expected_mipmap_z_size(cdata, n);
2955 }
2956
2957 z = 0;
2958
2959 Filename n_pattern = Filename::pattern_filename(fullpath_pattern.get_filename_index(z));
2960 Filename alpha_n_pattern = Filename::pattern_filename(alpha_fullpath_pattern.get_filename_index(z));
2961
2962 if (!n_pattern.has_hash()) {
2963 gobj_cat.error()
2964 << "Filename requires two different hash sequences: " << fullpath
2965 << "\n";
2966 return false;
2967 }
2968
2969 Filename file = n_pattern.get_filename_index(n);
2970 Filename alpha_file = alpha_n_pattern.get_filename_index(n);
2971
2972 if ((n_size == 0 && (vfs->exists(file) || n == 0)) ||
2973 (n_size != 0 && n < n_size)) {
2974 // Continue through the loop.
2975 } else {
2976 // We've reached the end of the mipmap sequence.
2977 break;
2978 }
2979
2980 int num_pages = z_size * num_views;
2981 while ((num_pages == 0 && (vfs->exists(file) || z == 0)) ||
2982 (num_pages != 0 && z < num_pages)) {
2983 if (!do_read_one(cdata, file, alpha_file, z, n, primary_file_num_channels,
2984 alpha_file_channel, options, header_only, record)) {
2985 return false;
2986 }
2987 ++z;
2988
2989 n_pattern = Filename::pattern_filename(fullpath_pattern.get_filename_index(z));
2990 file = n_pattern.get_filename_index(n);
2991 alpha_file = alpha_n_pattern.get_filename_index(n);
2992 }
2993
2994 if (n == 0 && n_size == 0) {
2995 // If n_size is not specified, it gets implicitly set after we read
2996 // the base texture image (which determines the size of the texture).
2997 n_size = do_get_expected_num_mipmap_levels(cdata);
2998 }
2999 ++n;
3000 }
3001 cdata->_fullpath = fullpath_pattern;
3002 cdata->_alpha_fullpath = alpha_fullpath_pattern;
3003
3004 } else if (read_pages) {
3005 // Read a sequence of cube map or 3-D texture pages.
3006 Filename fullpath_pattern = Filename::pattern_filename(fullpath);
3007 Filename alpha_fullpath_pattern = Filename::pattern_filename(alpha_fullpath);
3008 if (!fullpath_pattern.has_hash()) {
3009 gobj_cat.error()
3010 << "Filename requires a hash mark: " << fullpath
3011 << "\n";
3012 return false;
3013 }
3014
3015 do_set_z_size(cdata, z_size);
3016 z = 0;
3017 Filename file = fullpath_pattern.get_filename_index(z);
3018 Filename alpha_file = alpha_fullpath_pattern.get_filename_index(z);
3019
3020 int num_pages = z_size * num_views;
3021 while ((num_pages == 0 && (vfs->exists(file) || z == 0)) ||
3022 (num_pages != 0 && z < num_pages)) {
3023 if (!do_read_one(cdata, file, alpha_file, z, 0, primary_file_num_channels,
3024 alpha_file_channel, options, header_only, record)) {
3025 return false;
3026 }
3027 ++z;
3028
3029 file = fullpath_pattern.get_filename_index(z);
3030 alpha_file = alpha_fullpath_pattern.get_filename_index(z);
3031 }
3032 cdata->_fullpath = fullpath_pattern;
3033 cdata->_alpha_fullpath = alpha_fullpath_pattern;
3034
3035 } else if (read_mipmaps) {
3036 // Read a sequence of mipmap levels.
3037 Filename fullpath_pattern = Filename::pattern_filename(fullpath);
3038 Filename alpha_fullpath_pattern = Filename::pattern_filename(alpha_fullpath);
3039 if (!fullpath_pattern.has_hash()) {
3040 gobj_cat.error()
3041 << "Filename requires a hash mark: " << fullpath
3042 << "\n";
3043 return false;
3044 }
3045
3046 n = 0;
3047 Filename file = fullpath_pattern.get_filename_index(n);
3048 Filename alpha_file = alpha_fullpath_pattern.get_filename_index(n);
3049
3050 while ((n_size == 0 && (vfs->exists(file) || n == 0)) ||
3051 (n_size != 0 && n < n_size)) {
3052 if (!do_read_one(cdata, file, alpha_file, z, n,
3053 primary_file_num_channels, alpha_file_channel,
3054 options, header_only, record)) {
3055 return false;
3056 }
3057 ++n;
3058
3059 if (n_size == 0 && n >= do_get_expected_num_mipmap_levels(cdata)) {
3060 // Don't try to read more than the requisite number of mipmap levels
3061 // (unless the user insisted on it for some reason).
3062 break;
3063 }
3064
3065 file = fullpath_pattern.get_filename_index(n);
3066 alpha_file = alpha_fullpath_pattern.get_filename_index(n);
3067 }
3068 cdata->_fullpath = fullpath_pattern;
3069 cdata->_alpha_fullpath = alpha_fullpath_pattern;
3070
3071 } else {
3072 // Just an ordinary read of one file.
3073 if (!do_read_one(cdata, fullpath, alpha_fullpath, z, n,
3074 primary_file_num_channels, alpha_file_channel,
3075 options, header_only, record)) {
3076 return false;
3077 }
3078 }
3079
3080 cdata->_has_read_pages = read_pages;
3081 cdata->_has_read_mipmaps = read_mipmaps;
3082 cdata->_num_mipmap_levels_read = cdata->_ram_images.size();
3083
3084 if (header_only) {
3085 // If we were only supposed to be checking the image header information,
3086 // don't let the Texture think that it's got the image now.
3087 do_clear_ram_image(cdata);
3088 } else {
3089 if ((options.get_texture_flags() & LoaderOptions::TF_preload) != 0) {
3090 // If we intend to keep the ram image around, consider compressing it
3091 // etc.
3092 bool generate_mipmaps = ((options.get_texture_flags() & LoaderOptions::TF_generate_mipmaps) != 0);
3093 bool allow_compression = ((options.get_texture_flags() & LoaderOptions::TF_allow_compression) != 0);
3094 do_consider_auto_process_ram_image(cdata, generate_mipmaps || uses_mipmaps(), allow_compression);
3095 }
3096 }
3097
3098 return true;
3099}
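// [Editor's note] Illustration of the hash-mark patterns consumed above; the
// filenames are hypothetical. Loading a cube map reads six numbered pages:
//
//   PT(Texture) sky = TexturePool::load_cube_map("sky_#.png");
//   // expands to sky_0.png .. sky_5.png (z = 0..5) via read_pages
//
// As the error message above notes, reading pages and mipmap levels from the
// same pattern requires two separate hash sequences in the filename.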
3100
3101/**
3102 * Called only from do_read(), this method reads a single image file, either
3103 * one page or one mipmap level.
3104 */
3105bool Texture::
3106do_read_one(CData *cdata, const Filename &fullpath, const Filename &alpha_fullpath,
3107 int z, int n, int primary_file_num_channels, int alpha_file_channel,
3108 const LoaderOptions &options, bool header_only, BamCacheRecord *record) {
3109 if (record != nullptr) {
3110 nassertr(!header_only, false);
3111 record->add_dependent_file(fullpath);
3112 }
3113
3114 PNMImage image;
3115 PfmFile pfm;
3116 PNMReader *image_reader = image.make_reader(fullpath, nullptr, false);
3117 if (image_reader == nullptr) {
3118 gobj_cat.error()
3119 << "Texture::read() - couldn't read: " << fullpath << endl;
3120 return false;
3121 }
3122 image.copy_header_from(*image_reader);
3123
3124 AutoTextureScale auto_texture_scale = do_get_auto_texture_scale(cdata);
3125
3126 // If it's a floating-point image file, read it by default into a floating-
3127 // point texture.
3128 bool read_floating_point;
3129 int texture_load_type = (options.get_texture_flags() & (LoaderOptions::TF_integer | LoaderOptions::TF_float));
3130 switch (texture_load_type) {
3131 case LoaderOptions::TF_integer:
3132 read_floating_point = false;
3133 break;
3134
3135 case LoaderOptions::TF_float:
3136 read_floating_point = true;
3137 break;
3138
3139 default:
3140 // Neither TF_integer nor TF_float was specified; determine which way the
3141 // texture wants to be loaded.
3142 read_floating_point = (image_reader->is_floating_point());
3143 if (!alpha_fullpath.empty()) {
3144 read_floating_point = false;
3145 }
3146 }
3147
3148 if (header_only || textures_header_only) {
3149 int x_size = image.get_x_size();
3150 int y_size = image.get_y_size();
3151 if (z == 0 && n == 0) {
3152 cdata->_orig_file_x_size = x_size;
3153 cdata->_orig_file_y_size = y_size;
3154 }
3155
3156 if (textures_header_only) {
3157 // In this mode, we never intend to load the actual texture image
3158 // anyway, so we don't even need to make the size right.
3159 x_size = 1;
3160 y_size = 1;
3161
3162 } else {
3163 adjust_size(x_size, y_size, fullpath.get_basename(), false, auto_texture_scale);
3164 }
3165
3166 if (read_floating_point) {
3167 pfm.clear(x_size, y_size, image.get_num_channels());
3168 } else {
3169 image = PNMImage(x_size, y_size, image.get_num_channels(),
3170 image.get_maxval(), image.get_type(),
3171 image.get_color_space());
3172 image.fill(0.2, 0.3, 1.0);
3173 if (image.has_alpha()) {
3174 image.alpha_fill(1.0);
3175 }
3176 }
3177 delete image_reader;
3178
3179 } else {
3180 if (z == 0 && n == 0) {
3181 int x_size = image.get_x_size();
3182 int y_size = image.get_y_size();
3183
3184 cdata->_orig_file_x_size = x_size;
3185 cdata->_orig_file_y_size = y_size;
3186
3187 if (adjust_size(x_size, y_size, fullpath.get_basename(), false, auto_texture_scale)) {
3188 image.set_read_size(x_size, y_size);
3189 }
3190 } else {
3191 image.set_read_size(do_get_expected_mipmap_x_size(cdata, n),
3192 do_get_expected_mipmap_y_size(cdata, n));
3193 }
3194
3195 if (image.get_x_size() != image.get_read_x_size() ||
3196 image.get_y_size() != image.get_read_y_size()) {
3197 gobj_cat.info()
3198 << "Implicitly rescaling " << fullpath.get_basename() << " from "
3199 << image.get_x_size() << " by " << image.get_y_size() << " to "
3200 << image.get_read_x_size() << " by " << image.get_read_y_size()
3201 << "\n";
3202 }
3203
3204 bool success;
3205 if (read_floating_point) {
3206 success = pfm.read(image_reader);
3207 } else {
3208 success = image.read(image_reader);
3209 }
3210
3211 if (!success) {
3212 gobj_cat.error()
3213 << "Texture::read() - couldn't read: " << fullpath << endl;
3214 return false;
3215 }
3216 Thread::consider_yield();
3217 }
3218
3219 PNMImage alpha_image;
3220 if (!alpha_fullpath.empty()) {
3221 PNMReader *alpha_image_reader = alpha_image.make_reader(alpha_fullpath, nullptr, false);
3222 if (alpha_image_reader == nullptr) {
3223 gobj_cat.error()
3224 << "Texture::read() - couldn't read: " << alpha_fullpath << endl;
3225 return false;
3226 }
3227 alpha_image.copy_header_from(*alpha_image_reader);
3228
3229 if (record != nullptr) {
3230 record->add_dependent_file(alpha_fullpath);
3231 }
3232
3233 if (header_only || textures_header_only) {
3234 int x_size = image.get_x_size();
3235 int y_size = image.get_y_size();
3236 alpha_image = PNMImage(x_size, y_size, alpha_image.get_num_channels(),
3237 alpha_image.get_maxval(), alpha_image.get_type(),
3238 alpha_image.get_color_space());
3239 alpha_image.fill(1.0);
3240 if (alpha_image.has_alpha()) {
3241 alpha_image.alpha_fill(1.0);
3242 }
3243 delete alpha_image_reader;
3244
3245 } else {
3246 if (image.get_x_size() != alpha_image.get_x_size() ||
3247 image.get_y_size() != alpha_image.get_y_size()) {
3248 gobj_cat.info()
3249 << "Implicitly rescaling " << alpha_fullpath.get_basename()
3250 << " from " << alpha_image.get_x_size() << " by "
3251 << alpha_image.get_y_size() << " to " << image.get_x_size()
3252 << " by " << image.get_y_size() << "\n";
3253 alpha_image.set_read_size(image.get_x_size(), image.get_y_size());
3254 }
3255
3256 if (!alpha_image.read(alpha_image_reader)) {
3257 gobj_cat.error()
3258 << "Texture::read() - couldn't read (alpha): " << alpha_fullpath << endl;
3259 return false;
3260 }
3261 Thread::consider_yield();
3262 }
3263 }
3264
3265 if (z == 0 && n == 0) {
3266 if (!has_name()) {
3267 set_name(fullpath.get_basename_wo_extension());
3268 }
3269 if (cdata->_filename.empty()) {
3270 cdata->_filename = fullpath;
3271 cdata->_alpha_filename = alpha_fullpath;
3272
3273 // The first time we set the filename via a read() operation, we clear
3274 // keep_ram_image. The user can always set it again later if he needs
3275 // to.
3276 cdata->_keep_ram_image = false;
3277 }
3278
3279 cdata->_fullpath = fullpath;
3280 cdata->_alpha_fullpath = alpha_fullpath;
3281 }
3282
3283 if (!alpha_fullpath.empty()) {
3284 // The grayscale (alpha channel) image must be the same size as the main
3285 // image. This should already have been guaranteed by the above.
3286 if (image.get_x_size() != alpha_image.get_x_size() ||
3287 image.get_y_size() != alpha_image.get_y_size()) {
3288 gobj_cat.info()
3289 << "Automatically rescaling " << alpha_fullpath.get_basename()
3290 << " from " << alpha_image.get_x_size() << " by "
3291 << alpha_image.get_y_size() << " to " << image.get_x_size()
3292 << " by " << image.get_y_size() << "\n";
3293
3294 PNMImage scaled(image.get_x_size(), image.get_y_size(),
3295 alpha_image.get_num_channels(),
3296 alpha_image.get_maxval(), alpha_image.get_type(),
3297 alpha_image.get_color_space());
3298 scaled.quick_filter_from(alpha_image);
3299 Thread::consider_yield();
3300 alpha_image = scaled;
3301 }
3302 }
3303
3304 if (n == 0) {
3305 consider_downgrade(image, primary_file_num_channels, get_name());
3306 cdata->_primary_file_num_channels = image.get_num_channels();
3307 cdata->_alpha_file_channel = 0;
3308 }
3309
3310 if (!alpha_fullpath.empty()) {
3311 // Make the original image a 4-component image by taking the grayscale
3312 // value from the second image.
3313 image.add_alpha();
3314
3315 if (alpha_file_channel == 4 ||
3316 (alpha_file_channel == 2 && alpha_image.get_num_channels() == 2)) {
3317
3318 if (!alpha_image.has_alpha()) {
3319 gobj_cat.error()
3320 << alpha_fullpath.get_basename() << " has no channel " << alpha_file_channel << ".\n";
3321 } else {
3322 // Use the alpha channel.
3323 for (int x = 0; x < image.get_x_size(); x++) {
3324 for (int y = 0; y < image.get_y_size(); y++) {
3325 image.set_alpha(x, y, alpha_image.get_alpha(x, y));
3326 }
3327 }
3328 }
3329 cdata->_alpha_file_channel = alpha_image.get_num_channels();
3330
3331 } else if (alpha_file_channel >= 1 && alpha_file_channel <= 3 &&
3332 alpha_image.get_num_channels() >= 3) {
3333 // Use the appropriate red, green, or blue channel.
3334 for (int x = 0; x < image.get_x_size(); x++) {
3335 for (int y = 0; y < image.get_y_size(); y++) {
3336 image.set_alpha(x, y, alpha_image.get_channel_val(x, y, alpha_file_channel - 1));
3337 }
3338 }
3339 cdata->_alpha_file_channel = alpha_file_channel;
3340
3341 } else {
3342 // Use the grayscale channel.
3343 for (int x = 0; x < image.get_x_size(); x++) {
3344 for (int y = 0; y < image.get_y_size(); y++) {
3345 image.set_alpha(x, y, alpha_image.get_gray(x, y));
3346 }
3347 }
3348 cdata->_alpha_file_channel = 0;
3349 }
3350 }
3351
3352 if (read_floating_point) {
3353 if (!do_load_one(cdata, pfm, fullpath.get_basename(), z, n, options)) {
3354 return false;
3355 }
3356 } else {
3357 // Now see if we want to pad the image within a larger power-of-2 image.
3358 int pad_x_size = 0;
3359 int pad_y_size = 0;
3360 if (do_get_auto_texture_scale(cdata) == ATS_pad) {
3361 int new_x_size = image.get_x_size();
3362 int new_y_size = image.get_y_size();
3363 if (do_adjust_this_size(cdata, new_x_size, new_y_size, fullpath.get_basename(), true)) {
3364 pad_x_size = new_x_size - image.get_x_size();
3365 pad_y_size = new_y_size - image.get_y_size();
3366 PNMImage new_image(new_x_size, new_y_size, image.get_num_channels(),
3367 image.get_maxval(), image.get_type(),
3368 image.get_color_space());
3369 new_image.copy_sub_image(image, 0, new_y_size - image.get_y_size());
3370 image.take_from(new_image);
3371 }
3372 }
3373
3374 if (!do_load_one(cdata, image, fullpath.get_basename(), z, n, options)) {
3375 return false;
3376 }
3377
3378 do_set_pad_size(cdata, pad_x_size, pad_y_size, 0);
3379 }
3380 return true;
3381}
3382
3383/**
3384 * Internal method to load a single page or mipmap level.
3385 */
3386bool Texture::
3387do_load_one(CData *cdata, const PNMImage &pnmimage, const string &name, int z, int n,
3388 const LoaderOptions &options) {
3389 if (cdata->_ram_images.size() <= 1 && n == 0) {
3390 // A special case for mipmap level 0. When we load mipmap level 0, unless
3391 // we already have mipmap levels, it determines the image properties like
3392 // size and number of components.
3393 if (!do_reconsider_z_size(cdata, z, options)) {
3394 return false;
3395 }
3396 nassertr(z >= 0 && z < cdata->_z_size * cdata->_num_views, false);
3397
3398 if (z == 0) {
3399 ComponentType component_type = T_unsigned_byte;
3400 xelval maxval = pnmimage.get_maxval();
3401 if (maxval > 255) {
3402 component_type = T_unsigned_short;
3403 }
3404
3405 if (!do_reconsider_image_properties(cdata, pnmimage.get_x_size(), pnmimage.get_y_size(),
3406 pnmimage.get_num_channels(), component_type,
3407 z, options)) {
3408 return false;
3409 }
3410 }
3411
3412 do_modify_ram_image(cdata);
3413 cdata->_loaded_from_image = true;
3414 }
3415
3416 do_modify_ram_mipmap_image(cdata, n);
3417
3418 // Ensure the PNMImage is an appropriate size.
3419 int x_size = do_get_expected_mipmap_x_size(cdata, n);
3420 int y_size = do_get_expected_mipmap_y_size(cdata, n);
3421 if (pnmimage.get_x_size() != x_size ||
3422 pnmimage.get_y_size() != y_size) {
3423 gobj_cat.info()
3424 << "Automatically rescaling " << name;
3425 if (n != 0) {
3426 gobj_cat.info(false)
3427 << " mipmap level " << n;
3428 }
3429 gobj_cat.info(false)
3430 << " from " << pnmimage.get_x_size() << " by "
3431 << pnmimage.get_y_size() << " to " << x_size << " by "
3432 << y_size << "\n";
3433
3434 PNMImage scaled(x_size, y_size, pnmimage.get_num_channels(),
3435 pnmimage.get_maxval(), pnmimage.get_type(),
3436 pnmimage.get_color_space());
3437 scaled.quick_filter_from(pnmimage);
3438 Thread::consider_yield();
3439
3440 convert_from_pnmimage(cdata->_ram_images[n]._image,
3441 do_get_expected_ram_mipmap_page_size(cdata, n),
3442 x_size, 0, 0, z, scaled,
3443 cdata->_num_components, cdata->_component_width);
3444 } else {
3445 // Now copy the pixel data from the PNMImage into our internal
3446 // cdata->_image component.
3447 convert_from_pnmimage(cdata->_ram_images[n]._image,
3448 do_get_expected_ram_mipmap_page_size(cdata, n),
3449 x_size, 0, 0, z, pnmimage,
3450 cdata->_num_components, cdata->_component_width);
3451 }
3452 Thread::consider_yield();
3453
3454 return true;
3455}
3456
3457/**
3458 * Internal method to load a single page or mipmap level.
3459 */
3460bool Texture::
3461do_load_one(CData *cdata, const PfmFile &pfm, const string &name, int z, int n,
3462 const LoaderOptions &options) {
3463 if (cdata->_ram_images.size() <= 1 && n == 0) {
3464 // A special case for mipmap level 0. When we load mipmap level 0, unless
3465 // we already have mipmap levels, it determines the image properties like
3466 // size and number of components.
3467 if (!do_reconsider_z_size(cdata, z, options)) {
3468 return false;
3469 }
3470 nassertr(z >= 0 && z < cdata->_z_size * cdata->_num_views, false);
3471
3472 if (z == 0) {
3473 ComponentType component_type = T_float;
3474 if (!do_reconsider_image_properties(cdata, pfm.get_x_size(), pfm.get_y_size(),
3475 pfm.get_num_channels(), component_type,
3476 z, options)) {
3477 return false;
3478 }
3479 }
3480
3481 do_modify_ram_image(cdata);
3482 cdata->_loaded_from_image = true;
3483 }
3484
3485 do_modify_ram_mipmap_image(cdata, n);
3486
3487 // Ensure the PfmFile is an appropriate size.
3488 int x_size = do_get_expected_mipmap_x_size(cdata, n);
3489 int y_size = do_get_expected_mipmap_y_size(cdata, n);
3490 if (pfm.get_x_size() != x_size ||
3491 pfm.get_y_size() != y_size) {
3492 gobj_cat.info()
3493 << "Automatically rescaling " << name;
3494 if (n != 0) {
3495 gobj_cat.info(false)
3496 << " mipmap level " << n;
3497 }
3498 gobj_cat.info(false)
3499 << " from " << pfm.get_x_size() << " by "
3500 << pfm.get_y_size() << " to " << x_size << " by "
3501 << y_size << "\n";
3502
3503 PfmFile scaled(pfm);
3504 scaled.resize(x_size, y_size);
3505 Thread::consider_yield();
3506
3507 convert_from_pfm(cdata->_ram_images[n]._image,
3508 do_get_expected_ram_mipmap_page_size(cdata, n), z,
3509 scaled, cdata->_num_components, cdata->_component_width);
3510 } else {
3511 // Now copy the pixel data from the PfmFile into our internal
3512 // cdata->_image component.
3513 convert_from_pfm(cdata->_ram_images[n]._image,
3514 do_get_expected_ram_mipmap_page_size(cdata, n), z,
3515 pfm, cdata->_num_components, cdata->_component_width);
3516 }
3517 Thread::consider_yield();
3518
3519 return true;
3520}
3521
3522/**
3523 * Internal method to load an image into a section of a texture page or mipmap
3524 * level.
3525 */
3526bool Texture::
3527do_load_sub_image(CData *cdata, const PNMImage &image, int x, int y, int z, int n) {
3528 nassertr(n >= 0 && (size_t)n < cdata->_ram_images.size(), false);
3529
3530 int tex_x_size = do_get_expected_mipmap_x_size(cdata, n);
3531 int tex_y_size = do_get_expected_mipmap_y_size(cdata, n);
3532 int tex_z_size = do_get_expected_mipmap_z_size(cdata, n);
3533
3534 nassertr(x >= 0 && x < tex_x_size, false);
3535 nassertr(y >= 0 && y < tex_y_size, false);
3536 nassertr(z >= 0 && z < tex_z_size, false);
3537
3538 nassertr(image.get_x_size() + x <= tex_x_size, false);
3539 nassertr(image.get_y_size() + y <= tex_y_size, false);
3540
3541 // Flip y
3542 y = cdata->_y_size - (image.get_y_size() + y);
3543
3544 cdata->inc_image_modified();
3545 do_modify_ram_mipmap_image(cdata, n);
3546 convert_from_pnmimage(cdata->_ram_images[n]._image,
3547 do_get_expected_ram_mipmap_page_size(cdata, n),
3548 tex_x_size, x, y, z, image,
3549 cdata->_num_components, cdata->_component_width);
3550
3551 return true;
3552}
3553
3554/**
3555 * Called internally when read() detects a txo file. Assumes the lock is
3556 * already held.
3557 */
3558bool Texture::
3559do_read_txo_file(CData *cdata, const Filename &fullpath) {
3560 VirtualFileSystem *vfs = VirtualFileSystem::get_global_ptr();
3561
3562 Filename filename = Filename::binary_filename(fullpath);
3563 PT(VirtualFile) file = vfs->get_file(filename);
3564 if (file == nullptr) {
3565 // No such file.
3566 gobj_cat.error()
3567 << "Could not find " << fullpath << "\n";
3568 return false;
3569 }
3570
3571 if (gobj_cat.is_debug()) {
3572 gobj_cat.debug()
3573 << "Reading texture object " << filename << "\n";
3574 }
3575
3576 istream *in = file->open_read_file(true);
3577 if (in == nullptr) {
3578 gobj_cat.error()
3579 << "Failed to open " << filename << " for reading.\n";
3580 return false;
3581 }
3582
3583 bool success = do_read_txo(cdata, *in, fullpath);
3584 vfs->close_read_file(in);
3585
3586 cdata->_fullpath = fullpath;
3587 cdata->_alpha_fullpath = Filename();
3588 cdata->_keep_ram_image = false;
3589
3590 return success;
3591}
3592
3593/**
3594 *
3595 */
3596bool Texture::
3597do_read_txo(CData *cdata, istream &in, const string &filename) {
3598 PT(Texture) other = make_from_txo(in, filename);
3599 if (other == nullptr) {
3600 return false;
3601 }
3602
3603 CDReader cdata_other(other->_cycler);
3604 Namable::operator = (*other);
3605 do_assign(cdata, other, cdata_other);
3606
3607 cdata->_loaded_from_image = true;
3608 cdata->_loaded_from_txo = true;
3609 cdata->_has_read_pages = false;
3610 cdata->_has_read_mipmaps = false;
3611 cdata->_num_mipmap_levels_read = 0;
3612 return true;
3613}
3614
3615/**
3616 * Called internally when read() detects a DDS file. Assumes the lock is
3617 * already held.
3618 */
3619bool Texture::
3620do_read_dds_file(CData *cdata, const Filename &fullpath, bool header_only) {
3621 VirtualFileSystem *vfs = VirtualFileSystem::get_global_ptr();
3622
3623 Filename filename = Filename::binary_filename(fullpath);
3624 PT(VirtualFile) file = vfs->get_file(filename);
3625 if (file == nullptr) {
3626 // No such file.
3627 gobj_cat.error()
3628 << "Could not find " << fullpath << "\n";
3629 return false;
3630 }
3631
3632 if (gobj_cat.is_debug()) {
3633 gobj_cat.debug()
3634 << "Reading DDS file " << filename << "\n";
3635 }
3636
3637 istream *in = file->open_read_file(true);
3638 if (in == nullptr) {
3639 gobj_cat.error()
3640 << "Failed to open " << filename << " for reading.\n";
3641 return false;
3642 }
3643
3644 bool success = do_read_dds(cdata, *in, fullpath, header_only);
3645 vfs->close_read_file(in);
3646
3647 if (!has_name()) {
3648 set_name(fullpath.get_basename_wo_extension());
3649 }
3650
3651 cdata->_fullpath = fullpath;
3652 cdata->_alpha_fullpath = Filename();
3653 cdata->_keep_ram_image = false;
3654
3655 return success;
3656}
3657
3658/**
3659 *
3660 */
3661bool Texture::
3662do_read_dds(CData *cdata, istream &in, const string &filename, bool header_only) {
3663 StreamReader dds(in);
3664
3665 // DDS header (19 words)
3666 DDSHeader header;
3667 header.dds_magic = dds.get_uint32();
3668 header.dds_size = dds.get_uint32();
3669 header.dds_flags = dds.get_uint32();
3670 header.height = dds.get_uint32();
3671 header.width = dds.get_uint32();
3672 header.pitch = dds.get_uint32();
3673 header.depth = dds.get_uint32();
3674 header.num_levels = dds.get_uint32();
3675 dds.skip_bytes(44);
3676
3677 // Pixelformat (8 words)
3678 header.pf.pf_size = dds.get_uint32();
3679 header.pf.pf_flags = dds.get_uint32();
3680 header.pf.four_cc = dds.get_uint32();
3681 header.pf.rgb_bitcount = dds.get_uint32();
3682 header.pf.r_mask = dds.get_uint32();
3683 header.pf.g_mask = dds.get_uint32();
3684 header.pf.b_mask = dds.get_uint32();
3685 header.pf.a_mask = dds.get_uint32();
3686
3687 // Caps (4 words)
3688 header.caps.caps1 = dds.get_uint32();
3689 header.caps.caps2 = dds.get_uint32();
3690 header.caps.ddsx = dds.get_uint32();
3691 dds.skip_bytes(4);
3692
3693 // Pad out to 32 words
3694 dds.skip_bytes(4);
3695
3696 if (header.dds_magic != DDS_MAGIC || (in.fail() || in.eof())) {
3697 gobj_cat.error()
3698 << filename << " is not a DDS file.\n";
3699 return false;
3700 }
3701
3702 if ((header.dds_flags & DDSD_MIPMAPCOUNT) == 0) {
3703 // No bit set means only the base mipmap level.
3704 header.num_levels = 1;
3705
3706 } else if (header.num_levels == 0) {
3707 // Some files seem to have this set to 0 for some reason--existing readers
3708 // assume 0 means 1.
3709 header.num_levels = 1;
3710 }
3711
3712 TextureType texture_type;
3713 if (header.caps.caps2 & DDSCAPS2_CUBEMAP) {
3714 static const unsigned int all_faces =
3715 (DDSCAPS2_CUBEMAP_POSITIVEX |
3716 DDSCAPS2_CUBEMAP_POSITIVEY |
3717 DDSCAPS2_CUBEMAP_POSITIVEZ |
3718 DDSCAPS2_CUBEMAP_NEGATIVEX |
3719 DDSCAPS2_CUBEMAP_NEGATIVEY |
3720 DDSCAPS2_CUBEMAP_NEGATIVEZ);
3721 if ((header.caps.caps2 & all_faces) != all_faces) {
3722 gobj_cat.error()
3723 << filename << " is missing some cube map faces; cannot load.\n";
3724 return false;
3725 }
3726 header.depth = 6;
3727 texture_type = TT_cube_map;
3728
3729 } else if (header.caps.caps2 & DDSCAPS2_VOLUME) {
3730 texture_type = TT_3d_texture;
3731
3732 } else {
3733 texture_type = TT_2d_texture;
3734 header.depth = 1;
3735 }
3736
3737 // Determine the function to use to read the DDS image.
3738 typedef PTA_uchar (*ReadDDSLevelFunc)(Texture *tex, Texture::CData *cdata,
3739 const DDSHeader &header, int n, istream &in);
3740 ReadDDSLevelFunc func = nullptr;
3741
3742 Format format = F_rgb;
3743 ComponentType component_type = T_unsigned_byte;
3744
3745 do_clear_ram_image(cdata);
3746 CompressionMode compression = CM_off;
3747
3748 if ((header.pf.pf_flags & DDPF_FOURCC) != 0 &&
3749 header.pf.four_cc == 0x30315844) { // 'DX10'
3750 // A DirectX 10 style texture, which has an additional header.
3751 func = read_dds_level_generic_uncompressed;
3752 unsigned int dxgi_format = dds.get_uint32();
3753 unsigned int dimension = dds.get_uint32();
3754 unsigned int misc_flag = dds.get_uint32();
3755 unsigned int array_size = dds.get_uint32();
3756 /*unsigned int alpha_mode = */dds.get_uint32();
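// These five dwords make up the DDS_HEADER_DXT10 extension that Direct3D 10+ appends after the standard header when the FourCC is 'DX10'.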
3757
3758 switch (dxgi_format) {
3759 case 2: // DXGI_FORMAT_R32G32B32A32_FLOAT
3760 format = F_rgba32;
3761 component_type = T_float;
3762 func = read_dds_level_abgr32;
3763 break;
3764 case 10: // DXGI_FORMAT_R16G16B16A16_FLOAT
3765 format = F_rgba16;
3766 component_type = T_half_float;
3767 func = read_dds_level_abgr16;
3768 break;
3769 case 11: // DXGI_FORMAT_R16G16B16A16_UNORM
3770 format = F_rgba16;
3771 component_type = T_unsigned_short;
3772 func = read_dds_level_abgr16;
3773 break;
3774 case 12: // DXGI_FORMAT_R16G16B16A16_UINT
3775 format = F_rgba16i;
3776 component_type = T_unsigned_short;
3777 func = read_dds_level_abgr16;
3778 break;
3779 case 14: // DXGI_FORMAT_R16G16B16A16_SINT
3780 format = F_rgba16i;
3781 component_type = T_short;
3782 func = read_dds_level_abgr16;
3783 break;
3784 case 16: // DXGI_FORMAT_R32G32_FLOAT
3785 format = F_rg32;
3786 component_type = T_float;
3787 func = read_dds_level_raw;
3788 break;
3789 case 17: // DXGI_FORMAT_R32G32_UINT
3790 format = F_rg32i;
3791 component_type = T_unsigned_int;
3792 func = read_dds_level_raw;
3793 break;
3794 case 18: // DXGI_FORMAT_R32G32_SINT
3795 format = F_rg32i;
3796 component_type = T_int;
3797 func = read_dds_level_raw;
3798 break;
3799 case 27: // DXGI_FORMAT_R8G8B8A8_TYPELESS
3800 case 28: // DXGI_FORMAT_R8G8B8A8_UNORM
3801 format = F_rgba8;
3802 func = read_dds_level_abgr8;
3803 break;
3804 case 29: // DXGI_FORMAT_R8G8B8A8_UNORM_SRGB
3805 format = F_srgb_alpha;
3806 func = read_dds_level_abgr8;
3807 break;
3808 case 30: // DXGI_FORMAT_R8G8B8A8_UINT
3809 format = F_rgba8i;
3810 func = read_dds_level_abgr8;
3811 break;
3812 case 31: // DXGI_FORMAT_R8G8B8A8_SNORM
3813 format = F_rgba8;
3814 component_type = T_byte;
3815 func = read_dds_level_abgr8;
3816 break;
3817 case 32: // DXGI_FORMAT_R8G8B8A8_SINT
3818 format = F_rgba8i;
3819 component_type = T_byte;
3820 func = read_dds_level_abgr8;
3821 break;
3822 case 34: // DXGI_FORMAT_R16G16_FLOAT:
3823 format = F_rg16;
3824 component_type = T_half_float;
3825 func = read_dds_level_raw;
3826 break;
3827 case 35: // DXGI_FORMAT_R16G16_UNORM:
3828 format = F_rg16;
3829 component_type = T_unsigned_short;
3830 func = read_dds_level_raw;
3831 break;
3832 case 36: // DXGI_FORMAT_R16G16_UINT:
3833 format = F_rg16i;
3834 component_type = T_unsigned_short;
3835 func = read_dds_level_raw;
3836 break;
3837 case 37: // DXGI_FORMAT_R16G16_SNORM:
3838 format = F_rg16;
3839 component_type = T_short;
3840 func = read_dds_level_raw;
3841 break;
3842 case 38: // DXGI_FORMAT_R16G16_SINT:
3843 format = F_rg16i;
3844 component_type = T_short;
3845 func = read_dds_level_raw;
3846 break;
3847 case 40: // DXGI_FORMAT_D32_FLOAT
3848 format = F_depth_component32;
3849 component_type = T_float;
3850 func = read_dds_level_raw;
3851 break;
3852 case 41: // DXGI_FORMAT_R32_FLOAT
3853 format = F_r32;
3854 component_type = T_float;
3855 func = read_dds_level_raw;
3856 break;
3857 case 42: // DXGI_FORMAT_R32_UINT
3858 format = F_r32i;
3859 component_type = T_unsigned_int;
3860 func = read_dds_level_raw;
3861 break;
3862 case 43: // DXGI_FORMAT_R32_SINT
3863 format = F_r32i;
3864 component_type = T_int;
3865 func = read_dds_level_raw;
3866 break;
3867 case 48: // DXGI_FORMAT_R8G8_TYPELESS
3868 case 49: // DXGI_FORMAT_R8G8_UNORM
3869 format = F_rg;
3870 break;
3871 case 50: // DXGI_FORMAT_R8G8_UINT
3872 format = F_rg8i;
3873 break;
3874 case 51: // DXGI_FORMAT_R8G8_SNORM
3875 format = F_rg;
3876 component_type = T_byte;
3877 break;
3878 case 52: // DXGI_FORMAT_R8G8_SINT
3879 format = F_rg8i;
3880 component_type = T_byte;
3881 break;
3882 case 54: // DXGI_FORMAT_R16_FLOAT:
3883 format = F_r16;
3884 component_type = T_half_float;
3885 func = read_dds_level_raw;
3886 break;
3887 case 55: // DXGI_FORMAT_D16_UNORM:
3888 format = F_depth_component16;
3889 component_type = T_unsigned_short;
3890 func = read_dds_level_raw;
3891 break;
3892 case 56: // DXGI_FORMAT_R16_UNORM:
3893 format = F_r16;
3894 component_type = T_unsigned_short;
3895 func = read_dds_level_raw;
3896 break;
3897 case 57: // DXGI_FORMAT_R16_UINT:
3898 format = F_r16i;
3899 component_type = T_unsigned_short;
3900 func = read_dds_level_raw;
3901 break;
3902 case 58: // DXGI_FORMAT_R16_SNORM:
3903 format = F_r16;
3904 component_type = T_short;
3905 func = read_dds_level_raw;
3906 break;
3907 case 59: // DXGI_FORMAT_R16_SINT:
3908 format = F_r16i;
3909 component_type = T_short;
3910 func = read_dds_level_raw;
3911 break;
3912 case 60: // DXGI_FORMAT_R8_TYPELESS
3913 case 61: // DXGI_FORMAT_R8_UNORM
3914 format = F_red;
3915 break;
3916 case 62: // DXGI_FORMAT_R8_UINT
3917 format = F_r8i;
3918 break;
3919 case 63: // DXGI_FORMAT_R8_SNORM
3920 format = F_red;
3921 component_type = T_byte;
3922 break;
3923 case 64: // DXGI_FORMAT_R8_SINT
3924 format = F_r8i;
3925 component_type = T_byte;
3926 break;
3927 case 65: // DXGI_FORMAT_A8_UNORM
3928 format = F_alpha;
3929 break;
3930 case 70: // DXGI_FORMAT_BC1_TYPELESS
3931 case 71: // DXGI_FORMAT_BC1_UNORM
3932 format = F_rgb;
3933 compression = CM_dxt1;
3934 func = read_dds_level_bc1;
3935 break;
3936 case 72: // DXGI_FORMAT_BC1_UNORM_SRGB
3937 format = F_srgb;
3938 compression = CM_dxt1;
3939 func = read_dds_level_bc1;
3940 break;
3941 case 73: // DXGI_FORMAT_BC2_TYPELESS
3942 case 74: // DXGI_FORMAT_BC2_UNORM
3943 format = F_rgba;
3944 compression = CM_dxt3;
3945 func = read_dds_level_bc2;
3946 break;
3947 case 75: // DXGI_FORMAT_BC2_UNORM_SRGB
3948 format = F_srgb_alpha;
3949 compression = CM_dxt3;
3950 func = read_dds_level_bc2;
3951 break;
3952 case 76: // DXGI_FORMAT_BC3_TYPELESS
3953 case 77: // DXGI_FORMAT_BC3_UNORM
3954 format = F_rgba;
3955 compression = CM_dxt5;
3956 func = read_dds_level_bc3;
3957 break;
3958 case 78: // DXGI_FORMAT_BC3_UNORM_SRGB
3959 format = F_srgb_alpha;
3960 compression = CM_dxt5;
3961 func = read_dds_level_bc3;
3962 break;
3963 case 79: // DXGI_FORMAT_BC4_TYPELESS
3964 case 80: // DXGI_FORMAT_BC4_UNORM
3965 format = F_red;
3966 compression = CM_rgtc;
3967 func = read_dds_level_bc4;
3968 break;
3969 case 82: // DXGI_FORMAT_BC5_TYPELESS
3970 case 83: // DXGI_FORMAT_BC5_UNORM
3971 format = F_rg;
3972 compression = CM_rgtc;
3973 func = read_dds_level_bc5;
3974 break;
3975 case 87: // DXGI_FORMAT_B8G8R8A8_UNORM
3976 case 90: // DXGI_FORMAT_B8G8R8A8_TYPELESS
3977 format = F_rgba8;
3978 break;
3979 case 88: // DXGI_FORMAT_B8G8R8X8_UNORM
3980 case 92: // DXGI_FORMAT_B8G8R8X8_TYPELESS
3981 format = F_rgb8;
3982 break;
3983 case 91: // DXGI_FORMAT_B8G8R8A8_UNORM_SRGB
3984 format = F_srgb_alpha;
3985 break;
3986 case 93: // DXGI_FORMAT_B8G8R8X8_UNORM_SRGB
3987 format = F_srgb;
3988 break;
3989 case 115: // DXGI_FORMAT_B4G4R4A4_UNORM
3990 format = F_rgba4;
3991 break;
3992 default:
3993 gobj_cat.error()
3994 << filename << ": unsupported DXGI format " << dxgi_format << ".\n";
3995 return false;
3996 }
3997
3998 switch (dimension) {
3999 case 2: // DDS_DIMENSION_TEXTURE1D
4000 texture_type = TT_1d_texture;
4001 header.depth = 1;
4002 break;
4003 case 3: // DDS_DIMENSION_TEXTURE2D
4004 if (misc_flag & 0x4) { // DDS_RESOURCE_MISC_TEXTURECUBE
4005 if (array_size > 1) {
4006 texture_type = TT_cube_map_array;
4007 header.depth = array_size * 6;
4008 } else {
4009 texture_type = TT_cube_map;
4010 header.depth = 6;
4011 }
4012 } else {
4013 if (array_size > 1) {
4014 texture_type = TT_2d_texture_array;
4015 header.depth = array_size;
4016 } else {
4017 texture_type = TT_2d_texture;
4018 header.depth = 1;
4019 }
4020 }
4021 break;
4022 case 4: // DDS_DIMENSION_TEXTURE3D
4023 texture_type = TT_3d_texture;
4024 break;
4025 default:
4026 gobj_cat.error()
4027 << filename << ": unsupported dimension.\n";
4028 return false;
4029 }
4030
4031 } else if (header.pf.pf_flags & DDPF_FOURCC) {
4032 // Some compressed texture format.
4033 if (texture_type == TT_3d_texture) {
4034 gobj_cat.error()
4035 << filename << ": unsupported compression on 3-d texture.\n";
4036 return false;
4037 }
4038
4039 // Most of the compressed formats support alpha.
4040 format = F_rgba;
4041 switch (header.pf.four_cc) {
4042 case 0x31545844: // 'DXT1', little-endian.
4043 compression = CM_dxt1;
4044 func = read_dds_level_bc1;
4045 format = F_rgbm;
4046 break;
4047 case 0x32545844: // 'DXT2'
4048 compression = CM_dxt2;
4049 func = read_dds_level_bc2;
4050 break;
4051 case 0x33545844: // 'DXT3'
4052 compression = CM_dxt3;
4053 func = read_dds_level_bc2;
4054 break;
4055 case 0x34545844: // 'DXT4'
4056 compression = CM_dxt4;
4057 func = read_dds_level_bc3;
4058 break;
4059 case 0x35545844: // 'DXT5'
4060 compression = CM_dxt5;
4061 func = read_dds_level_bc3;
4062 break;
4063 case 0x31495441: // 'ATI1'
4064 case 0x55344342: // 'BC4U'
4065 compression = CM_rgtc;
4066 func = read_dds_level_bc4;
4067 format = F_red;
4068 break;
4069 case 0x32495441: // 'ATI2'
4070 case 0x55354342: // 'BC5U'
4071 compression = CM_rgtc;
4072 func = read_dds_level_bc5;
4073 format = F_rg;
4074 break;
4075 case 36: // D3DFMT_A16B16G16R16
4076 func = read_dds_level_abgr16;
4077 format = F_rgba16;
4078 component_type = T_unsigned_short;
4079 break;
4080 case 110: // D3DFMT_Q16W16V16U16
4081 func = read_dds_level_abgr16;
4082 format = F_rgba16;
4083 component_type = T_short;
4084 break;
4085 case 113: // D3DFMT_A16B16G16R16F
4086 func = read_dds_level_abgr16;
4087 format = F_rgba16;
4088 component_type = T_half_float;
4089 break;
4090 case 116: // D3DFMT_A32B32G32R32F
4091 func = read_dds_level_abgr32;
4092 format = F_rgba32;
4093 component_type = T_float;
4094 break;
4095 default:
4096 gobj_cat.error()
4097 << filename << ": unsupported texture compression (FourCC: 0x"
4098 << std::hex << header.pf.four_cc << std::dec << ").\n";
4099 return false;
4100 }
4101
4102 } else {
4103 // An uncompressed texture format.
4104 func = read_dds_level_generic_uncompressed;
4105
4106 if (header.pf.pf_flags & DDPF_ALPHAPIXELS) {
4107 // An uncompressed format that involves alpha.
4108 format = F_rgba;
4109 if (header.pf.rgb_bitcount == 32 &&
4110 header.pf.r_mask == 0x000000ff &&
4111 header.pf.g_mask == 0x0000ff00 &&
4112 header.pf.b_mask == 0x00ff0000 &&
4113 header.pf.a_mask == 0xff000000U) {
4114 func = read_dds_level_abgr8;
4115 } else if (header.pf.rgb_bitcount == 32 &&
4116 header.pf.r_mask == 0x00ff0000 &&
4117 header.pf.g_mask == 0x0000ff00 &&
4118 header.pf.b_mask == 0x000000ff &&
4119 header.pf.a_mask == 0xff000000U) {
4120 func = read_dds_level_rgba8;
4121
4122 } else if (header.pf.r_mask != 0 &&
4123 header.pf.g_mask == 0 &&
4124 header.pf.b_mask == 0) {
4125 func = read_dds_level_luminance_uncompressed;
4126 format = F_luminance_alpha;
4127 }
4128 } else {
4129 // An uncompressed format that doesn't involve alpha.
4130 if (header.pf.rgb_bitcount == 24 &&
4131 header.pf.r_mask == 0x00ff0000 &&
4132 header.pf.g_mask == 0x0000ff00 &&
4133 header.pf.b_mask == 0x000000ff) {
4134 func = read_dds_level_bgr8;
4135 } else if (header.pf.rgb_bitcount == 24 &&
4136 header.pf.r_mask == 0x000000ff &&
4137 header.pf.g_mask == 0x0000ff00 &&
4138 header.pf.b_mask == 0x00ff0000) {
4139 func = read_dds_level_rgb8;
4140
4141 } else if (header.pf.r_mask != 0 &&
4142 header.pf.g_mask == 0 &&
4143 header.pf.b_mask == 0) {
4144 func = read_dds_level_luminance_uncompressed;
4145 format = F_luminance;
4146 }
4147 }
4148 }
4149
4150 do_setup_texture(cdata, texture_type, header.width, header.height, header.depth,
4151 component_type, format);
4152
4153 cdata->_orig_file_x_size = cdata->_x_size;
4154 cdata->_orig_file_y_size = cdata->_y_size;
4155 cdata->_compression = compression;
4156 cdata->_ram_image_compression = compression;
4157
4158 if (!header_only) {
4159 switch (texture_type) {
4160 case TT_3d_texture:
4161 {
4162 // 3-d textures store all the depth slices for mipmap level 0, then
4163 // all the depth slices for mipmap level 1, and so on.
4164 for (int n = 0; n < (int)header.num_levels; ++n) {
4165 int z_size = do_get_expected_mipmap_z_size(cdata, n);
4166 pvector<PTA_uchar> pages;
4167 size_t page_size = 0;
4168 int z;
4169 for (z = 0; z < z_size; ++z) {
4170 PTA_uchar page = func(this, cdata, header, n, in);
4171 if (page.is_null()) {
4172 return false;
4173 }
4174 nassertr(page_size == 0 || page_size == page.size(), false);
4175 page_size = page.size();
4176 pages.push_back(page);
4177 }
4178 // Now reassemble the pages into one big image. Because this is a
4179 // Microsoft format, the images are stacked in reverse order; re-
4180 // reverse them.
4181 PTA_uchar image = PTA_uchar::empty_array(page_size * z_size);
4182 unsigned char *imagep = (unsigned char *)image.p();
4183 for (z = 0; z < z_size; ++z) {
4184 int fz = z_size - 1 - z;
4185 memcpy(imagep + z * page_size, pages[fz].p(), page_size);
4186 }
4187
4188 do_set_ram_mipmap_image(cdata, n, image, page_size);
4189 }
4190 }
4191 break;
4192
4193 case TT_cube_map:
4194 {
4195 // Cube maps store all the mipmap levels for face 0, then all the
4196 // mipmap levels for face 1, and so on.
4197 pvector<pvector<PTA_uchar> > pages;
4198 pages.reserve(6);
4199 int z, n;
4200 for (z = 0; z < 6; ++z) {
4201 pages.push_back(pvector<PTA_uchar>());
4202 pvector<PTA_uchar> &levels = pages.back();
4203 levels.reserve(header.num_levels);
4204
4205 for (n = 0; n < (int)header.num_levels; ++n) {
4206 PTA_uchar image = func(this, cdata, header, n, in);
4207 if (image.is_null()) {
4208 return false;
4209 }
4210 levels.push_back(image);
4211 }
4212 }
4213
4214 // Now, for each level, reassemble the pages into one big image.
4215 // Because this is a Microsoft format, the cube map faces are arranged
4216 // in a different order; remap them.
4217 static const int level_remap[6] = {
4218 0, 1, 5, 4, 2, 3
4219 };
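// This remap presumably accounts for the differing face order between Direct3D's y-up cube map convention and Panda's z-up convention: the Y and Z faces trade places.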
4220 for (n = 0; n < (int)header.num_levels; ++n) {
4221 size_t page_size = pages[0][n].size();
4222 PTA_uchar image = PTA_uchar::empty_array(page_size * 6);
4223 unsigned char *imagep = (unsigned char *)image.p();
4224 for (z = 0; z < 6; ++z) {
4225 int fz = level_remap[z];
4226 nassertr(pages[fz][n].size() == page_size, false);
4227 memcpy(imagep + z * page_size, pages[fz][n].p(), page_size);
4228 }
4229
4230 do_set_ram_mipmap_image(cdata, n, image, page_size);
4231 }
4232 }
4233 break;
4234
4235 case TT_2d_texture_array:
4236 case TT_cube_map_array: //TODO: rearrange cube map array faces?
4237 {
4238 // Texture arrays store all the mipmap levels for layer 0, then all
4239 // the mipmap levels for layer 1, and so on.
4240 pvector<pvector<PTA_uchar> > pages;
4241 pages.reserve(header.depth);
4242 int z, n;
4243 for (z = 0; z < (int)header.depth; ++z) {
4244 pages.push_back(pvector<PTA_uchar>());
4245 pvector<PTA_uchar> &levels = pages.back();
4246 levels.reserve(header.num_levels);
4247
4248 for (n = 0; n < (int)header.num_levels; ++n) {
4249 PTA_uchar image = func(this, cdata, header, n, in);
4250 if (image.is_null()) {
4251 return false;
4252 }
4253 levels.push_back(image);
4254 }
4255 }
4256
4257 // Now, for each level, reassemble the pages into one big image.
4258 for (n = 0; n < (int)header.num_levels; ++n) {
4259 size_t page_size = pages[0][n].size();
4260 PTA_uchar image = PTA_uchar::empty_array(page_size * header.depth);
4261 unsigned char *imagep = (unsigned char *)image.p();
4262 for (z = 0; z < (int)header.depth; ++z) {
4263 nassertr(pages[z][n].size() == page_size, false);
4264 memcpy(imagep + z * page_size, pages[z][n].p(), page_size);
4265 }
4266
4267 do_set_ram_mipmap_image(cdata, n, image, page_size);
4268 }
4269 }
4270 break;
4271
4272 default:
4273 // Normal 2-d textures simply store the mipmap levels.
4274 {
4275 for (int n = 0; n < (int)header.num_levels; ++n) {
4276 PTA_uchar image = func(this, cdata, header, n, in);
4277 if (image.is_null()) {
4278 return false;
4279 }
4280 do_set_ram_mipmap_image(cdata, n, image, 0);
4281 }
4282 }
4283 }
4284 cdata->_has_read_pages = true;
4285 cdata->_has_read_mipmaps = true;
4286 cdata->_num_mipmap_levels_read = cdata->_ram_images.size();
4287 }
4288
4289 if (in.fail()) {
4290 gobj_cat.error()
4291 << filename << ": truncated DDS file.\n";
4292 return false;
4293 }
4294
4295 cdata->_loaded_from_image = true;
4296 cdata->_loaded_from_txo = true;
4297
4298 return true;
4299}
4300
4301/**
4302 * Called internally when read() detects a KTX file. Assumes the lock is
4303 * already held.
4304 */
4305bool Texture::
4306do_read_ktx_file(CData *cdata, const Filename &fullpath, bool header_only) {
4307 VirtualFileSystem *vfs = VirtualFileSystem::get_global_ptr();
4308
4309 Filename filename = Filename::binary_filename(fullpath);
4310 PT(VirtualFile) file = vfs->get_file(filename);
4311 if (file == nullptr) {
4312 // No such file.
4313 gobj_cat.error()
4314 << "Could not find " << fullpath << "\n";
4315 return false;
4316 }
4317
4318 if (gobj_cat.is_debug()) {
4319 gobj_cat.debug()
4320 << "Reading KTX file " << filename << "\n";
4321 }
4322
4323 istream *in = file->open_read_file(true);
4324 if (in == nullptr) {
4325 gobj_cat.error()
4326 << "Failed to open " << filename << " for reading.\n";
4327 return false;
4328 }
4329
4330 bool success = do_read_ktx(cdata, *in, fullpath, header_only);
4331 vfs->close_read_file(in);
4332
4333 if (!has_name()) {
4334 set_name(fullpath.get_basename_wo_extension());
4335 }
4336
4337 cdata->_fullpath = fullpath;
4338 cdata->_alpha_fullpath = Filename();
4339 cdata->_keep_ram_image = false;
4340
4341 return success;
4342}
4343
4344/**
4345 *
4346 */
4347bool Texture::
4348do_read_ktx(CData *cdata, istream &in, const string &filename, bool header_only) {
4349 StreamReader ktx(in);
4350
4351 unsigned char magic[12];
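// Every KTX 1.1 file begins with the fixed 12-byte identifier 0xAB 'K' 'T' 'X' ' ' '1' '1' 0xBB '\r' '\n' 0x1A '\n'.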
4352 if (ktx.extract_bytes(magic, 12) != 12 ||
4353 memcmp(magic, "\xABKTX 11\xBB\r\n\x1A\n", 12) != 0) {
4354 gobj_cat.error()
4355 << filename << " is not a KTX file.\n";
4356 return false;
4357 }
4358
4359 // See: https://www.khronos.org/opengles/sdk/tools/KTX/file_format_spec/
4360 uint32_t gl_type, /*type_size,*/ gl_format, internal_format, gl_base_format,
4361 width, height, depth, num_array_elements, num_faces, num_mipmap_levels,
4362 kvdata_size;
4363
4364 bool big_endian;
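// The endianness field holds 0x04030201 written in the file's native byte order; if it reads back as 0x04030201 with a little-endian read, the rest of the file can be read as-is, otherwise every word must be byte-swapped.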
4365 if (ktx.get_uint32() == 0x04030201) {
4366 big_endian = false;
4367 gl_type = ktx.get_uint32();
4368 /*type_size = */ktx.get_uint32();
4369 gl_format = ktx.get_uint32();
4370 internal_format = ktx.get_uint32();
4371 gl_base_format = ktx.get_uint32();
4372 width = ktx.get_uint32();
4373 height = ktx.get_uint32();
4374 depth = ktx.get_uint32();
4375 num_array_elements = ktx.get_uint32();
4376 num_faces = ktx.get_uint32();
4377 num_mipmap_levels = ktx.get_uint32();
4378 kvdata_size = ktx.get_uint32();
4379 } else {
4380 big_endian = true;
4381 gl_type = ktx.get_be_uint32();
4382 /*type_size = */ktx.get_be_uint32();
4383 gl_format = ktx.get_be_uint32();
4384 internal_format = ktx.get_be_uint32();
4385 gl_base_format = ktx.get_be_uint32();
4386 width = ktx.get_be_uint32();
4387 height = ktx.get_be_uint32();
4388 depth = ktx.get_be_uint32();
4389 num_array_elements = ktx.get_be_uint32();
4390 num_faces = ktx.get_be_uint32();
4391 num_mipmap_levels = ktx.get_be_uint32();
4392 kvdata_size = ktx.get_be_uint32();
4393 }
4394
4395 // Skip metadata section.
4396 ktx.skip_bytes(kvdata_size);
4397
4398 ComponentType type;
4399 CompressionMode compression;
4400 Format format;
4401 bool swap_bgr = false;
4402
4403 if (gl_type == 0 || gl_format == 0) {
4404 // Compressed texture.
4405 if (gl_type > 0 || gl_format > 0) {
4406 gobj_cat.error()
4407 << "Compressed textures must have both type and format set to 0.\n";
4408 return false;
4409 }
4410 type = T_unsigned_byte;
4411 compression = CM_on;
4412
4413 KTXFormat base_format;
4414 switch ((KTXCompressedFormat)internal_format) {
4415 case KTX_COMPRESSED_RED:
4416 format = F_red;
4417 base_format = KTX_RED;
4418 break;
4419 case KTX_COMPRESSED_RG:
4420 format = F_rg;
4421 base_format = KTX_RG;
4422 break;
4423 case KTX_COMPRESSED_RGB:
4424 format = F_rgb;
4425 base_format = KTX_RGB;
4426 break;
4427 case KTX_COMPRESSED_RGBA:
4428 format = F_rgba;
4429 base_format = KTX_RGBA;
4430 break;
4431 case KTX_COMPRESSED_SRGB:
4432 format = F_srgb;
4433 base_format = KTX_SRGB;
4434 break;
4435 case KTX_COMPRESSED_SRGB_ALPHA:
4436 format = F_srgb_alpha;
4437 base_format = KTX_SRGB_ALPHA;
4438 break;
4439 case KTX_COMPRESSED_RGB_FXT1_3DFX:
4440 format = F_rgb;
4441 base_format = KTX_RGB;
4442 compression = CM_fxt1;
4443 break;
4444 case KTX_COMPRESSED_RGBA_FXT1_3DFX:
4445 format = F_rgba;
4446 base_format = KTX_RGBA;
4447 compression = CM_fxt1;
4448 break;
4449 case KTX_COMPRESSED_RGB_S3TC_DXT1:
4450 format = F_rgb;
4451 base_format = KTX_RGB;
4452 compression = CM_dxt1;
4453 break;
4454 case KTX_COMPRESSED_RGBA_S3TC_DXT1:
4455 format = F_rgbm;
4456 base_format = KTX_RGB;
4457 compression = CM_dxt1;
4458 break;
4459 case KTX_COMPRESSED_RGBA_S3TC_DXT3:
4460 format = F_rgba;
4461 base_format = KTX_RGBA;
4462 compression = CM_dxt3;
4463 break;
4464 case KTX_COMPRESSED_RGBA_S3TC_DXT5:
4465 format = F_rgba;
4466 base_format = KTX_RGBA;
4467 compression = CM_dxt5;
4468 break;
4469 case KTX_COMPRESSED_SRGB_ALPHA_S3TC_DXT1:
4470 format = F_srgb_alpha;
4471 base_format = KTX_SRGB_ALPHA;
4472 compression = CM_dxt1;
4473 break;
4474 case KTX_COMPRESSED_SRGB_ALPHA_S3TC_DXT3:
4475 format = F_srgb_alpha;
4476 base_format = KTX_SRGB_ALPHA;
4477 compression = CM_dxt3;
4478 break;
4479 case KTX_COMPRESSED_SRGB_ALPHA_S3TC_DXT5:
4480 format = F_srgb_alpha;
4481 base_format = KTX_SRGB_ALPHA;
4482 compression = CM_dxt5;
4483 break;
4484 case KTX_COMPRESSED_SRGB_S3TC_DXT1:
4485 format = F_srgb;
4486 base_format = KTX_SRGB;
4487 compression = CM_dxt1;
4488 break;
4489 case KTX_COMPRESSED_RED_RGTC1:
4490 case KTX_COMPRESSED_SIGNED_RED_RGTC1:
4491 format = F_red;
4492 base_format = KTX_RED;
4493 compression = CM_rgtc;
4494 break;
4495 case KTX_COMPRESSED_RG_RGTC2:
4496 case KTX_COMPRESSED_SIGNED_RG_RGTC2:
4497 format = F_rg;
4498 base_format = KTX_RG;
4499 compression = CM_rgtc;
4500 break;
4501 case KTX_ETC1_RGB8:
4502 format = F_rgb;
4503 base_format = KTX_RGB;
4504 compression = CM_etc1;
4505 break;
4506 case KTX_ETC1_SRGB8:
4507 format = F_srgb;
4508 base_format = KTX_SRGB;
4509 compression = CM_etc1;
4510 break;
4511 case KTX_COMPRESSED_RGB8_ETC2:
4512 format = F_rgb;
4513 base_format = KTX_RGB;
4514 compression = CM_etc2;
4515 break;
4516 case KTX_COMPRESSED_SRGB8_ETC2:
4517 format = F_srgb;
4518 base_format = KTX_SRGB;
4519 compression = CM_etc2;
4520 break;
4521 case KTX_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2:
4522 format = F_rgbm;
4523 base_format = KTX_RGBA;
4524 compression = CM_etc2;
4525 break;
4526 case KTX_COMPRESSED_SRGB8_PUNCHTHROUGH_ALPHA1_ETC2:
4527 format = F_rgbm;
4528 base_format = KTX_SRGB8_ALPHA8;
4529 compression = CM_etc2;
4530 break;
4531 case KTX_COMPRESSED_RGBA8_ETC2_EAC:
4532 format = F_rgba;
4533 base_format = KTX_RGBA;
4534 compression = CM_etc2;
4535 break;
4536 case KTX_COMPRESSED_SRGB8_ALPHA8_ETC2_EAC:
4537 format = F_srgb_alpha;
4538 base_format = KTX_SRGB8_ALPHA8;
4539 compression = CM_etc2;
4540 break;
4541 case KTX_COMPRESSED_R11_EAC:
4542 case KTX_COMPRESSED_SIGNED_R11_EAC:
4543 format = F_red;
4544 base_format = KTX_RED;
4545 compression = CM_eac;
4546 break;
4547 case KTX_COMPRESSED_RG11_EAC:
4548 case KTX_COMPRESSED_SIGNED_RG11_EAC:
4549 format = F_rg;
4550 base_format = KTX_RG;
4551 compression = CM_eac;
4552 break;
4553 case KTX_COMPRESSED_SRGB_ALPHA_PVRTC_2BPPV1:
4554 format = F_srgb_alpha;
4555 base_format = KTX_SRGB_ALPHA;
4556 compression = CM_pvr1_2bpp;
4557 break;
4558 case KTX_COMPRESSED_SRGB_ALPHA_PVRTC_4BPPV1:
4559 format = F_srgb_alpha;
4560 base_format = KTX_SRGB_ALPHA;
4561 compression = CM_pvr1_4bpp;
4562 break;
4563 case KTX_COMPRESSED_RGBA_BPTC_UNORM:
4564 case KTX_COMPRESSED_SRGB_ALPHA_BPTC_UNORM:
4565 case KTX_COMPRESSED_RGB_BPTC_SIGNED_FLOAT:
4566 case KTX_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT:
4567 default:
4568 gobj_cat.error()
4569 << filename << " has unsupported compressed internal format " << internal_format << "\n";
4570 return false;
4571 }
4572
4573 if (base_format != gl_base_format) {
4574 gobj_cat.error()
4575 << filename << " has internal format that is incompatible with base "
4576 "format (0x" << std::hex << gl_base_format << ", expected 0x"
4577 << base_format << std::dec << ")\n";
4578 return false;
4579 }
4580
4581 } else {
4582 // Uncompressed texture.
4583 compression = CM_off;
4584 switch ((KTXType)gl_type) {
4585 case KTX_BYTE:
4586 type = T_byte;
4587 break;
4588 case KTX_UNSIGNED_BYTE:
4589 type = T_unsigned_byte;
4590 break;
4591 case KTX_SHORT:
4592 type = T_short;
4593 break;
4594 case KTX_UNSIGNED_SHORT:
4595 type = T_unsigned_short;
4596 break;
4597 case KTX_INT:
4598 type = T_int;
4599 break;
4600 case KTX_UNSIGNED_INT:
4601 type = T_unsigned_int;
4602 break;
4603 case KTX_FLOAT:
4604 type = T_float;
4605 break;
4606 case KTX_HALF_FLOAT:
4607 type = T_half_float;
4608 break;
4609 case KTX_UNSIGNED_INT_24_8:
4610 type = T_unsigned_int_24_8;
4611 break;
4612 default:
4613 gobj_cat.error()
4614 << filename << " has unsupported component type " << gl_type << "\n";
4615 return false;
4616 }
4617
4618 if (gl_format != gl_base_format) {
4619 gobj_cat.error()
4620 << filename << " has mismatched formats: " << gl_format << " != "
4621 << gl_base_format << "\n";
4622 }
4623
4624 switch (gl_format) {
4625 case KTX_DEPTH_COMPONENT:
4626 switch (internal_format) {
4627 case KTX_DEPTH_COMPONENT:
4628 format = F_depth_component;
4629 break;
4630 case KTX_DEPTH_COMPONENT16:
4631 format = F_depth_component16;
4632 break;
4633 case KTX_DEPTH_COMPONENT24:
4634 format = F_depth_component24;
4635 break;
4636 case KTX_DEPTH_COMPONENT32:
4637 case KTX_DEPTH_COMPONENT32F:
4638 format = F_depth_component32;
4639 break;
4640 default:
4641 format = F_depth_component;
4642 gobj_cat.warning()
4643 << filename << " has unsupported depth component format " << internal_format << "\n";
4644 }
4645 break;
4646
4647 case KTX_DEPTH_STENCIL:
4648 format = F_depth_stencil;
4649 if (internal_format != KTX_DEPTH_STENCIL &&
4650 internal_format != KTX_DEPTH24_STENCIL8) {
4651 gobj_cat.warning()
4652 << filename << " has unsupported depth stencil format " << internal_format << "\n";
4653 }
4654 break;
4655
4656 case KTX_RED:
4657 switch (internal_format) {
4658 case KTX_RED:
4659 case KTX_RED_SNORM:
4660 case KTX_R8:
4661 case KTX_R8_SNORM:
4662 format = F_red;
4663 break;
4664 case KTX_R16:
4665 case KTX_R16_SNORM:
4666 case KTX_R16F:
4667 format = F_r16;
4668 break;
4669 case KTX_R32F:
4670 format = F_r32;
4671 break;
4672 default:
4673 format = F_red;
4674 gobj_cat.warning()
4675 << filename << " has unsupported red format " << internal_format << "\n";
4676 }
4677 break;
4678
4679 case KTX_RED_INTEGER:
4680 switch (internal_format) {
4681 case KTX_R8I:
4682 case KTX_R8UI:
4683 format = F_r8i;
4684 break;
4685 case KTX_R16I:
4686 case KTX_R16UI:
4687 format = F_r16i;
4688 break;
4689 case KTX_R32I:
4690 case KTX_R32UI:
4691 format = F_r32i;
4692 break;
4693 default:
4694 gobj_cat.error()
4695 << filename << " has unsupported red integer format " << internal_format << "\n";
4696 return false;
4697 }
4698 break;
4699
4700 case KTX_GREEN:
4701 format = F_green;
4702 if (internal_format != KTX_GREEN) {
4703 gobj_cat.warning()
4704 << filename << " has unsupported green format " << internal_format << "\n";
4705 }
4706 break;
4707
4708 case KTX_BLUE:
4709 format = F_blue;
4710 if (internal_format != KTX_BLUE) {
4711 gobj_cat.warning()
4712 << filename << " has unsupported blue format " << internal_format << "\n";
4713 }
4714 break;
4715
4716 case KTX_RG:
4717 switch (internal_format) {
4718 case KTX_RG:
4719 case KTX_RG_SNORM:
4720 case KTX_RG8:
4721 case KTX_RG8_SNORM:
4722 format = F_rg;
4723 break;
4724 case KTX_RG16:
4725 case KTX_RG16_SNORM:
4726 case KTX_RG16F:
4727 format = F_rg16;
4728 break;
4729 case KTX_RG32F:
4730 format = F_rg32;
4731 break;
4732 default:
4733 format = F_rg;
4734 gobj_cat.warning()
4735 << filename << " has unsupported RG format " << internal_format << "\n";
4736 }
4737 break;
4738
4739 case KTX_RG_INTEGER:
4740 switch (internal_format) {
4741 case KTX_RG8I:
4742 case KTX_RG8UI:
4743 format = F_rg8i;
4744 break;
4745 case KTX_RG16I:
4746 case KTX_RG16UI:
4747 format = F_rg16i;
4748 break;
4749 case KTX_RG32I:
4750 case KTX_RG32UI:
4751 format = F_rg32i;
4752 break;
4753 default:
4754 gobj_cat.error()
4755 << filename << " has unsupported RG integer format " << internal_format << "\n";
4756 return false;
4757 }
4758 break;
4759
4760 case KTX_RGB:
4761 swap_bgr = true;
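// Panda keeps its ram images with red and blue swapped relative to RGB order, so flag the swap and fall through to the shared BGR handling below.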
4762 case KTX_BGR:
4763 switch (internal_format) {
4764 case KTX_RGB:
4765 case KTX_RGB_SNORM:
4766 format = F_rgb;
4767 break;
4768 case KTX_RGB5:
4769 format = F_rgb5;
4770 break;
4771 case KTX_RGB12:
4772 format = F_rgb12;
4773 break;
4774 case KTX_R3_G3_B2:
4775 format = F_rgb332;
4776 break;
4777 case KTX_RGB9_E5:
4778 format = F_rgb9_e5;
4779 break;
4780 case KTX_R11F_G11F_B10F:
4781 format = F_r11_g11_b10;
4782 break;
4783 case KTX_RGB8:
4784 case KTX_RGB8_SNORM:
4785 format = F_rgb8;
4786 break;
4787 case KTX_RGB16:
4788 case KTX_RGB16_SNORM:
4789 case KTX_RGB16F:
4790 format = F_rgb16;
4791 break;
4792 case KTX_RGB32F:
4793 format = F_rgb32;
4794 break;
4795 case KTX_SRGB:
4796 case KTX_SRGB8:
4797 format = F_srgb;
4798 break;
4799 default:
4800 format = F_rgb;
4801 gobj_cat.warning()
4802 << filename << " has unsupported RGB format " << internal_format << "\n";
4803 }
4804 break;
4805
4806 case KTX_RGB_INTEGER:
4807 swap_bgr = true;
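// fall through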
4808 case KTX_BGR_INTEGER:
4809 switch (internal_format) {
4810 case KTX_RGB8I:
4811 case KTX_RGB8UI:
4812 format = F_rgb8i;
4813 break;
4814 case KTX_RGB16I:
4815 case KTX_RGB16UI:
4816 format = F_rgb16i;
4817 break;
4818 case KTX_RGB32I:
4819 case KTX_RGB32UI:
4820 format = F_rgb32i;
4821 break;
4822 default:
4823 gobj_cat.error()
4824 << filename << " has unsupported RGB integer format " << internal_format << "\n";
4825 return false;
4826 }
4827 break;
4828
4829 case KTX_RGBA:
4830 swap_bgr = true;
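// fall through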
4831 case KTX_BGRA:
4832 switch (internal_format) {
4833 case KTX_RGBA:
4834 case KTX_RGBA_SNORM:
4835 format = F_rgba;
4836 break;
4837 case KTX_RGBA4:
4838 format = F_rgba4;
4839 break;
4840 case KTX_RGB5_A1:
4841 format = F_rgba5;
4842 break;
4843 case KTX_RGBA12:
4844 format = F_rgba12;
4845 break;
4846 case KTX_RGB10_A2:
4847 format = F_rgb10_a2;
4848 break;
4849 case KTX_RGBA8:
4850 case KTX_RGBA8_SNORM:
4851 format = F_rgba8;
4852 break;
4853 case KTX_RGBA16:
4854 case KTX_RGBA16_SNORM:
4855 case KTX_RGBA16F:
4856 format = F_rgba16;
4857 break;
4858 case KTX_RGBA32F:
4859 format = F_rgba32;
4860 break;
4861 case KTX_SRGB_ALPHA:
4862 case KTX_SRGB8_ALPHA8:
4863 format = F_srgb_alpha;
4864 break;
4865 default:
4866 format = F_rgba;
4867 gobj_cat.warning()
4868 << filename << " has unsupported RGBA format " << internal_format << "\n";
4869 }
4870 break;
4872
4873 case KTX_RGBA_INTEGER:
4874 swap_bgr = true;
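// fall through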
4875 case KTX_BGRA_INTEGER:
4876 switch (internal_format) {
4877 case KTX_RGBA8I:
4878 case KTX_RGBA8UI:
4879 format = F_rgba8i;
4880 break;
4881 case KTX_RGBA16I:
4882 case KTX_RGBA16UI:
4883 format = F_rgba16i;
4884 break;
4885 case KTX_RGBA32I:
4886 case KTX_RGBA32UI:
4887 format = F_rgba32i;
4888 break;
4889 default:
4890 gobj_cat.error()
4891 << filename << " has unsupported RGBA integer format " << internal_format << "\n";
4892 return false;
4893 }
4894 break;
4895
4896 case KTX_LUMINANCE:
4897 format = F_luminance;
4898 break;
4899
4900 case KTX_LUMINANCE_ALPHA:
4901 format = F_luminance_alpha;
4902 break;
4903
4904 case KTX_ALPHA:
4905 format = F_alpha;
4906 break;
4907
4908 case KTX_STENCIL_INDEX:
4909 default:
4910 gobj_cat.error()
4911 << filename << " has unsupported format " << gl_format << "\n";
4912 return false;
4913 }
4914 }
4915
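// Per the KTX spec, the pixelDepth, numberOfFaces, numberOfArrayElements, pixelHeight and pixelWidth fields together determine the texture's dimensionality.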
4916 TextureType texture_type;
4917 if (depth > 0) {
4918 texture_type = TT_3d_texture;
4919
4920 } else if (num_faces > 1) {
4921 if (num_faces != 6) {
4922 gobj_cat.error()
4923 << filename << " has " << num_faces << " cube map faces, expected 6\n";
4924 return false;
4925 }
4926 if (width != height) {
4927 gobj_cat.error()
4928 << filename << " is cube map, but does not have square dimensions\n";
4929 return false;
4930 }
4931 if (num_array_elements > 0) {
4932 depth = num_array_elements * 6;
4933 texture_type = TT_cube_map_array;
4934 } else {
4935 depth = 6;
4936 texture_type = TT_cube_map;
4937 }
4938
4939 } else if (height > 0) {
4940 if (num_array_elements > 0) {
4941 depth = num_array_elements;
4942 texture_type = TT_2d_texture_array;
4943 } else {
4944 depth = 1;
4945 texture_type = TT_2d_texture;
4946 }
4947
4948 } else if (width > 0) {
4949 depth = 1;
4950 if (num_array_elements > 0) {
4951 height = num_array_elements;
4952 texture_type = TT_1d_texture_array;
4953 } else {
4954 height = 1;
4955 texture_type = TT_1d_texture;
4956 }
4957
4958 } else {
4959 gobj_cat.error()
4960 << filename << " has zero size\n";
4961 return false;
4962 }
4963
4964 do_setup_texture(cdata, texture_type, width, height, depth, type, format);
4965
4966 cdata->_orig_file_x_size = cdata->_x_size;
4967 cdata->_orig_file_y_size = cdata->_y_size;
4968 cdata->_compression = compression;
4969 cdata->_ram_image_compression = compression;
4970
4971 if (!header_only) {
4972 bool generate_mipmaps = false;
4973 if (num_mipmap_levels == 0) {
4974 generate_mipmaps = true;
4975 num_mipmap_levels = 1;
4976 }
4977
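// Each mipmap level in the file is prefixed with a 32-bit imageSize field, followed by the (row-padded) image data for every layer and face.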
4978 for (uint32_t n = 0; n < num_mipmap_levels; ++n) {
4979 uint32_t image_size;
4980 if (big_endian) {
4981 image_size = ktx.get_be_uint32();
4982 } else {
4983 image_size = ktx.get_uint32();
4984 }
4985 PTA_uchar image;
4986
4987 if (compression == CM_off) {
4988 uint32_t row_size = do_get_expected_mipmap_x_size(cdata, (int)n) * cdata->_num_components * cdata->_component_width;
4989 uint32_t num_rows = do_get_expected_mipmap_y_size(cdata, (int)n) * do_get_expected_mipmap_z_size(cdata, (int)n);
4990 uint32_t row_padded = (row_size + 3) & ~3;
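// KTX rows are padded to a 4-byte boundary (GL_UNPACK_ALIGNMENT of 4), hence rounding the row size up to the next multiple of 4.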
4991
4992 if (image_size == row_size * num_rows) {
4993 if (row_padded != row_size) {
4994 // Someone tightly packed the image. This is invalid, but because
4995 // we like it tightly packed too, we'll read it anyway.
4996 gobj_cat.warning()
4997 << filename << " does not have proper row padding for mipmap "
4998 "level " << n << "\n";
4999 }
5000 image = PTA_uchar::empty_array(image_size);
5001 ktx.extract_bytes(image.p(), image_size);
5002
5003 } else if (image_size != row_padded * num_rows) {
5004 gobj_cat.error()
5005 << filename << " has invalid image size " << image_size
5006 << " for mipmap level " << n << " (expected "
5007 << row_padded * num_rows << ")\n";
5008 return false;
5009
5010 } else {
5011 // Read it row by row.
5012 image = PTA_uchar::empty_array(row_size * num_rows);
5013 uint32_t skip = row_padded - row_size;
5014 unsigned char *p = image.p();
5015 for (uint32_t row = 0; row < num_rows; ++row) {
5016 ktx.extract_bytes(p, row_size);
5017 ktx.skip_bytes(skip);
5018 p += row_size;
5019 }
5020 }
5021
5022 // Swap red and blue channels if necessary to match Panda conventions.
5023 if (swap_bgr) {
5024 unsigned char *begin = image.p();
5025 const unsigned char *end = image.p() + image.size();
5026 size_t skip = cdata->_num_components;
5027 nassertr(skip == 3 || skip == 4, false);
5028
5029 switch (cdata->_component_width) {
5030 case 1:
5031 for (unsigned char *p = begin; p < end; p += skip) {
5032 swap(p[0], p[2]);
5033 }
5034 break;
5035 case 2:
5036 for (short *p = (short *)begin; p < (short *)end; p += skip) {
5037 swap(p[0], p[2]);
5038 }
5039 break;
5040 case 4:
5041 for (int *p = (int *)begin; p < (int *)end; p += skip) {
5042 swap(p[0], p[2]);
5043 }
5044 break;
5045 default:
5046 nassert_raise("unexpected channel count");
5047 return false;
5048 }
5049 }
5050
5051 do_set_ram_mipmap_image(cdata, (int)n, std::move(image),
5052 row_size * do_get_expected_mipmap_y_size(cdata, (int)n));
5053
5054 } else {
5055 // Compressed image. We'll trust that the file has the right size.
5056 image = PTA_uchar::empty_array(image_size);
5057 ktx.extract_bytes(image.p(), image_size);
5058 do_set_ram_mipmap_image(cdata, (int)n, std::move(image), image_size / depth);
5059 }
5060
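// Each mipmap level is padded out to a 4-byte boundary, so skip 0-3 bytes to realign before the next imageSize field.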
5061 ktx.skip_bytes(3 - ((image_size + 3) & 3));
5062 }
5063
5064 cdata->_has_read_pages = true;
5065 cdata->_has_read_mipmaps = true;
5066 cdata->_num_mipmap_levels_read = cdata->_ram_images.size();
5067
5068 if (generate_mipmaps) {
5069 do_generate_ram_mipmap_images(cdata, false);
5070 }
5071 }
5072
5073 if (in.fail()) {
5074 gobj_cat.error()
5075 << filename << ": truncated KTX file.\n";
5076 return false;
5077 }
5078
5079 cdata->_loaded_from_image = true;
5080 cdata->_loaded_from_txo = true;
5081
5082 return true;
5083}
5084
5085/**
5086 * Internal method to write a series of pages and/or mipmap levels to disk
5087 * files.
5088 */
5089bool Texture::
5090do_write(CData *cdata,
5091 const Filename &fullpath, int z, int n, bool write_pages, bool write_mipmaps) {
5092 if (is_txo_filename(fullpath)) {
5093 if (!do_has_bam_rawdata(cdata)) {
5094 do_get_bam_rawdata(cdata);
5095 }
5096 nassertr(do_has_bam_rawdata(cdata), false);
5097 return do_write_txo_file(cdata, fullpath);
5098 }
5099
5100 if (!do_has_uncompressed_ram_image(cdata)) {
5101 do_get_uncompressed_ram_image(cdata);
5102 }
5103
5104 nassertr(do_has_ram_mipmap_image(cdata, n), false);
5105 nassertr(cdata->_ram_image_compression == CM_off, false);
5106
5107 if (write_pages && write_mipmaps) {
5108 // Write a sequence of pages * mipmap levels.
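// The filename is expected to carry two separate hash sequences, e.g. something like "tex_#_#.png": one run is filled in with the page index and the other with the mipmap level.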
5109 Filename fullpath_pattern = Filename::pattern_filename(fullpath);
5110 int num_levels = cdata->_ram_images.size();
5111
5112 for (int n = 0; n < num_levels; ++n) {
5113 int num_pages = do_get_expected_mipmap_num_pages(cdata, n);
5114
5115 for (z = 0; z < num_pages; ++z) {
5116 Filename n_pattern = Filename::pattern_filename(fullpath_pattern.get_filename_index(z));
5117
5118 if (!n_pattern.has_hash()) {
5119 gobj_cat.error()
5120 << "Filename requires two different hash sequences: " << fullpath
5121 << "\n";
5122 return false;
5123 }
5124
5125 if (!do_write_one(cdata, n_pattern.get_filename_index(n), z, n)) {
5126 return false;
5127 }
5128 }
5129 }
5130
5131 } else if (write_pages) {
5132 // Write a sequence of pages.
5133 Filename fullpath_pattern = Filename::pattern_filename(fullpath);
5134 if (!fullpath_pattern.has_hash()) {
5135 gobj_cat.error()
5136 << "Filename requires a hash mark: " << fullpath
5137 << "\n";
5138 return false;
5139 }
5140
5141 int num_pages = cdata->_z_size * cdata->_num_views;
5142 for (z = 0; z < num_pages; ++z) {
5143 if (!do_write_one(cdata, fullpath_pattern.get_filename_index(z), z, n)) {
5144 return false;
5145 }
5146 }
5147
5148 } else if (write_mipmaps) {
5149 // Write a sequence of mipmap images.
5150 Filename fullpath_pattern = Filename::pattern_filename(fullpath);
5151 if (!fullpath_pattern.has_hash()) {
5152 gobj_cat.error()
5153 << "Filename requires a hash mark: " << fullpath
5154 << "\n";
5155 return false;
5156 }
5157
5158 int num_levels = cdata->_ram_images.size();
5159 for (int n = 0; n < num_levels; ++n) {
5160 if (!do_write_one(cdata, fullpath_pattern.get_filename_index(n), z, n)) {
5161 return false;
5162 }
5163 }
5164
5165 } else {
5166 // Write a single file.
5167 if (!do_write_one(cdata, fullpath, z, n)) {
5168 return false;
5169 }
5170 }
5171
5172 return true;
5173}
5174
5175/**
5176 * Internal method to write the indicated page and mipmap level to a disk
5177 * image file.
5178 */
5179bool Texture::
5180do_write_one(CData *cdata, const Filename &fullpath, int z, int n) {
5181 if (!do_has_ram_mipmap_image(cdata, n)) {
5182 return false;
5183 }
5184
5185 nassertr(cdata->_ram_image_compression == CM_off, false);
5186
5187 bool success;
5188 if (cdata->_component_type == T_float) {
5189 // Writing a floating-point texture.
5190 PfmFile pfm;
5191 if (!do_store_one(cdata, pfm, z, n)) {
5192 return false;
5193 }
5194 success = pfm.write(fullpath);
5195 } else {
5196 // Writing a normal, integer texture.
5197 PNMFileType *type =
5198 PNMFileTypeRegistry::get_global_ptr()->get_type_from_extension(fullpath);
5199 if (type == nullptr) {
5200 gobj_cat.error()
5201 << "Texture::write() - couldn't determine type from extension: " << fullpath << endl;
5202 return false;
5203 }
5204
5205 PNMImage pnmimage;
5206 if (!do_store_one(cdata, pnmimage, z, n)) {
5207 return false;
5208 }
5209 success = pnmimage.write(fullpath, type);
5210 }
5211
5212 if (!success) {
5213 gobj_cat.error()
5214 << "Texture::write() - couldn't write: " << fullpath << endl;
5215 return false;
5216 }
5217
5218 return true;
5219}
5220
5221/**
5222 * Internal method to copy a page and/or mipmap level to a PNMImage.
5223 */
5224bool Texture::
5225do_store_one(CData *cdata, PNMImage &pnmimage, int z, int n) {
5226 // First, reload the ram image if necessary.
5227 do_get_uncompressed_ram_image(cdata);
5228
5229 if (!do_has_ram_mipmap_image(cdata, n)) {
5230 return false;
5231 }
5232
5233 nassertr(z >= 0 && z < do_get_expected_mipmap_num_pages(cdata, n), false);
5234 nassertr(cdata->_ram_image_compression == CM_off, false);
5235
5236 if (cdata->_component_type == T_float) {
5237 // PNMImage by way of PfmFile.
5238 PfmFile pfm;
5239 bool success = convert_to_pfm(pfm,
5240 do_get_expected_mipmap_x_size(cdata, n),
5241 do_get_expected_mipmap_y_size(cdata, n),
5242 cdata->_num_components, cdata->_component_width,
5243 cdata->_ram_images[n]._image,
5244 do_get_ram_mipmap_page_size(cdata, n), z);
5245 if (!success) {
5246 return false;
5247 }
5248 return pfm.store(pnmimage);
5249 }
5250
5251 return convert_to_pnmimage(pnmimage,
5252 do_get_expected_mipmap_x_size(cdata, n),
5253 do_get_expected_mipmap_y_size(cdata, n),
5254 cdata->_num_components, cdata->_component_type,
5255 is_srgb(cdata->_format),
5256 cdata->_ram_images[n]._image,
5257 do_get_ram_mipmap_page_size(cdata, n), z);
5258}
5259
5260/**
5261 * Internal method to copy a page and/or mipmap level to a PfmFile.
5262 */
5263bool Texture::
5264do_store_one(CData *cdata, PfmFile &pfm, int z, int n) {
5265 // First, reload the ram image if necessary.
5266 do_get_uncompressed_ram_image(cdata);
5267
5268 if (!do_has_ram_mipmap_image(cdata, n)) {
5269 return false;
5270 }
5271
5272 nassertr(z >= 0 && z < do_get_expected_mipmap_num_pages(cdata, n), false);
5273 nassertr(cdata->_ram_image_compression == CM_off, false);
5274
5275 if (cdata->_component_type != T_float) {
5276 // PfmFile by way of PNMImage.
5277 PNMImage pnmimage;
5278 bool success =
5279 convert_to_pnmimage(pnmimage,
5280 do_get_expected_mipmap_x_size(cdata, n),
5281 do_get_expected_mipmap_y_size(cdata, n),
5282 cdata->_num_components, cdata->_component_type,
5283 is_srgb(cdata->_format),
5284 cdata->_ram_images[n]._image,
5285 do_get_ram_mipmap_page_size(cdata, n), z);
5286 if (!success) {
5287 return false;
5288 }
5289 return pfm.load(pnmimage);
5290 }
5291
5292 return convert_to_pfm(pfm,
5293 do_get_expected_mipmap_x_size(cdata, n),
5294 do_get_expected_mipmap_y_size(cdata, n),
5295 cdata->_num_components, cdata->_component_width,
5296 cdata->_ram_images[n]._image,
5297 do_get_ram_mipmap_page_size(cdata, n), z);
5298}
5299
5300/**
5301 * Called internally when write() detects a txo filename.
5302 */
5303bool Texture::
5304do_write_txo_file(const CData *cdata, const Filename &fullpath) const {
5305 VirtualFileSystem *vfs = VirtualFileSystem::get_global_ptr();
5306 Filename filename = Filename::binary_filename(fullpath);
5307 ostream *out = vfs->open_write_file(filename, true, true);
5308 if (out == nullptr) {
5309 gobj_cat.error()
5310 << "Unable to open " << filename << "\n";
5311 return false;
5312 }
5313
5314 bool success = do_write_txo(cdata, *out, fullpath);
5315 vfs->close_write_file(out);
5316 return success;
5317}
5318
5319/**
5320 *
5321 */
5322bool Texture::
5323do_write_txo(const CData *cdata, ostream &out, const string &filename) const {
5324 DatagramOutputFile dout;
5325
5326 if (!dout.open(out, filename)) {
5327 gobj_cat.error()
5328 << "Could not write texture object: " << filename << "\n";
5329 return false;
5330 }
5331
5332 if (!dout.write_header(_bam_header)) {
5333 gobj_cat.error()
5334 << "Unable to write to " << filename << "\n";
5335 return false;
5336 }
5337
5338 BamWriter writer(&dout);
5339 if (!writer.init()) {
5340 return false;
5341 }
5342
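// BTM_rawdata tells the BamWriter to embed the full image data in the .txo rather than writing just a reference to the source filename.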
5343 writer.set_file_texture_mode(BamWriter::BTM_rawdata);
5344
5345 if (!writer.write_object(this)) {
5346 return false;
5347 }
5348
5349 if (!do_has_bam_rawdata(cdata)) {
5350 gobj_cat.error()
5351 << get_name() << " does not have ram image\n";
5352 return false;
5353 }
5354
5355 return true;
5356}
5357
5358/**
5359 * If the texture has a ram image already, this acquires the CData write lock
5360 * and returns it.
5361 *
5362 * If the texture lacks a ram image, this performs do_reload_ram_image(), but
5363 * without holding the lock on this particular Texture object, to avoid
5364 * holding the lock across what might be a slow operation. Instead, the
5365 * reload is performed in a copy of the texture object, and then the lock is
5366 * acquired and the data is copied in.
5367 *
5368 * In any case, the return value is a locked CData object, which must be
5369 * released with an explicit call to release_write(). The CData object will
5370 * have a ram image unless for some reason do_reload_ram_image() fails.
5371 */
5372Texture::CData *Texture::
5373unlocked_ensure_ram_image(bool allow_compression) {
5374 Thread *current_thread = Thread::get_current_thread();
5375
5376 // First, wait for any other threads that might be simultaneously performing
5377 // the same operation.
5378 MutexHolder holder(_lock);
5379 while (_reloading) {
5380 _cvar.wait();
5381 }
5382
5383 // Then make sure we still need to reload before continuing.
5384 const CData *cdata = _cycler.read(current_thread);
5385 bool has_ram_image = do_has_ram_image(cdata);
5386 if (has_ram_image && !allow_compression && cdata->_ram_image_compression != Texture::CM_off) {
5387 // If we don't want compression, but the ram image we have is pre-
5388 // compressed, we don't consider it.
5389 has_ram_image = false;
5390 }
5391 if (has_ram_image || !do_can_reload(cdata)) {
5392 // We don't need to reload after all, or maybe we can't reload anyway.
5393 // Return, but elevate the lock first, as we promised.
5394 return _cycler.elevate_read_upstream(cdata, false, current_thread);
5395 }
5396
5397 // We need to reload.
5398 nassertr(!_reloading, nullptr);
5399 _reloading = true;
5400
5401 PT(Texture) tex = do_make_copy(cdata);
5402 _cycler.release_read(cdata);
5403 _lock.unlock();
5404
5405 // Perform the actual reload in a copy of the texture, while our own mutex
5406 // is left unlocked.
5407 CDWriter cdata_tex(tex->_cycler, true);
5408 tex->do_reload_ram_image(cdata_tex, allow_compression);
5409
5410 _lock.lock();
5411
5412 CData *cdataw = _cycler.write_upstream(false, current_thread);
5413
5414 // Rather than calling do_assign(), which would copy *all* of the reloaded
5415 // texture's properties over, we only copy in the ones which are relevant to
5416 // the ram image. This way, if the properties have changed during the
5417 // reload (for instance, because we reloaded a txo), it won't contaminate
5418 // the original texture.
5419 cdataw->_orig_file_x_size = cdata_tex->_orig_file_x_size;
5420 cdataw->_orig_file_y_size = cdata_tex->_orig_file_y_size;
5421
5422 // If any of *these* properties have changed, the texture has changed in
5423 // some fundamental way. Update it appropriately.
5424 if (cdata_tex->_x_size != cdataw->_x_size ||
5425 cdata_tex->_y_size != cdataw->_y_size ||
5426 cdata_tex->_z_size != cdataw->_z_size ||
5427 cdata_tex->_num_views != cdataw->_num_views ||
5428 cdata_tex->_num_components != cdataw->_num_components ||
5429 cdata_tex->_component_width != cdataw->_component_width ||
5430 cdata_tex->_texture_type != cdataw->_texture_type ||
5431 cdata_tex->_component_type != cdataw->_component_type) {
5432
5433 cdataw->_x_size = cdata_tex->_x_size;
5434 cdataw->_y_size = cdata_tex->_y_size;
5435 cdataw->_z_size = cdata_tex->_z_size;
5436 cdataw->_num_views = cdata_tex->_num_views;
5437
5438 cdataw->_num_components = cdata_tex->_num_components;
5439 cdataw->_component_width = cdata_tex->_component_width;
5440 cdataw->_texture_type = cdata_tex->_texture_type;
5441 cdataw->_format = cdata_tex->_format;
5442 cdataw->_component_type = cdata_tex->_component_type;
5443
5444 cdataw->inc_properties_modified();
5445 cdataw->inc_image_modified();
5446 }
5447
5448 cdataw->_keep_ram_image = cdata_tex->_keep_ram_image;
5449 cdataw->_ram_image_compression = cdata_tex->_ram_image_compression;
5450 cdataw->_ram_images = cdata_tex->_ram_images;
5451
5452 nassertr(_reloading, nullptr);
5453 _reloading = false;
5454
5455 // We don't generally increment the cdata->_image_modified semaphore,
5456 // because this is just a reload, and presumably the image hasn't changed
5457 // (unless we hit the if condition above).
5458
5459 _cvar.notify_all();
5460
5461 // Return the still-locked cdata.
5462 return cdataw;
5463}
5464
5465/**
5466 * Called when the Texture image is required but the ram image is not
5467 * available, this will reload it from disk or otherwise do whatever is
5468 * required to make it available, if possible.
5469 *
5470 * Assumes the lock is already held. The lock will be held during the
5471 * duration of this operation.
5472 */
5473void Texture::
5474do_reload_ram_image(CData *cdata, bool allow_compression) {
5475 BamCache *cache = BamCache::get_global_ptr();
5476 PT(BamCacheRecord) record;
5477
5478 if (!do_has_compression(cdata)) {
5479 allow_compression = false;
5480 }
5481
5482 if ((cache->get_cache_textures() || (allow_compression && cache->get_cache_compressed_textures())) && !textures_header_only) {
5483 // See if the texture can be found in the on-disk cache, if it is active.
5484
5485 record = cache->lookup(cdata->_fullpath, "txo");
5486 if (record != nullptr &&
5487 record->has_data()) {
5488 PT(Texture) tex = DCAST(Texture, record->get_data());
5489
5490 // But don't use the cache record if the config parameters have changed,
5491 // and we want a different-sized texture now.
5492 int x_size = cdata->_orig_file_x_size;
5493 int y_size = cdata->_orig_file_y_size;
5494 do_adjust_this_size(cdata, x_size, y_size, cdata->_filename.get_basename(), true);
5495 if (x_size != tex->get_x_size() || y_size != tex->get_y_size()) {
5496 if (gobj_cat.is_debug()) {
5497 gobj_cat.debug()
5498 << "Cached texture " << *this << " has size "
5499 << tex->get_x_size() << " x " << tex->get_y_size()
5500 << " instead of " << x_size << " x " << y_size
5501 << "; ignoring cache.\n";
5502 }
5503 } else {
5504 // Also don't keep the cached version if it's compressed but we want
5505 // uncompressed.
5506 if (!allow_compression && tex->get_ram_image_compression() != Texture::CM_off) {
5507 if (gobj_cat.is_debug()) {
5508 gobj_cat.debug()
5509 << "Cached texture " << *this
5510 << " is compressed in cache; ignoring cache.\n";
5511 }
5512 } else {
5513 gobj_cat.info()
5514 << "Texture " << get_name() << " reloaded from disk cache\n";
5515 // We don't want to replace all the texture parameters--for
5516 // instance, we don't want to change the filter type or the border
5517 // color or anything--we just want to get the image and necessary
5518 // associated parameters.
5519 CDReader cdata_tex(tex->_cycler);
5520 cdata->_x_size = cdata_tex->_x_size;
5521 cdata->_y_size = cdata_tex->_y_size;
5522 if (cdata->_num_components != cdata_tex->_num_components) {
5523 cdata->_num_components = cdata_tex->_num_components;
5524 cdata->_format = cdata_tex->_format;
5525 }
5526 cdata->_component_type = cdata_tex->_component_type;
5527 cdata->_compression = cdata_tex->_compression;
5528 cdata->_ram_image_compression = cdata_tex->_ram_image_compression;
5529 cdata->_ram_images = cdata_tex->_ram_images;
5530 cdata->_loaded_from_image = true;
5531
5532 bool was_compressed = (cdata->_ram_image_compression != CM_off);
5533 if (do_consider_auto_process_ram_image(cdata, uses_mipmaps(), allow_compression)) {
5534 bool is_compressed = (cdata->_ram_image_compression != CM_off);
5535 if (!was_compressed && is_compressed &&
5536 cache->get_cache_compressed_textures()) {
5537 // We've re-compressed the image after loading it from the
5538 // cache. To keep the cache current, rewrite it to the cache
5539 // now, in its newly compressed form.
5540 record->set_data(this, this);
5541 cache->store(record);
5542 }
5543 }
5544
5545 return;
5546 }
5547 }
5548 }
5549 }
5550
5551 gobj_cat.info()
5552 << "Reloading texture " << get_name() << "\n";
5553
5554 int z = 0;
5555 int n = 0;
5556
5557 if (cdata->_has_read_pages) {
5558 z = cdata->_z_size;
5559 }
5560 if (cdata->_has_read_mipmaps) {
5561 n = cdata->_num_mipmap_levels_read;
5562 }
5563
5564 cdata->_loaded_from_image = false;
5565 Format orig_format = cdata->_format;
5566 int orig_num_components = cdata->_num_components;
5567
5568 LoaderOptions options;
5569 if (allow_compression) {
5570 options.set_texture_flags(LoaderOptions::TF_preload |
5571 LoaderOptions::TF_allow_compression);
5572 } else {
5573 options.set_texture_flags(LoaderOptions::TF_preload);
5574 }
5575 do_read(cdata, cdata->_fullpath, cdata->_alpha_fullpath,
5576 cdata->_primary_file_num_channels, cdata->_alpha_file_channel,
5577 z, n, cdata->_has_read_pages, cdata->_has_read_mipmaps, options, nullptr);
5578
5579 if (orig_num_components == cdata->_num_components) {
5580 // Restore the original format, in case it was needlessly changed during
5581 // the reload operation.
5582 cdata->_format = orig_format;
5583 }
5584
5585 if (do_has_ram_image(cdata) && record != nullptr) {
5586 if (cache->get_cache_textures() || (cdata->_ram_image_compression != CM_off && cache->get_cache_compressed_textures())) {
5587 // Update the cache.
5588 if (record != nullptr) {
5589 record->add_dependent_file(cdata->_fullpath);
5590 }
5591 record->set_data(this, this);
5592 cache->store(record);
5593 }
5594 }
5595}
5596
5597/**
5598 * This is called internally to uniquify the ram image pointer without
5599 * updating cdata->_image_modified.
5600 */
5601PTA_uchar Texture::
5602do_modify_ram_image(CData *cdata) {
5603 if (cdata->_ram_images.empty() || cdata->_ram_images[0]._image.empty() ||
5604 cdata->_ram_image_compression != CM_off) {
5605 do_make_ram_image(cdata);
5606 } else {
5607 do_clear_ram_mipmap_images(cdata);
5608 }
5609 return cdata->_ram_images[0]._image;
5610}
5611
5612/**
5613 * This is called internally to make a new ram image without updating
5614 * cdata->_image_modified.
5615 */
5616PTA_uchar Texture::
5617do_make_ram_image(CData *cdata) {
5618 int image_size = do_get_expected_ram_image_size(cdata);
5619 cdata->_ram_images.clear();
5620 cdata->_ram_images.push_back(RamImage());
5621 cdata->_ram_images[0]._page_size = do_get_expected_ram_page_size(cdata);
5622 cdata->_ram_images[0]._image = PTA_uchar::empty_array(image_size, get_class_type());
5623 cdata->_ram_images[0]._pointer_image = nullptr;
5624 cdata->_ram_image_compression = CM_off;
5625
5626 if (cdata->_has_clear_color) {
5627 // Fill the image with the clear color.
5628 unsigned char pixel[16];
5629 const int pixel_size = do_get_clear_data(cdata, pixel);
5630 nassertr(pixel_size > 0, cdata->_ram_images[0]._image);
5631
5632 unsigned char *image_data = cdata->_ram_images[0]._image;
5633 for (int i = 0; i < image_size; i += pixel_size) {
5634 memcpy(image_data + i, pixel, pixel_size);
5635 }
5636 }
5637
5638 return cdata->_ram_images[0]._image;
5639}
5640
5641/**
5642 * Replaces the current system-RAM image with the new data. If compression is
5643 * not CM_off, it indicates that the new data is already pre-compressed in the
5644 * indicated format.
5645 *
5646 * This does *not* affect keep_ram_image.
5647 */
5648void Texture::
5649do_set_ram_image(CData *cdata, CPTA_uchar image, Texture::CompressionMode compression,
5650 size_t page_size) {
5651 nassertv(compression != CM_default);
5652 nassertv(compression != CM_off || image.size() == do_get_expected_ram_image_size(cdata));
5653 if (cdata->_ram_images.empty()) {
5654 cdata->_ram_images.push_back(RamImage());
5655 } else {
5656 do_clear_ram_mipmap_images(cdata);
5657 }
5658 if (page_size == 0) {
5659 page_size = image.size();
5660 }
5661 if (cdata->_ram_images[0]._image != image ||
5662 cdata->_ram_images[0]._page_size != page_size ||
5663 cdata->_ram_image_compression != compression) {
5664 cdata->_ram_images[0]._image = image.cast_non_const();
5665 cdata->_ram_images[0]._page_size = page_size;
5666 cdata->_ram_images[0]._pointer_image = nullptr;
5667 cdata->_ram_image_compression = compression;
5668 cdata->inc_image_modified();
5669 }
5670}
5671
5672/**
5673 * This is called internally to uniquify the nth mipmap image pointer without
5674 * updating cdata->_image_modified.
5675 */
5676PTA_uchar Texture::
5677do_modify_ram_mipmap_image(CData *cdata, int n) {
5678 nassertr(cdata->_ram_image_compression == CM_off, PTA_uchar());
5679
5680 if (n >= (int)cdata->_ram_images.size() ||
5681 cdata->_ram_images[n]._image.empty()) {
5682 do_make_ram_mipmap_image(cdata, n);
5683 }
5684 return cdata->_ram_images[n]._image;
5685}
5686
5687/**
5688 *
5689 */
5690PTA_uchar Texture::
5691do_make_ram_mipmap_image(CData *cdata, int n) {
5692 nassertr(cdata->_ram_image_compression == CM_off, PTA_uchar(get_class_type()));
5693
5694 while (n >= (int)cdata->_ram_images.size()) {
5695 cdata->_ram_images.push_back(RamImage());
5696 }
5697
5698 size_t image_size = do_get_expected_ram_mipmap_image_size(cdata, n);
5699 cdata->_ram_images[n]._image = PTA_uchar::empty_array(image_size, get_class_type());
5700 cdata->_ram_images[n]._pointer_image = nullptr;
5701 cdata->_ram_images[n]._page_size = do_get_expected_ram_mipmap_page_size(cdata, n);
5702
5703 if (cdata->_has_clear_color) {
5704 // Fill the image with the clear color.
5705 unsigned char pixel[16];
5706 const size_t pixel_size = (size_t)do_get_clear_data(cdata, pixel);
5707 nassertr(pixel_size > 0, cdata->_ram_images[n]._image);
5708
5709 unsigned char *image_data = cdata->_ram_images[n]._image;
5710 for (size_t i = 0; i < image_size; i += pixel_size) {
5711 memcpy(image_data + i, pixel, pixel_size);
5712 }
5713 }
5714
5715 return cdata->_ram_images[n]._image;
5716}
5717
5718/**
5719 *
5720 */
5721void Texture::
5722do_set_ram_mipmap_image(CData *cdata, int n, CPTA_uchar image, size_t page_size) {
5723 nassertv(cdata->_ram_image_compression != CM_off || image.size() == do_get_expected_ram_mipmap_image_size(cdata, n));
5724
5725 while (n >= (int)cdata->_ram_images.size()) {
5726 cdata->_ram_images.push_back(RamImage());
5727 }
5728 if (page_size == 0) {
5729 page_size = image.size();
5730 }
5731
5732 if (cdata->_ram_images[n]._image != image ||
5733 cdata->_ram_images[n]._page_size != page_size) {
5734 cdata->_ram_images[n]._image = image.cast_non_const();
5735 cdata->_ram_images[n]._pointer_image = nullptr;
5736 cdata->_ram_images[n]._page_size = page_size;
5737 cdata->inc_image_modified();
5738 }
5739}
5740
5741/**
5742 * Writes a single pixel representing the clear color of the texture, encoded
5743 * in this texture's format and component type, into the given buffer, and
5744 * returns the number of bytes written.
5745 * In other words, to create an uncompressed RAM image filled with the clear
5746 * color, repeat this pixel for every pixel of the image.
5747 */
5748size_t Texture::
5749do_get_clear_data(const CData *cdata, unsigned char *into) const {
5750 nassertr(cdata->_has_clear_color, 0);
5751
5752 int num_components = cdata->_num_components;
5753 nassertr(num_components > 0, 0);
5754 nassertr(num_components <= 4, 0);
5755
5756 LVecBase4 clear_value = cdata->_clear_color;
5757
5758   // Swap red and blue components, since RAM images are stored in BGR(A) order.
5759 if (num_components >= 3) {
5760 std::swap(clear_value[0], clear_value[2]);
5761 }
5762
5763 switch (cdata->_component_type) {
5764 case T_unsigned_byte:
5765 if (is_srgb(cdata->_format)) {
5766 xel color;
5767 xelval alpha;
5768 encode_sRGB_uchar(clear_value, color, alpha);
5769 switch (num_components) {
5770 case 4: into[3] = (unsigned char)alpha;
5771 case 3: into[2] = (unsigned char)color.b;
5772 case 2: into[1] = (unsigned char)color.g;
5773 case 1: into[0] = (unsigned char)color.r;
5774 }
5775 } else {
5776 LColor scaled = clear_value.fmin(LColor(1)).fmax(LColor::zero());
5777 scaled *= 255;
5778 for (int i = 0; i < num_components; ++i) {
5779 into[i] = (unsigned char)scaled[i];
5780 }
5781 }
5782 break;
5783
5784 case T_unsigned_short:
5785 {
5786 LColor scaled = clear_value.fmin(LColor(1)).fmax(LColor::zero());
5787 scaled *= 65535;
5788 for (int i = 0; i < num_components; ++i) {
5789 ((unsigned short *)into)[i] = (unsigned short)scaled[i];
5790 }
5791 break;
5792 }
5793
5794 case T_float:
5795 for (int i = 0; i < num_components; ++i) {
5796 ((float *)into)[i] = clear_value[i];
5797 }
5798 break;
5799
5800 case T_unsigned_int_24_8:
5801 nassertr(num_components == 1, 0);
5802 *((unsigned int *)into) =
5803 ((unsigned int)(clear_value[0] * 16777215) << 8) +
5804 (unsigned int)max(min(clear_value[1], (PN_stdfloat)255), (PN_stdfloat)0);
5805 break;
5806
5807 case T_int:
5808 // Note: there are no 32-bit UNORM textures. Therefore, we don't do any
5809 // normalization here, either.
5810 for (int i = 0; i < num_components; ++i) {
5811 ((int *)into)[i] = (int)clear_value[i];
5812 }
5813 break;
5814
5815 case T_byte:
5816 {
5817 LColor scaled = clear_value.fmin(LColor(1)).fmax(LColor(-1));
5818 scaled *= 127;
5819 for (int i = 0; i < num_components; ++i) {
5820 ((signed char *)into)[i] = (signed char)scaled[i];
5821 }
5822 break;
5823 }
5824
5825 case T_short:
5826 {
5827 LColor scaled = clear_value.fmin(LColor(1)).fmax(LColor(-1));
5828 scaled *= 32767;
5829 for (int i = 0; i < num_components; ++i) {
5830 ((short *)into)[i] = (short)scaled[i];
5831 }
5832 break;
5833 }
5834
5835 case T_half_float:
5836 for (int i = 0; i < num_components; ++i) {
5837 union {
5838 uint32_t ui;
5839 float uf;
5840 } v;
5841 v.uf = clear_value[i];
5842 uint16_t sign = ((v.ui & 0x80000000u) >> 16u);
5843 uint32_t mantissa = (v.ui & 0x007fffffu);
5844 uint16_t exponent = (uint16_t)std::min(std::max((int)((v.ui & 0x7f800000u) >> 23u) - 112, 0), 31);
5845 mantissa += (mantissa & 0x00001000u) << 1u;
5846 ((uint16_t *)into)[i] = (uint16_t)(sign | ((exponent << 10u) | (mantissa >> 13u)));
5847 }
5848 break;
5849
5850 case T_unsigned_int:
5851 // Note: there are no 32-bit UNORM textures. Therefore, we don't do any
5852 // normalization here, either.
5853 for (int i = 0; i < num_components; ++i) {
5854 ((unsigned int *)into)[i] = (unsigned int)clear_value[i];
5855 }
5856 }
5857
5858 return num_components * cdata->_component_width;
5859}
5860
5861/**
5862 * Should be called after a texture has been loaded into RAM, this considers
5863 * generating mipmaps and/or compressing the RAM image.
5864 *
5865 * Returns true if the image was modified by this operation, false if it
5866 * wasn't.
5867 */
5868bool Texture::
5869consider_auto_process_ram_image(bool generate_mipmaps, bool allow_compression) {
5870 CDWriter cdata(_cycler, false);
5871 return do_consider_auto_process_ram_image(cdata, generate_mipmaps, allow_compression);
5872}
5873
5874/**
5875 * Should be called after a texture has been loaded into RAM, this considers
5876 * generating mipmaps and/or compressing the RAM image.
5877 *
5878 * Returns true if the image was modified by this operation, false if it
5879 * wasn't.
5880 */
5881bool Texture::
5882do_consider_auto_process_ram_image(CData *cdata, bool generate_mipmaps,
5883 bool allow_compression) {
5884 bool modified = false;
5885
5886 if (generate_mipmaps && !driver_generate_mipmaps &&
5887 cdata->_ram_images.size() == 1) {
5888 do_generate_ram_mipmap_images(cdata, false);
5889 modified = true;
5890 }
5891
5892 if (allow_compression && !driver_compress_textures) {
5893 CompressionMode compression = cdata->_compression;
5894 if (compression == CM_default && compressed_textures) {
5895 if (cdata->_texture_type == Texture::TT_buffer_texture) {
5896 compression = CM_off;
5897 }
5898 else {
5899 compression = CM_on;
5900 }
5901 }
5902 if (compression != CM_off && cdata->_ram_image_compression == CM_off) {
5903       GraphicsStateGuardianBase *gsg = GraphicsStateGuardianBase::get_default_gsg();
5904       if (do_compress_ram_image(cdata, compression, QL_default, gsg)) {
5905 if (gobj_cat.is_debug()) {
5906 gobj_cat.debug()
5907 << "Compressed " << get_name() << " with "
5908 << cdata->_ram_image_compression << "\n";
5909 }
5910 modified = true;
5911 }
5912 }
5913 }
5914
5915 return modified;
5916}
5917
5918/**
5919 *
5920 */
5921bool Texture::
5922do_compress_ram_image(CData *cdata, Texture::CompressionMode compression,
5923 Texture::QualityLevel quality_level,
5924                       GraphicsStateGuardianBase *gsg) {
5925   nassertr(compression != CM_off, false);
5926
5927 if (cdata->_ram_images.empty() || cdata->_ram_image_compression != CM_off) {
5928 return false;
5929 }
5930
5931 if (compression == CM_on) {
5932 // Select an appropriate compression mode automatically.
5933 switch (cdata->_format) {
5934 case Texture::F_rgbm:
5935 case Texture::F_rgb:
5936 case Texture::F_rgb5:
5937 case Texture::F_rgba5:
5938 case Texture::F_rgb8:
5939 case Texture::F_rgb12:
5940 case Texture::F_rgb332:
5941 case Texture::F_rgb16:
5942 case Texture::F_rgb32:
5943 case Texture::F_rgb10_a2:
5944 if (gsg == nullptr || gsg->get_supports_compressed_texture_format(CM_dxt1)) {
5945 compression = CM_dxt1;
5946 } else if (gsg->get_supports_compressed_texture_format(CM_dxt3)) {
5947 compression = CM_dxt3;
5948 } else if (gsg->get_supports_compressed_texture_format(CM_dxt5)) {
5949 compression = CM_dxt5;
5950 } else if (gsg->get_supports_compressed_texture_format(CM_etc2)) {
5951 compression = CM_etc2;
5952 } else if (gsg->get_supports_compressed_texture_format(CM_etc1)) {
5953 compression = CM_etc1;
5954 }
5955 break;
5956
5957 case Texture::F_rgba4:
5958 if (gsg == nullptr || gsg->get_supports_compressed_texture_format(CM_dxt3)) {
5959 compression = CM_dxt3;
5960 } else if (gsg->get_supports_compressed_texture_format(CM_dxt5)) {
5961 compression = CM_dxt5;
5962 } else if (gsg->get_supports_compressed_texture_format(CM_etc2)) {
5963 compression = CM_etc2;
5964 }
5965 break;
5966
5967 case Texture::F_rgba:
5968 case Texture::F_rgba8:
5969 case Texture::F_rgba12:
5970 case Texture::F_rgba16:
5971 case Texture::F_rgba32:
5972 if (gsg == nullptr || gsg->get_supports_compressed_texture_format(CM_dxt5)) {
5973 compression = CM_dxt5;
5974 } else if (gsg->get_supports_compressed_texture_format(CM_etc2)) {
5975 compression = CM_etc2;
5976 }
5977 break;
5978
5979 case Texture::F_red:
5980 case Texture::F_rg:
5981 if (gsg == nullptr || gsg->get_supports_compressed_texture_format(CM_rgtc)) {
5982 compression = CM_rgtc;
5983 } else if (gsg->get_supports_compressed_texture_format(CM_eac)) {
5984 compression = CM_eac;
5985 }
5986 break;
5987
5988 default:
5989 break;
5990 }
5991 }
5992
5993 // Choose an appropriate quality level.
5994 if (quality_level == Texture::QL_default) {
5995 quality_level = cdata->_quality_level;
5996 }
5997 if (quality_level == Texture::QL_default) {
5998 quality_level = texture_quality_level;
5999 }
6000
6001 if (compression == CM_rgtc) {
6002 // We should compress RGTC ourselves, as squish does not support it.
6003 if (cdata->_component_type != T_unsigned_byte) {
6004 return false;
6005 }
6006
6007 if (!do_has_all_ram_mipmap_images(cdata)) {
6008 // If we're about to compress the RAM image, we should ensure that we
6009 // have all of the mipmap levels first.
6010 do_generate_ram_mipmap_images(cdata, false);
6011 }
6012
6013 RamImages compressed_ram_images;
6014 compressed_ram_images.resize(cdata->_ram_images.size());
6015
6016 for (size_t n = 0; n < cdata->_ram_images.size(); ++n) {
6017 const RamImage *uncompressed_image = &cdata->_ram_images[n];
6018
6019 int x_size = do_get_expected_mipmap_x_size(cdata, n);
6020 int y_size = do_get_expected_mipmap_y_size(cdata, n);
6021 int num_pages = do_get_expected_mipmap_num_pages(cdata, n);
6022
6023 // It is important that we handle image sizes that aren't a multiple of
6024 // the block size, since this method may be used to compress mipmaps,
6025 // which go all the way to 1x1. Pad the image if necessary.
6026 RamImage temp_image;
6027 if ((x_size | y_size) & 0x3) {
6028 int virtual_x_size = x_size;
6029 int virtual_y_size = y_size;
6030 x_size = (x_size + 3) & ~0x3;
6031 y_size = (y_size + 3) & ~0x3;
6032
6033 temp_image._page_size = x_size * y_size * cdata->_num_components;
6034 temp_image._image = PTA_uchar::empty_array(temp_image._page_size * num_pages);
6035
6036 for (int z = 0; z < num_pages; ++z) {
6037 unsigned char *dest = temp_image._image.p() + z * temp_image._page_size;
6038 unsigned const char *src = uncompressed_image->_image.p() + z * uncompressed_image->_page_size;
6039
6040 for (int y = 0; y < virtual_y_size; ++y) {
6041 memcpy(dest, src, virtual_x_size);
6042 src += virtual_x_size;
6043 dest += x_size;
6044 }
6045 }
6046
6047 uncompressed_image = &temp_image;
6048 }
6049
6050 // Create a new image to hold the compressed texture pages.
6051 RamImage &compressed_image = compressed_ram_images[n];
6052 compressed_image._page_size = (x_size * y_size * cdata->_num_components) >> 1;
6053 compressed_image._image = PTA_uchar::empty_array(compressed_image._page_size * num_pages);
6054
6055 if (cdata->_num_components == 1) {
6056 do_compress_ram_image_bc4(*uncompressed_image, compressed_image,
6057 x_size, y_size, num_pages);
6058 } else if (cdata->_num_components == 2) {
6059 do_compress_ram_image_bc5(*uncompressed_image, compressed_image,
6060 x_size, y_size, num_pages);
6061 } else {
6062 // Invalid.
6063 return false;
6064 }
6065 }
6066
6067 cdata->_ram_images.swap(compressed_ram_images);
6068 cdata->_ram_image_compression = CM_rgtc;
6069 return true;
6070 }
6071
6072#ifdef HAVE_SQUISH
6073 if (cdata->_texture_type != TT_3d_texture &&
6074 cdata->_texture_type != TT_2d_texture_array &&
6075 cdata->_component_type == T_unsigned_byte) {
6076 int squish_flags = 0;
6077 switch (compression) {
6078 case CM_dxt1:
6079 squish_flags |= squish::kDxt1;
6080 break;
6081
6082 case CM_dxt3:
6083 squish_flags |= squish::kDxt3;
6084 break;
6085
6086 case CM_dxt5:
6087 squish_flags |= squish::kDxt5;
6088 break;
6089
6090 default:
6091 break;
6092 }
6093
6094 if (squish_flags != 0) {
6095 // This compression mode is supported by squish; use it.
6096 switch (quality_level) {
6097 case QL_fastest:
6098 squish_flags |= squish::kColourRangeFit;
6099 break;
6100
6101 case QL_normal:
6102 // ColourClusterFit is just too slow for everyday use.
6103 squish_flags |= squish::kColourRangeFit;
6104 // squish_flags |= squish::kColourClusterFit;
6105 break;
6106
6107 case QL_best:
6108 squish_flags |= squish::kColourIterativeClusterFit;
6109 break;
6110
6111 default:
6112 break;
6113 }
6114
6115 if (do_squish(cdata, compression, squish_flags)) {
6116 return true;
6117 }
6118 }
6119 }
6120#endif // HAVE_SQUISH
6121
6122 return false;
6123}
6124
6125/**
6126 *
6127 */
6128bool Texture::
6129do_uncompress_ram_image(CData *cdata) {
6130 nassertr(!cdata->_ram_images.empty(), false);
6131
6132 if (cdata->_ram_image_compression == CM_rgtc) {
6133 // We should decompress RGTC ourselves, as squish doesn't support it.
6134 RamImages uncompressed_ram_images;
6135 uncompressed_ram_images.resize(cdata->_ram_images.size());
6136
6137 for (size_t n = 0; n < cdata->_ram_images.size(); ++n) {
6138 const RamImage &compressed_image = cdata->_ram_images[n];
6139
6140 int x_size = do_get_expected_mipmap_x_size(cdata, n);
6141 int y_size = do_get_expected_mipmap_y_size(cdata, n);
6142 int num_pages = do_get_expected_mipmap_num_pages(cdata, n);
6143
6144 RamImage &uncompressed_image = uncompressed_ram_images[n];
6145 uncompressed_image._page_size = do_get_expected_ram_mipmap_page_size(cdata, n);
6146 uncompressed_image._image = PTA_uchar::empty_array(uncompressed_image._page_size * num_pages);
6147
6148 if (cdata->_num_components == 1) {
6149 do_uncompress_ram_image_bc4(compressed_image, uncompressed_image,
6150 x_size, y_size, num_pages);
6151 } else if (cdata->_num_components == 2) {
6152 do_uncompress_ram_image_bc5(compressed_image, uncompressed_image,
6153 x_size, y_size, num_pages);
6154 } else {
6155 // Invalid.
6156 return false;
6157 }
6158 }
6159 cdata->_ram_images.swap(uncompressed_ram_images);
6160 cdata->_ram_image_compression = CM_off;
6161 return true;
6162 }
6163
6164#ifdef HAVE_SQUISH
6165 if (cdata->_texture_type != TT_3d_texture &&
6166 cdata->_texture_type != TT_2d_texture_array &&
6167 cdata->_component_type == T_unsigned_byte) {
6168 int squish_flags = 0;
6169 switch (cdata->_ram_image_compression) {
6170 case CM_dxt1:
6171 squish_flags |= squish::kDxt1;
6172 break;
6173
6174 case CM_dxt3:
6175 squish_flags |= squish::kDxt3;
6176 break;
6177
6178 case CM_dxt5:
6179 squish_flags |= squish::kDxt5;
6180 break;
6181
6182 default:
6183 break;
6184 }
6185
6186 if (squish_flags != 0) {
6187 // This compression mode is supported by squish; use it.
6188 if (do_unsquish(cdata, squish_flags)) {
6189 return true;
6190 }
6191 }
6192 }
6193#endif // HAVE_SQUISH
6194 return false;
6195}
6196
6197/**
6198 * Compresses a RAM image using BC4 compression.
6199 */
6200void Texture::
6201do_compress_ram_image_bc4(const RamImage &uncompressed_image,
6202 RamImage &compressed_image,
6203 int x_size, int y_size, int num_pages) {
6204 int x_blocks = (x_size >> 2);
6205 int y_blocks = (y_size >> 2);
6206
6207 // NB. This algorithm isn't fully optimal, since it doesn't try to make use
6208 // of the secondary interpolation mode supported by BC4. This is not
6209 // important for most textures, but it may be added in the future.
6210
6211 nassertv((size_t)x_blocks * (size_t)y_blocks * 4 * 4 <= uncompressed_image._page_size);
6212 nassertv((size_t)x_size * (size_t)y_size == uncompressed_image._page_size);
6213
6214 static const int remap[] = {1, 7, 6, 5, 4, 3, 2, 0};
6215
6216 for (int z = 0; z < num_pages; ++z) {
6217 unsigned char *dest = compressed_image._image.p() + z * compressed_image._page_size;
6218 unsigned const char *src = uncompressed_image._image.p() + z * uncompressed_image._page_size;
6219
6220 // Convert one 4 x 4 block at a time.
6221 for (int y = 0; y < y_blocks; ++y) {
6222 for (int x = 0; x < x_blocks; ++x) {
6223 int a, b, c, d;
6224 float fac, add;
6225 unsigned char minv, maxv;
6226 unsigned const char *blk = src;
6227
6228 // Find the minimum and maximum value in the block.
6229 minv = blk[0];
6230 maxv = blk[0];
6231 minv = min(blk[1], minv); maxv = max(blk[1], maxv);
6232 minv = min(blk[2], minv); maxv = max(blk[2], maxv);
6233 minv = min(blk[3], minv); maxv = max(blk[3], maxv);
6234 blk += x_size;
6235 minv = min(blk[0], minv); maxv = max(blk[0], maxv);
6236 minv = min(blk[1], minv); maxv = max(blk[1], maxv);
6237 minv = min(blk[2], minv); maxv = max(blk[2], maxv);
6238 minv = min(blk[3], minv); maxv = max(blk[3], maxv);
6239 blk += x_size;
6240 minv = min(blk[0], minv); maxv = max(blk[0], maxv);
6241 minv = min(blk[1], minv); maxv = max(blk[1], maxv);
6242 minv = min(blk[2], minv); maxv = max(blk[2], maxv);
6243 minv = min(blk[3], minv); maxv = max(blk[3], maxv);
6244 blk += x_size;
6245 minv = min(blk[0], minv); maxv = max(blk[0], maxv);
6246 minv = min(blk[1], minv); maxv = max(blk[1], maxv);
6247 minv = min(blk[2], minv); maxv = max(blk[2], maxv);
6248 minv = min(blk[3], minv); maxv = max(blk[3], maxv);
6249
6250 // Now calculate the index for each pixel.
6251 blk = src;
6252 if (maxv > minv) {
6253 fac = 7.5f / (maxv - minv);
6254 } else {
6255 fac = 0;
6256 }
6257 add = -minv * fac;
6258 a = (remap[(int)(blk[0] * fac + add)])
6259 | (remap[(int)(blk[1] * fac + add)] << 3)
6260 | (remap[(int)(blk[2] * fac + add)] << 6)
6261 | (remap[(int)(blk[3] * fac + add)] << 9);
6262 blk += x_size;
6263 b = (remap[(int)(blk[0] * fac + add)] << 4)
6264 | (remap[(int)(blk[1] * fac + add)] << 7)
6265 | (remap[(int)(blk[2] * fac + add)] << 10)
6266 | (remap[(int)(blk[3] * fac + add)] << 13);
6267 blk += x_size;
6268 c = (remap[(int)(blk[0] * fac + add)])
6269 | (remap[(int)(blk[1] * fac + add)] << 3)
6270 | (remap[(int)(blk[2] * fac + add)] << 6)
6271 | (remap[(int)(blk[3] * fac + add)] << 9);
6272 blk += x_size;
6273 d = (remap[(int)(blk[0] * fac + add)] << 4)
6274 | (remap[(int)(blk[1] * fac + add)] << 7)
6275 | (remap[(int)(blk[2] * fac + add)] << 10)
6276 | (remap[(int)(blk[3] * fac + add)] << 13);
6277
6278 *(dest++) = maxv;
6279 *(dest++) = minv;
6280 *(dest++) = a & 0xff;
6281 *(dest++) = (a >> 8) | (b & 0xf0);
6282 *(dest++) = b >> 8;
6283 *(dest++) = c & 0xff;
6284 *(dest++) = (c >> 8) | (d & 0xf0);
6285 *(dest++) = d >> 8;
6286
6287 // Advance to the beginning of the next 4x4 block.
6288 src += 4;
6289 }
6290 src += x_size * 3;
6291 }
6293 }
6294}
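// A worked example of the quantization above: for a block whose minimum is 0
// and maximum is 224, a pixel value of 96 yields (int)(96 * 7.5 / 224) = 3,
// and remap[3] = 5 selects the BC4 palette entry (3 * maxv + 4 * minv) / 7 =
// 96, so that value survives the round trip exactly.  Buckets 0 and 7 map
// (via remap) to indices 1 and 0, i.e. the stored minv and maxv endpoints.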
6295
6296/**
6297 * Compresses a RAM image using BC5 compression.
6298 */
6299void Texture::
6300do_compress_ram_image_bc5(const RamImage &uncompressed_image,
6301 RamImage &compressed_image,
6302 int x_size, int y_size, int num_pages) {
6303 int x_blocks = (x_size >> 2);
6304 int y_blocks = (y_size >> 2);
6305 int stride = x_size * 2;
6306
6307 // BC5 uses the same compression algorithm as BC4, except repeated for two
6308 // channels.
6309
6310 nassertv((size_t)x_blocks * (size_t)y_blocks * 4 * 4 * 2 <= uncompressed_image._page_size);
6311 nassertv((size_t)stride * (size_t)y_size == uncompressed_image._page_size);
6312
6313 static const int remap[] = {1, 7, 6, 5, 4, 3, 2, 0};
6314
6315 for (int z = 0; z < num_pages; ++z) {
6316 unsigned char *dest = compressed_image._image.p() + z * compressed_image._page_size;
6317 unsigned const char *src = uncompressed_image._image.p() + z * uncompressed_image._page_size;
6318
6319 // Convert one 4 x 4 block at a time.
6320 for (int y = 0; y < y_blocks; ++y) {
6321 for (int x = 0; x < x_blocks; ++x) {
6322 int a, b, c, d;
6323 float fac, add;
6324 unsigned char minv, maxv;
6325 unsigned const char *blk = src;
6326
6327 // Find the minimum and maximum red value in the block.
6328 minv = blk[0];
6329 maxv = blk[0];
6330 minv = min(blk[2], minv); maxv = max(blk[2], maxv);
6331 minv = min(blk[4], minv); maxv = max(blk[4], maxv);
6332 minv = min(blk[6], minv); maxv = max(blk[6], maxv);
6333 blk += stride;
6334 minv = min(blk[0], minv); maxv = max(blk[0], maxv);
6335 minv = min(blk[2], minv); maxv = max(blk[2], maxv);
6336 minv = min(blk[4], minv); maxv = max(blk[4], maxv);
6337 minv = min(blk[6], minv); maxv = max(blk[6], maxv);
6338 blk += stride;
6339 minv = min(blk[0], minv); maxv = max(blk[0], maxv);
6340 minv = min(blk[2], minv); maxv = max(blk[2], maxv);
6341 minv = min(blk[4], minv); maxv = max(blk[4], maxv);
6342 minv = min(blk[6], minv); maxv = max(blk[6], maxv);
6343 blk += stride;
6344 minv = min(blk[0], minv); maxv = max(blk[0], maxv);
6345 minv = min(blk[2], minv); maxv = max(blk[2], maxv);
6346 minv = min(blk[4], minv); maxv = max(blk[4], maxv);
6347 minv = min(blk[6], minv); maxv = max(blk[6], maxv);
6348
6349 // Now calculate the index for each pixel.
6350 if (maxv > minv) {
6351 fac = 7.5f / (maxv - minv);
6352 } else {
6353 fac = 0;
6354 }
6355 add = -minv * fac;
6356 blk = src;
6357 a = (remap[(int)(blk[0] * fac + add)])
6358 | (remap[(int)(blk[2] * fac + add)] << 3)
6359 | (remap[(int)(blk[4] * fac + add)] << 6)
6360 | (remap[(int)(blk[6] * fac + add)] << 9);
6361 blk += stride;
6362 b = (remap[(int)(blk[0] * fac + add)] << 4)
6363 | (remap[(int)(blk[2] * fac + add)] << 7)
6364 | (remap[(int)(blk[4] * fac + add)] << 10)
6365 | (remap[(int)(blk[6] * fac + add)] << 13);
6366 blk += stride;
6367 c = (remap[(int)(blk[0] * fac + add)])
6368 | (remap[(int)(blk[2] * fac + add)] << 3)
6369 | (remap[(int)(blk[4] * fac + add)] << 6)
6370 | (remap[(int)(blk[6] * fac + add)] << 9);
6371 blk += stride;
6372 d = (remap[(int)(blk[0] * fac + add)] << 4)
6373 | (remap[(int)(blk[2] * fac + add)] << 7)
6374 | (remap[(int)(blk[4] * fac + add)] << 10)
6375 | (remap[(int)(blk[6] * fac + add)] << 13);
6376
6377 *(dest++) = maxv;
6378 *(dest++) = minv;
6379 *(dest++) = a & 0xff;
6380 *(dest++) = (a >> 8) | (b & 0xf0);
6381 *(dest++) = b >> 8;
6382 *(dest++) = c & 0xff;
6383 *(dest++) = (c >> 8) | (d & 0xf0);
6384 *(dest++) = d >> 8;
6385
6386 // Find the minimum and maximum green value in the block.
6387 blk = src + 1;
6388 minv = blk[0];
6389 maxv = blk[0];
6390 minv = min(blk[2], minv); maxv = max(blk[2], maxv);
6391 minv = min(blk[4], minv); maxv = max(blk[4], maxv);
6392 minv = min(blk[6], minv); maxv = max(blk[6], maxv);
6393 blk += stride;
6394 minv = min(blk[0], minv); maxv = max(blk[0], maxv);
6395 minv = min(blk[2], minv); maxv = max(blk[2], maxv);
6396 minv = min(blk[4], minv); maxv = max(blk[4], maxv);
6397 minv = min(blk[6], minv); maxv = max(blk[6], maxv);
6398 blk += stride;
6399 minv = min(blk[0], minv); maxv = max(blk[0], maxv);
6400 minv = min(blk[2], minv); maxv = max(blk[2], maxv);
6401 minv = min(blk[4], minv); maxv = max(blk[4], maxv);
6402 minv = min(blk[6], minv); maxv = max(blk[6], maxv);
6403 blk += stride;
6404 minv = min(blk[0], minv); maxv = max(blk[0], maxv);
6405 minv = min(blk[2], minv); maxv = max(blk[2], maxv);
6406 minv = min(blk[4], minv); maxv = max(blk[4], maxv);
6407 minv = min(blk[6], minv); maxv = max(blk[6], maxv);
6408
6409 // Now calculate the index for each pixel.
6410 if (maxv > minv) {
6411 fac = 7.5f / (maxv - minv);
6412 } else {
6413 fac = 0;
6414 }
6415 add = -minv * fac;
6416 blk = src + 1;
6417 a = (remap[(int)(blk[0] * fac + add)])
6418 | (remap[(int)(blk[2] * fac + add)] << 3)
6419 | (remap[(int)(blk[4] * fac + add)] << 6)
6420 | (remap[(int)(blk[6] * fac + add)] << 9);
6421 blk += stride;
6422 b = (remap[(int)(blk[0] * fac + add)] << 4)
6423 | (remap[(int)(blk[2] * fac + add)] << 7)
6424 | (remap[(int)(blk[4] * fac + add)] << 10)
6425 | (remap[(int)(blk[6] * fac + add)] << 13);
6426 blk += stride;
6427 c = (remap[(int)(blk[0] * fac + add)])
6428 | (remap[(int)(blk[2] * fac + add)] << 3)
6429 | (remap[(int)(blk[4] * fac + add)] << 6)
6430 | (remap[(int)(blk[6] * fac + add)] << 9);
6431 blk += stride;
6432 d = (remap[(int)(blk[0] * fac + add)] << 4)
6433 | (remap[(int)(blk[2] * fac + add)] << 7)
6434 | (remap[(int)(blk[4] * fac + add)] << 10)
6435 | (remap[(int)(blk[6] * fac + add)] << 13);
6436
6437 *(dest++) = maxv;
6438 *(dest++) = minv;
6439 *(dest++) = a & 0xff;
6440 *(dest++) = (a >> 8) | (b & 0xf0);
6441 *(dest++) = b >> 8;
6442 *(dest++) = c & 0xff;
6443 *(dest++) = (c >> 8) | (d & 0xf0);
6444 *(dest++) = d >> 8;
6445
6446 // Advance to the beginning of the next 4x4 block.
6447 src += 8;
6448 }
6449 src += stride * 3;
6450 }
6452 }
6453}
6454
6455/**
6456 * Decompresses a RAM image compressed using BC4.
6457 */
6458void Texture::
6459do_uncompress_ram_image_bc4(const RamImage &compressed_image,
6460 RamImage &uncompressed_image,
6461 int x_size, int y_size, int num_pages) {
6462 int x_blocks = (x_size >> 2);
6463 int y_blocks = (y_size >> 2);
6464
6465 for (int z = 0; z < num_pages; ++z) {
6466 unsigned char *dest = uncompressed_image._image.p() + z * uncompressed_image._page_size;
6467 unsigned const char *src = compressed_image._image.p() + z * compressed_image._page_size;
6468
6469 // Unconvert one 4 x 4 block at a time.
6470 uint8_t tbl[8];
6471 for (int y = 0; y < y_blocks; ++y) {
6472 for (int x = 0; x < x_blocks; ++x) {
6473 unsigned char *blk = dest;
6474 tbl[0] = src[0];
6475 tbl[1] = src[1];
6476 if (tbl[0] > tbl[1]) {
6477 tbl[2] = (tbl[0] * 6 + tbl[1] * 1) / 7.0f;
6478 tbl[3] = (tbl[0] * 5 + tbl[1] * 2) / 7.0f;
6479 tbl[4] = (tbl[0] * 4 + tbl[1] * 3) / 7.0f;
6480 tbl[5] = (tbl[0] * 3 + tbl[1] * 4) / 7.0f;
6481 tbl[6] = (tbl[0] * 2 + tbl[1] * 5) / 7.0f;
6482 tbl[7] = (tbl[0] * 1 + tbl[1] * 6) / 7.0f;
6483 } else {
6484 tbl[2] = (tbl[0] * 4 + tbl[1] * 1) / 5.0f;
6485 tbl[3] = (tbl[0] * 3 + tbl[1] * 2) / 5.0f;
6486 tbl[4] = (tbl[0] * 2 + tbl[1] * 3) / 5.0f;
6487 tbl[5] = (tbl[0] * 1 + tbl[1] * 4) / 5.0f;
6488 tbl[6] = 0;
6489 tbl[7] = 255;
6490 }
6491 int v = src[2] + (src[3] << 8) + (src[4] << 16);
6492 blk[0] = tbl[v & 0x7];
6493 blk[1] = tbl[(v & 0x000038) >> 3];
6494 blk[2] = tbl[(v & 0x0001c0) >> 6];
6495 blk[3] = tbl[(v & 0x000e00) >> 9];
6496 blk += x_size;
6497 blk[0] = tbl[(v & 0x007000) >> 12];
6498 blk[1] = tbl[(v & 0x038000) >> 15];
6499 blk[2] = tbl[(v & 0x1c0000) >> 18];
6500 blk[3] = tbl[(v & 0xe00000) >> 21];
6501 blk += x_size;
6502 v = src[5] + (src[6] << 8) + (src[7] << 16);
6503 blk[0] = tbl[v & 0x7];
6504 blk[1] = tbl[(v & 0x000038) >> 3];
6505 blk[2] = tbl[(v & 0x0001c0) >> 6];
6506 blk[3] = tbl[(v & 0x000e00) >> 9];
6507 blk += x_size;
6508 blk[0] = tbl[(v & 0x007000) >> 12];
6509 blk[1] = tbl[(v & 0x038000) >> 15];
6510 blk[2] = tbl[(v & 0x1c0000) >> 18];
6511 blk[3] = tbl[(v & 0xe00000) >> 21];
6512 src += 8;
6513 dest += 4;
6514 }
6515 dest += x_size * 3;
6516 }
6518 }
6519}
6520
6521/**
6522 * Decompresses a RAM image compressed using BC5.
6523 */
6524void Texture::
6525do_uncompress_ram_image_bc5(const RamImage &compressed_image,
6526 RamImage &uncompressed_image,
6527 int x_size, int y_size, int num_pages) {
6528 int x_blocks = (x_size >> 2);
6529 int y_blocks = (y_size >> 2);
6530 int stride = x_size * 2;
6531
6532 for (int z = 0; z < num_pages; ++z) {
6533 unsigned char *dest = uncompressed_image._image.p() + z * uncompressed_image._page_size;
6534 unsigned const char *src = compressed_image._image.p() + z * compressed_image._page_size;
6535
6536 // Unconvert one 4 x 4 block at a time.
6537 uint8_t red[8];
6538 uint8_t grn[8];
6539 for (int y = 0; y < y_blocks; ++y) {
6540 for (int x = 0; x < x_blocks; ++x) {
6541 unsigned char *blk = dest;
6542 red[0] = src[0];
6543 red[1] = src[1];
6544 if (red[0] > red[1]) {
6545 red[2] = (red[0] * 6 + red[1] * 1) / 7.0f;
6546 red[3] = (red[0] * 5 + red[1] * 2) / 7.0f;
6547 red[4] = (red[0] * 4 + red[1] * 3) / 7.0f;
6548 red[5] = (red[0] * 3 + red[1] * 4) / 7.0f;
6549 red[6] = (red[0] * 2 + red[1] * 5) / 7.0f;
6550 red[7] = (red[0] * 1 + red[1] * 6) / 7.0f;
6551 } else {
6552 red[2] = (red[0] * 4 + red[1] * 1) / 5.0f;
6553 red[3] = (red[0] * 3 + red[1] * 2) / 5.0f;
6554 red[4] = (red[0] * 2 + red[1] * 3) / 5.0f;
6555 red[5] = (red[0] * 1 + red[1] * 4) / 5.0f;
6556 red[6] = 0;
6557 red[7] = 255;
6558 }
6559 grn[0] = src[8];
6560 grn[1] = src[9];
6561 if (grn[0] > grn[1]) {
6562 grn[2] = (grn[0] * 6 + grn[1] * 1) / 7.0f;
6563 grn[3] = (grn[0] * 5 + grn[1] * 2) / 7.0f;
6564 grn[4] = (grn[0] * 4 + grn[1] * 3) / 7.0f;
6565 grn[5] = (grn[0] * 3 + grn[1] * 4) / 7.0f;
6566 grn[6] = (grn[0] * 2 + grn[1] * 5) / 7.0f;
6567 grn[7] = (grn[0] * 1 + grn[1] * 6) / 7.0f;
6568 } else {
6569 grn[2] = (grn[0] * 4 + grn[1] * 1) / 5.0f;
6570 grn[3] = (grn[0] * 3 + grn[1] * 2) / 5.0f;
6571 grn[4] = (grn[0] * 2 + grn[1] * 3) / 5.0f;
6572 grn[5] = (grn[0] * 1 + grn[1] * 4) / 5.0f;
6573 grn[6] = 0;
6574 grn[7] = 255;
6575 }
6576 int r = src[2] + (src[3] << 8) + (src[4] << 16);
6577 int g = src[10] + (src[11] << 8) + (src[12] << 16);
6578 blk[0] = red[r & 0x7];
6579 blk[1] = grn[g & 0x7];
6580 blk[2] = red[(r & 0x000038) >> 3];
6581 blk[3] = grn[(g & 0x000038) >> 3];
6582 blk[4] = red[(r & 0x0001c0) >> 6];
6583 blk[5] = grn[(g & 0x0001c0) >> 6];
6584 blk[6] = red[(r & 0x000e00) >> 9];
6585 blk[7] = grn[(g & 0x000e00) >> 9];
6586 blk += stride;
6587 blk[0] = red[(r & 0x007000) >> 12];
6588 blk[1] = grn[(g & 0x007000) >> 12];
6589 blk[2] = red[(r & 0x038000) >> 15];
6590 blk[3] = grn[(g & 0x038000) >> 15];
6591 blk[4] = red[(r & 0x1c0000) >> 18];
6592 blk[5] = grn[(g & 0x1c0000) >> 18];
6593 blk[6] = red[(r & 0xe00000) >> 21];
6594 blk[7] = grn[(g & 0xe00000) >> 21];
6595 blk += stride;
6596 r = src[5] + (src[6] << 8) + (src[7] << 16);
6597 g = src[13] + (src[14] << 8) + (src[15] << 16);
6598 blk[0] = red[r & 0x7];
6599 blk[1] = grn[g & 0x7];
6600 blk[2] = red[(r & 0x000038) >> 3];
6601 blk[3] = grn[(g & 0x000038) >> 3];
6602 blk[4] = red[(r & 0x0001c0) >> 6];
6603 blk[5] = grn[(g & 0x0001c0) >> 6];
6604 blk[6] = red[(r & 0x000e00) >> 9];
6605 blk[7] = grn[(g & 0x000e00) >> 9];
6606 blk += stride;
6607 blk[0] = red[(r & 0x007000) >> 12];
6608 blk[1] = grn[(g & 0x007000) >> 12];
6609 blk[2] = red[(r & 0x038000) >> 15];
6610 blk[3] = grn[(g & 0x038000) >> 15];
6611 blk[4] = red[(r & 0x1c0000) >> 18];
6612 blk[5] = grn[(g & 0x1c0000) >> 18];
6613 blk[6] = red[(r & 0xe00000) >> 21];
6614 blk[7] = grn[(g & 0xe00000) >> 21];
6615 src += 16;
6616 dest += 8;
6617 }
6618 dest += stride * 3;
6619 }
6621 }
6622}
6623
6624/**
6625 *
6626 */
6627bool Texture::
6628do_has_all_ram_mipmap_images(const CData *cdata) const {
6629 if (cdata->_ram_images.empty() || cdata->_ram_images[0]._image.empty()) {
6630 // If we don't even have a base image, the answer is no.
6631 return false;
6632 }
6633 if (!uses_mipmaps()) {
6634 // If we have a base image and don't require mipmapping, the answer is
6635 // yes.
6636 return true;
6637 }
6638
6639 // Check that we have enough mipmap levels to meet the size requirements.
6640 int size = max(cdata->_x_size, max(cdata->_y_size, cdata->_z_size));
6641 int n = 0;
6642 int x = 1;
6643 while (x < size) {
6644 x = (x << 1);
6645 ++n;
6646 if (n >= (int)cdata->_ram_images.size() || cdata->_ram_images[n]._image.empty()) {
6647 return false;
6648 }
6649 }
6650
6651 return true;
6652}
6653
6654/**
6655 * Considers whether the z_size (or num_views) should automatically be
6656 * adjusted when the user loads a new page. Returns true if the z size is
6657 * valid, false otherwise.
6658 *
6659 * Assumes the lock is already held.
6660 */
6661bool Texture::
6662do_reconsider_z_size(CData *cdata, int z, const LoaderOptions &options) {
6663 if (z >= cdata->_z_size * cdata->_num_views) {
6664 bool num_views_specified = true;
6665 if (options.get_texture_flags() & LoaderOptions::TF_multiview) {
6666       // This flag is false if it is a multiview texture with an unspecified
6667       // number of views.  It is true if it is not a multiview texture, or if
6668       // it is but the number of views is explicitly specified.
6669 num_views_specified = (options.get_texture_num_views() != 0);
6670 }
6671
6672 if (num_views_specified &&
6673 (cdata->_texture_type == Texture::TT_3d_texture ||
6674 cdata->_texture_type == Texture::TT_2d_texture_array)) {
6675 // If we're loading a page past _z_size, treat it as an implicit request
6676 // to enlarge _z_size. However, this is only legal if this is, in fact,
6677 // a 3-d texture or a 2d texture array (cube maps always have z_size 6,
6678 // and other types have z_size 1).
6679 nassertr(cdata->_num_views != 0, false);
6680 cdata->_z_size = (z / cdata->_num_views) + 1;
6681
6682 } else if (cdata->_z_size != 0) {
6683 // In the case of a 2-d texture or cube map, or a 3-d texture with an
6684 // unspecified _num_views, assume we're loading views of a multiview
6685 // texture.
6686 cdata->_num_views = (z / cdata->_z_size) + 1;
6687
6688 } else {
6689 // The first image loaded sets an implicit z-size.
6690 cdata->_z_size = 1;
6691 }
6692
6693 // Increase the size of the data buffer to make room for the new texture
6694 // level.
6695 do_allocate_pages(cdata);
6696 }
6697
6698 return true;
6699}
6700
6701/**
6702 * Called internally by do_reconsider_z_size() to allocate new memory in
6703 * _ram_images[0] for the new number of pages.
6704 *
6705 * Assumes the lock is already held.
6706 */
6707void Texture::
6708do_allocate_pages(CData *cdata) {
6709 size_t new_size = do_get_expected_ram_image_size(cdata);
6710 if (!cdata->_ram_images.empty() &&
6711 !cdata->_ram_images[0]._image.empty() &&
6712 new_size > cdata->_ram_images[0]._image.size()) {
6713 cdata->_ram_images[0]._image.insert(cdata->_ram_images[0]._image.end(), new_size - cdata->_ram_images[0]._image.size(), 0);
6714 nassertv(cdata->_ram_images[0]._image.size() == new_size);
6715 }
6716}
6717
6718/**
6719 * Resets the internal Texture properties when a new image file is loaded.
6720 * Returns true if the new image is valid, false otherwise.
6721 *
6722 * Assumes the lock is already held.
6723 */
6724bool Texture::
6725do_reconsider_image_properties(CData *cdata, int x_size, int y_size, int num_components,
6726 Texture::ComponentType component_type, int z,
6727 const LoaderOptions &options) {
6728 if (!cdata->_loaded_from_image || num_components != cdata->_num_components || component_type != cdata->_component_type) {
6729 // Come up with a default format based on the number of channels. But
6730 // only do this the first time the file is loaded, or if the number of
6731 // channels in the image changes on subsequent loads.
6732
6733 // TODO: handle sRGB properly
6734 switch (num_components) {
6735 case 1:
6736 cdata->_format = F_luminance;
6737 break;
6738
6739 case 2:
6740 cdata->_format = F_luminance_alpha;
6741 break;
6742
6743 case 3:
6744 cdata->_format = F_rgb;
6745 break;
6746
6747 case 4:
6748 cdata->_format = F_rgba;
6749 break;
6750
6751 default:
6752 // Eh?
6753 nassert_raise("unexpected channel count");
6754 cdata->_format = F_rgb;
6755 return false;
6756 }
6757 }
6758
6759 if (!cdata->_loaded_from_image) {
6760 if ((options.get_texture_flags() & LoaderOptions::TF_allow_1d) &&
6761 cdata->_texture_type == TT_2d_texture && x_size != 1 && y_size == 1) {
6762 // If we're loading an Nx1 size texture, infer a 1-d texture type.
6763 cdata->_texture_type = TT_1d_texture;
6764 }
6765
6766#ifndef NDEBUG
6767 switch (cdata->_texture_type) {
6768 case TT_1d_texture:
6769 case TT_buffer_texture:
6770 nassertr(y_size == 1, false);
6771 break;
6772 case TT_cube_map:
6773 case TT_cube_map_array:
6774 nassertr(x_size == y_size, false);
6775 break;
6776 default:
6777 break;
6778 }
6779#endif
6780 if ((cdata->_x_size != x_size)||(cdata->_y_size != y_size)) {
6781 do_set_pad_size(cdata, 0, 0, 0);
6782 }
6783 cdata->_x_size = x_size;
6784 cdata->_y_size = y_size;
6785 cdata->_num_components = num_components;
6786 do_set_component_type(cdata, component_type);
6787
6788 } else {
6789 if (cdata->_x_size != x_size ||
6790 cdata->_y_size != y_size ||
6791 cdata->_num_components != num_components ||
6792 cdata->_component_type != component_type) {
6793 gobj_cat.error()
6794 << "Texture properties have changed for texture " << get_name()
6795 << " page " << z << ".\n";
6796 return false;
6797 }
6798 }
6799
6800 return true;
6801}
6802
6803/**
6804 *
6805 */
6806bool Texture::
6807do_rescale_texture(CData *cdata) {
6808 int new_x_size = cdata->_x_size;
6809 int new_y_size = cdata->_y_size;
6810 if (cdata->_z_size * cdata->_num_views != 1) {
6811 nassert_raise("rescale_texture() doesn't support 3-d or multiview textures.");
6812 return false;
6813 }
6814
6815 if (do_adjust_this_size(cdata, new_x_size, new_y_size, get_name(), false)) {
6816 // OK, we have to scale the image.
6817 PNMImage orig_image;
6818 if (!do_store_one(cdata, orig_image, 0, 0)) {
6819 gobj_cat.warning()
6820 << "Couldn't get image in rescale_texture()\n";
6821 return false;
6822 }
6823
6824 gobj_cat.info()
6825 << "Resizing " << get_name() << " to " << new_x_size << " x "
6826 << new_y_size << "\n";
6827 PNMImage new_image(new_x_size, new_y_size, orig_image.get_num_channels(),
6828 orig_image.get_maxval(), orig_image.get_type(),
6829 orig_image.get_color_space());
6830 new_image.quick_filter_from(orig_image);
6831
6832 do_clear_ram_image(cdata);
6833 cdata->inc_image_modified();
6834 cdata->_x_size = new_x_size;
6835 cdata->_y_size = new_y_size;
6836 if (!do_load_one(cdata, new_image, get_name(), 0, 0, LoaderOptions())) {
6837 return false;
6838 }
6839
6840 return true;
6841 }
6842
6843 // Maybe we should pad the image.
6844 int pad_x_size = 0;
6845 int pad_y_size = 0;
6846 if (do_get_auto_texture_scale(cdata) == ATS_pad) {
6847 new_x_size = cdata->_x_size;
6848 new_y_size = cdata->_y_size;
6849 if (do_adjust_this_size(cdata, new_x_size, new_y_size, get_name(), true)) {
6850 pad_x_size = new_x_size - cdata->_x_size;
6851 pad_y_size = new_y_size - cdata->_y_size;
6852
6853 PNMImage orig_image;
6854 if (!do_store_one(cdata, orig_image, 0, 0)) {
6855 gobj_cat.warning()
6856 << "Couldn't get image in rescale_texture()\n";
6857 return false;
6858 }
6859 PNMImage new_image(new_x_size, new_y_size, orig_image.get_num_channels(),
6860 orig_image.get_maxval(), orig_image.get_type(),
6861 orig_image.get_color_space());
6862 new_image.copy_sub_image(orig_image, 0, new_y_size - orig_image.get_y_size());
6863
6864 do_clear_ram_image(cdata);
6865 cdata->_loaded_from_image = false;
6866 cdata->inc_image_modified();
6867 if (!do_load_one(cdata, new_image, get_name(), 0, 0, LoaderOptions())) {
6868 return false;
6869 }
6870
6871 do_set_pad_size(cdata, pad_x_size, pad_y_size, 0);
6872 return true;
6873 }
6874 }
6875
6876 // No changes needed.
6877 return false;
6878}
6879
6880/**
6881 *
6882 */
6883PT(Texture) Texture::
6884make_copy_impl() const {
6885 CDReader cdata(_cycler);
6886 return do_make_copy(cdata);
6887}
6888
6889/**
6890 *
6891 */
6892PT(Texture) Texture::
6893do_make_copy(const CData *cdata) const {
6894 PT(Texture) tex = new Texture(get_name());
6895 CDWriter cdata_tex(tex->_cycler, true);
6896 tex->do_assign(cdata_tex, this, cdata);
6897 return tex;
6898}
6899
6900/**
6901 * The internal implementation of operator =(). Assumes the lock is already
6902 * held on both Textures.
6903 */
6904void Texture::
6905do_assign(CData *cdata, const Texture *copy, const CData *cdata_copy) {
6906 cdata->do_assign(cdata_copy);
6907}
6908
6909/**
6910 * The protected implementation of clear(). Assumes the lock is already held.
6911 */
6912void Texture::
6913do_clear(CData *cdata) {
6914 Texture tex;
6915 tex.local_object();
6916 CDReader cdata_tex(tex._cycler);
6917 do_assign(cdata, &tex, cdata_tex);
6918
6919 cdata->inc_properties_modified();
6920 cdata->inc_image_modified();
6921 cdata->inc_simple_image_modified();
6922}
6923
6924/**
6925 *
6926 */
6927void Texture::
6928do_setup_texture(CData *cdata, Texture::TextureType texture_type,
6929 int x_size, int y_size, int z_size,
6930 Texture::ComponentType component_type,
6931 Texture::Format format) {
6932 switch (texture_type) {
6933 case TT_1d_texture:
6934 nassertv(y_size == 1 && z_size == 1);
6935 break;
6936
6937 case TT_2d_texture:
6938 nassertv(z_size == 1);
6939 break;
6940
6941 case TT_3d_texture:
6942 break;
6943
6944 case TT_2d_texture_array:
6945 break;
6946
6947 case TT_cube_map:
6948 // Cube maps must always consist of six square images.
6949 nassertv(x_size == y_size && z_size == 6);
6950
6951 // In principle the wrap mode shouldn't mean anything to a cube map, but
6952 // some drivers seem to misbehave if it's other than
6953 // SamplerState::WM_clamp.
6954 cdata->_default_sampler.set_wrap_u(SamplerState::WM_clamp);
6955 cdata->_default_sampler.set_wrap_v(SamplerState::WM_clamp);
6956 cdata->_default_sampler.set_wrap_w(SamplerState::WM_clamp);
6957 break;
6958
6959 case TT_cube_map_array:
6960     // Cube map arrays must have a z_size that is a multiple of 6.
6961 nassertv(x_size == y_size && z_size % 6 == 0);
6962
6963 cdata->_default_sampler.set_wrap_u(SamplerState::WM_clamp);
6964 cdata->_default_sampler.set_wrap_v(SamplerState::WM_clamp);
6965 cdata->_default_sampler.set_wrap_w(SamplerState::WM_clamp);
6966 break;
6967
6968 case TT_buffer_texture:
6969 nassertv(y_size == 1 && z_size == 1);
6970 break;
6971
6972 case TT_1d_texture_array:
6973 nassertv(z_size == 1);
6974 break;
6975 }
6976
6977 if (texture_type != TT_2d_texture) {
6978 do_clear_simple_ram_image(cdata);
6979 }
6980
6981 cdata->_texture_type = texture_type;
6982 cdata->_x_size = x_size;
6983 cdata->_y_size = y_size;
6984 cdata->_z_size = z_size;
6985 cdata->_num_views = 1;
6986 do_set_component_type(cdata, component_type);
6987 do_set_format(cdata, format);
6988
6989 do_clear_ram_image(cdata);
6990 do_set_pad_size(cdata, 0, 0, 0);
6991 cdata->_orig_file_x_size = 0;
6992 cdata->_orig_file_y_size = 0;
6993 cdata->_loaded_from_image = false;
6994 cdata->_loaded_from_txo = false;
6995 cdata->_has_read_pages = false;
6996 cdata->_has_read_mipmaps = false;
6997}
6998
6999/**
7000 *
7001 */
7002void Texture::
7003do_set_format(CData *cdata, Texture::Format format) {
7004 if (format == cdata->_format) {
7005 return;
7006 }
7007 cdata->_format = format;
7008 cdata->inc_properties_modified();
7009
7010 switch (cdata->_format) {
7011 case F_color_index:
7012 case F_depth_stencil:
7013 case F_depth_component:
7014 case F_depth_component16:
7015 case F_depth_component24:
7016 case F_depth_component32:
7017 case F_red:
7018 case F_green:
7019 case F_blue:
7020 case F_alpha:
7021 case F_luminance:
7022 case F_r16:
7023 case F_r16i:
7024 case F_sluminance:
7025 case F_r32i:
7026 case F_r32:
7027 case F_r8i:
7028 cdata->_num_components = 1;
7029 break;
7030
7031 case F_luminance_alpha:
7032 case F_luminance_alphamask:
7033 case F_rg16:
7034 case F_sluminance_alpha:
7035 case F_rg32:
7036 case F_rg8i:
7037 case F_rg:
7038 case F_rg16i:
7039 case F_rg32i:
7040 cdata->_num_components = 2;
7041 break;
7042
7043 case F_rgb:
7044 case F_rgb5:
7045 case F_rgb8:
7046 case F_rgb12:
7047 case F_rgb332:
7048 case F_rgb16:
7049 case F_srgb:
7050 case F_rgb32:
7051 case F_rgb8i:
7052 case F_r11_g11_b10:
7053 case F_rgb9_e5:
7054 case F_rgb16i:
7055 case F_rgb32i:
7056 cdata->_num_components = 3;
7057 break;
7058
7059 case F_rgba:
7060 case F_rgbm:
7061 case F_rgba4:
7062 case F_rgba5:
7063 case F_rgba8:
7064 case F_rgba12:
7065 case F_rgba16:
7066 case F_rgba32:
7067 case F_srgb_alpha:
7068 case F_rgba8i:
7069 case F_rgb10_a2:
7070 case F_rgba16i:
7071 case F_rgba32i:
7072 cdata->_num_components = 4;
7073 break;
7074 }
7075}
7076
7077/**
7078 *
7079 */
7080void Texture::
7081do_set_component_type(CData *cdata, Texture::ComponentType component_type) {
7082 cdata->_component_type = component_type;
7083
7084 switch (component_type) {
7085 case T_unsigned_byte:
7086 case T_byte:
7087 cdata->_component_width = 1;
7088 break;
7089
7090 case T_unsigned_short:
7091 case T_short:
7092 case T_half_float:
7093 cdata->_component_width = 2;
7094 break;
7095
7096 case T_float:
7097 case T_unsigned_int_24_8:
7098 case T_int:
7099 case T_unsigned_int:
7100 cdata->_component_width = 4;
7101 break;
7102 }
7103}
7104
7105/**
7106 *
7107 */
7108void Texture::
7109do_set_x_size(CData *cdata, int x_size) {
7110 if (cdata->_x_size != x_size) {
7111 cdata->_x_size = x_size;
7112 cdata->inc_image_modified();
7113 do_clear_ram_image(cdata);
7114 do_set_pad_size(cdata, 0, 0, 0);
7115 }
7116}
7117
7118/**
7119 *
7120 */
7121void Texture::
7122do_set_y_size(CData *cdata, int y_size) {
7123 if (cdata->_y_size != y_size) {
7124 nassertv((cdata->_texture_type != Texture::TT_buffer_texture &&
7125 cdata->_texture_type != Texture::TT_1d_texture) || y_size == 1);
7126 cdata->_y_size = y_size;
7127 cdata->inc_image_modified();
7128 do_clear_ram_image(cdata);
7129 do_set_pad_size(cdata, 0, 0, 0);
7130 }
7131}
7132
7133/**
7134 * Changes the z size indicated for the texture. This also implicitly unloads
7135 * the texture if it has already been loaded.
7136 */
7137void Texture::
7138do_set_z_size(CData *cdata, int z_size) {
7139 if (cdata->_z_size != z_size) {
7140 nassertv((cdata->_texture_type == Texture::TT_3d_texture) ||
7141 (cdata->_texture_type == Texture::TT_cube_map && z_size == 6) ||
7142 (cdata->_texture_type == Texture::TT_cube_map_array && z_size % 6 == 0) ||
7143 (cdata->_texture_type == Texture::TT_2d_texture_array) || (z_size == 1));
7144 cdata->_z_size = z_size;
7145 cdata->inc_image_modified();
7146 do_clear_ram_image(cdata);
7147 do_set_pad_size(cdata, 0, 0, 0);
7148 }
7149}
7150
7151/**
7152 *
7153 */
7154void Texture::
7155do_set_num_views(CData *cdata, int num_views) {
7156 nassertv(num_views >= 1);
7157 if (cdata->_num_views != num_views) {
7158 cdata->_num_views = num_views;
7159 if (do_has_ram_image(cdata)) {
7160 cdata->inc_image_modified();
7161 do_clear_ram_image(cdata);
7162 }
7163 do_set_pad_size(cdata, 0, 0, 0);
7164 }
7165}
7166
7167/**
7168 *
7169 */
7170void Texture::
7171do_set_wrap_u(CData *cdata, SamplerState::WrapMode wrap) {
7172 if (cdata->_default_sampler.get_wrap_u() != wrap) {
7173 cdata->inc_properties_modified();
7174 cdata->_default_sampler.set_wrap_u(wrap);
7175 }
7176}
7177
7178/**
7179 *
7180 */
7181void Texture::
7182do_set_wrap_v(CData *cdata, SamplerState::WrapMode wrap) {
7183 if (cdata->_default_sampler.get_wrap_v() != wrap) {
7184 cdata->inc_properties_modified();
7185 cdata->_default_sampler.set_wrap_v(wrap);
7186 }
7187}
7188
7189/**
7190 *
7191 */
7192void Texture::
7193do_set_wrap_w(CData *cdata, SamplerState::WrapMode wrap) {
7194 if (cdata->_default_sampler.get_wrap_w() != wrap) {
7195 cdata->inc_properties_modified();
7196 cdata->_default_sampler.set_wrap_w(wrap);
7197 }
7198}
7199
7200/**
7201 *
7202 */
7203void Texture::
7204do_set_minfilter(CData *cdata, SamplerState::FilterType filter) {
7205 if (cdata->_default_sampler.get_minfilter() != filter) {
7206 cdata->inc_properties_modified();
7207 cdata->_default_sampler.set_minfilter(filter);
7208 }
7209}
7210
7211/**
7212 *
7213 */
7214void Texture::
7215do_set_magfilter(CData *cdata, SamplerState::FilterType filter) {
7216 if (cdata->_default_sampler.get_magfilter() != filter) {
7217 cdata->inc_properties_modified();
7218 cdata->_default_sampler.set_magfilter(filter);
7219 }
7220}
7221
7222/**
7223 *
7224 */
7225void Texture::
7226do_set_anisotropic_degree(CData *cdata, int anisotropic_degree) {
7227 if (cdata->_default_sampler.get_anisotropic_degree() != anisotropic_degree) {
7228 cdata->inc_properties_modified();
7229 cdata->_default_sampler.set_anisotropic_degree(anisotropic_degree);
7230 }
7231}
7232
7233/**
7234 *
7235 */
7236void Texture::
7237do_set_border_color(CData *cdata, const LColor &color) {
7238 if (cdata->_default_sampler.get_border_color() != color) {
7239 cdata->inc_properties_modified();
7240 cdata->_default_sampler.set_border_color(color);
7241 }
7242}
7243
7244/**
7245 *
7246 */
7247void Texture::
7248do_set_compression(CData *cdata, Texture::CompressionMode compression) {
7249 if (cdata->_compression != compression) {
7250 cdata->inc_properties_modified();
7251 cdata->_compression = compression;
7252
7253 if (do_has_ram_image(cdata)) {
7254 bool has_compression = do_has_compression(cdata);
7255 bool has_ram_image_compression = (cdata->_ram_image_compression != CM_off);
7256       if (has_compression != has_ram_image_compression ||
7257           has_compression) {
7258 // Reload if we're turning compression on or off, or if we're changing
7259 // the compression mode to a different kind of compression.
7260 do_reload(cdata);
7261 }
7262 }
7263 }
7264}
7265
7266/**
7267 *
7268 */
7269void Texture::
7270do_set_quality_level(CData *cdata, Texture::QualityLevel quality_level) {
7271 if (cdata->_quality_level != quality_level) {
7272 cdata->inc_properties_modified();
7273 cdata->_quality_level = quality_level;
7274 }
7275}
7276
7277/**
7278 *
7279 */
7280bool Texture::
7281do_has_compression(const CData *cdata) const {
7282 if (cdata->_compression == CM_default) {
7283 if (cdata->_texture_type != Texture::TT_buffer_texture) {
7284 return compressed_textures;
7285 } else {
7286 return false;
7287 }
7288 } else {
7289 return (cdata->_compression != CM_off);
7290 }
7291}
7292
7293/**
7294 * The protected implementation of has_ram_image(). Assumes the lock is
7295 * already held.
7296 */
7297bool Texture::
7298do_has_ram_image(const CData *cdata) const {
7299 return !cdata->_ram_images.empty() && !cdata->_ram_images[0]._image.empty();
7300}
7301
7302/**
7303 * The protected implementation of has_uncompressed_ram_image(). Assumes the
7304 * lock is already held.
7305 */
7306bool Texture::
7307do_has_uncompressed_ram_image(const CData *cdata) const {
7308 return !cdata->_ram_images.empty() && !cdata->_ram_images[0]._image.empty() && cdata->_ram_image_compression == CM_off;
7309}
7310
7311/**
7312 *
7313 */
7314CPTA_uchar Texture::
7315do_get_ram_image(CData *cdata) {
7316 if (!do_has_ram_image(cdata) && do_can_reload(cdata)) {
7317 do_reload_ram_image(cdata, true);
7318
7319 if (do_has_ram_image(cdata)) {
7320 // Normally, we don't update the cdata->_modified semaphores in a
7321 // do_blah method, but we'll make an exception in this case, because
7322 // it's easiest to modify these here, and only when we know it's needed.
7323 cdata->inc_image_modified();
7324 cdata->inc_properties_modified();
7325 }
7326 }
7327
7328 if (cdata->_ram_images.empty()) {
7329 return CPTA_uchar(get_class_type());
7330 }
7331
7332 return cdata->_ram_images[0]._image;
7333}
7334
7335/**
7336 *
7337 */
7338CPTA_uchar Texture::
7339do_get_uncompressed_ram_image(CData *cdata) {
7340 if (!cdata->_ram_images.empty() && cdata->_ram_image_compression != CM_off) {
7341 // We have an image in-ram, but it's compressed. Try to uncompress it
7342 // first.
7343 if (do_uncompress_ram_image(cdata)) {
7344 if (gobj_cat.is_debug()) {
7345 gobj_cat.debug()
7346 << "Uncompressed " << get_name() << "\n";
7347 }
7348 return cdata->_ram_images[0]._image;
7349 }
7350 }
7351
7352 // Couldn't uncompress the existing image. Try to reload it.
7353 if ((!do_has_ram_image(cdata) || cdata->_ram_image_compression != CM_off) && do_can_reload(cdata)) {
7354 do_reload_ram_image(cdata, false);
7355 }
7356
7357 if (!cdata->_ram_images.empty() && cdata->_ram_image_compression != CM_off) {
7358 // Great, now we have an image.
7359 if (do_uncompress_ram_image(cdata)) {
7360 gobj_cat.info()
7361 << "Uncompressed " << get_name() << "\n";
7362 return cdata->_ram_images[0]._image;
7363 }
7364 }
7365
7366 if (cdata->_ram_images.empty() || cdata->_ram_image_compression != CM_off) {
7367 return CPTA_uchar(get_class_type());
7368 }
7369
7370 return cdata->_ram_images[0]._image;
7371}
7372
7373/**
7374 * Returns the uncompressed system-RAM image data associated with the texture.
7375 * Rather than just returning a pointer to the data, like
7376 * get_uncompressed_ram_image, this function first processes the data and
7377 * reorders the components using the specified format string, and places these
7378 * into a new char array.
7379 *
7380 * The 'format' argument should specify in which order the components of the
7381 * texture must be. For example, valid format strings are "RGBA", "GA",
7382 * "ABRG" or "AAA". A component can also be written as "0" or "1", which
7383 * means an empty/black or a full/white channel, respectively.
7384 *
7385 * This function is particularly useful for copying an in-memory image to a
7386 * different library (for example, PIL or wxWidgets) that requires a different
7387 * component order than Panda's internal format, BGRA. Note, however, that
7388 * this conversion can still be too slow if you want to do it every frame, and
7389 * should thus be avoided for that purpose.
7390 *
7391 * The only requirement for the reordering is that an uncompressed image must
7392 * be available. If the RAM image is compressed, this will attempt to reload
7393 * the texture from disk; if it doesn't find an uncompressed image there, it
7394 * will return NULL.
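 * A usage sketch, assuming an ordinary 4-channel, 8-bit texture whose pixels
 * are wanted in straight RGBA order (for instance for a PIL-style buffer):
 *
 *   CPTA_uchar rgba = tex->get_ram_image_as("RGBA");
 *   if (rgba != nullptr) {
 *     // rgba.p() points at x_size * y_size * 4 reordered bytes.
 *   }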
7395 */
7396 CPTA_uchar Texture::
7397 get_ram_image_as(const string &requested_format) {
7398 CDWriter cdata(_cycler, false);
7399 string format = upcase(requested_format);
7400
7401 // Make sure we can grab something that's uncompressed.
7402 CPTA_uchar data = do_get_uncompressed_ram_image(cdata);
7403 if (data == nullptr) {
7404 gobj_cat.error() << "Couldn't find an uncompressed RAM image!\n";
7405 return CPTA_uchar(get_class_type());
7406 }
7407 size_t imgsize = (size_t)cdata->_x_size * (size_t)cdata->_y_size *
7408 (size_t)cdata->_z_size * (size_t)cdata->_num_views;
7409 nassertr(cdata->_num_components > 0 && cdata->_num_components <= 4, CPTA_uchar(get_class_type()));
7410 nassertr(data.size() == (size_t)(cdata->_component_width * cdata->_num_components * imgsize), CPTA_uchar(get_class_type()));
7411
7412 // Check if the format is already what we have internally.
7413 if ((cdata->_num_components == 1 && format.size() == 1) ||
7414 (cdata->_num_components == 2 && format.size() == 2 && format.at(1) == 'A' && format.at(0) != 'A') ||
7415 (cdata->_num_components == 3 && format == "BGR") ||
7416 (cdata->_num_components == 4 && format == "BGRA")) {
7417 // The format string is already our format, so we just need to copy it.
7418 return CPTA_uchar(data);
7419 }
7420
7421 // Check if we have an alpha channel, and remember which channel we use.
7422 int alpha = -1;
7423 if (Texture::has_alpha(cdata->_format)) {
7424 alpha = cdata->_num_components - 1;
7425 }
7426
7427 // Validate the format beforehand.
7428 for (size_t i = 0; i < format.size(); ++i) {
7429 if (format[i] != 'B' && format[i] != 'G' && format[i] != 'R' &&
7430 format[i] != 'A' && format[i] != '0' && format[i] != '1') {
7431 gobj_cat.error() << "Unexpected component character '"
7432 << format[i] << "', expected one of RGBA01!\n";
7433 return CPTA_uchar(get_class_type());
7434 }
7435 }
7436
7437 // Create a new empty array that can hold our image.
7438 PTA_uchar newdata = PTA_uchar::empty_array(imgsize * format.size() * cdata->_component_width, get_class_type());
7439
7440 // These ifs are for optimization of commonly used image types.
7441 if (cdata->_component_width == 1) {
7442 if (format == "RGBA" && cdata->_num_components == 4) {
7443 const uint32_t *src = (const uint32_t *)data.p();
7444 uint32_t *dst = (uint32_t *)newdata.p();
7445
7446 for (size_t p = 0; p < imgsize; ++p) {
7447 uint32_t v = *src++;
7448 *dst++ = ((v & 0xff00ff00u)) |
7449 ((v & 0x00ff0000u) >> 16) |
7450 ((v & 0x000000ffu) << 16);
7451 }
7452 return newdata;
7453 }
7454 if (format == "RGB" && cdata->_num_components == 4) {
7455 const uint32_t *src = (const uint32_t *)data.p();
7456 uint32_t *dst = (uint32_t *)newdata.p();
7457
7458 // Convert blocks of 4 pixels at a time, so that we can treat both the
7459 // source and destination as 32-bit integers.
7460 int blocks = imgsize >> 2;
7461 for (int i = 0; i < blocks; ++i) {
7462 uint32_t v0 = *src++;
7463 uint32_t v1 = *src++;
7464 uint32_t v2 = *src++;
7465 uint32_t v3 = *src++;
7466 *dst++ = ((v0 & 0x00ff0000u) >> 16) |
7467 ((v0 & 0x0000ff00u)) |
7468 ((v0 & 0x000000ffu) << 16) |
7469 ((v1 & 0x00ff0000u) << 8);
7470 *dst++ = ((v1 & 0x0000ff00u) >> 8) |
7471 ((v1 & 0x000000ffu) << 8) |
7472 ((v2 & 0x00ff0000u)) |
7473 ((v2 & 0x0000ff00u) << 16);
7474 *dst++ = ((v2 & 0x000000ffu)) |
7475 ((v3 & 0x00ff0000u) >> 8) |
7476 ((v3 & 0x0000ff00u) << 8) |
7477 ((v3 & 0x000000ffu) << 24);
7478 }
7479
7480 // If the image size wasn't a multiple of 4, we may have a handful of
7481 // pixels left over. Convert those the slower way.
7482 uint8_t *tail = (uint8_t *)dst;
7483 for (int i = (imgsize & ~0x3); i < imgsize; ++i) {
7484 uint32_t v = *src++;
7485 *tail++ = (v & 0x00ff0000u) >> 16;
7486 *tail++ = (v & 0x0000ff00u) >> 8;
7487 *tail++ = (v & 0x000000ffu);
7488 }
7489 return newdata;
7490 }
7491 if (format == "BGR" && cdata->_num_components == 4) {
7492 const uint32_t *src = (const uint32_t *)data.p();
7493 uint32_t *dst = (uint32_t *)newdata.p();
7494
7495 // Convert blocks of 4 pixels at a time, so that we can treat both the
7496 // source and destination as 32-bit integers.
7497 int blocks = imgsize >> 2;
7498 for (int i = 0; i < blocks; ++i) {
7499 uint32_t v0 = *src++;
7500 uint32_t v1 = *src++;
7501 uint32_t v2 = *src++;
7502 uint32_t v3 = *src++;
7503 *dst++ = (v0 & 0x00ffffffu) | ((v1 & 0x000000ffu) << 24);
7504 *dst++ = ((v1 & 0x00ffff00u) >> 8) | ((v2 & 0x0000ffffu) << 16);
7505 *dst++ = ((v2 & 0x00ff0000u) >> 16) | ((v3 & 0x00ffffffu) << 8);
7506 }
7507
7508 // If the image size wasn't a multiple of 4, we may have a handful of
7509 // pixels left over. Convert those the slower way.
7510 uint8_t *tail = (uint8_t *)dst;
7511 for (int i = (imgsize & ~0x3); i < imgsize; ++i) {
7512 uint32_t v = *src++;
7513 *tail++ = (v & 0x000000ffu);
7514 *tail++ = (v & 0x0000ff00u) >> 8;
7515 *tail++ = (v & 0x00ff0000u) >> 16;
7516 }
7517 return newdata;
7518 }
7519 const uint8_t *src = (const uint8_t *)data.p();
7520 uint8_t *dst = (uint8_t *)newdata.p();
7521
7522 if (format == "RGB" && cdata->_num_components == 3) {
7523 for (int i = 0; i < imgsize; ++i) {
7524 *dst++ = src[2];
7525 *dst++ = src[1];
7526 *dst++ = src[0];
7527 src += 3;
7528 }
7529 return newdata;
7530 }
7531 if (format == "A" && cdata->_num_components != 3) {
7532 // We can generally rely on alpha to be the last component.
7533 for (size_t p = 0; p < imgsize; ++p) {
7534 dst[p] = src[alpha];
7535 src += cdata->_num_components;
7536 }
7537 return newdata;
7538 }
7539 // Fallback case for other 8-bit-per-channel formats.
7540 for (size_t p = 0; p < imgsize; ++p) {
7541 for (size_t i = 0; i < format.size(); ++i) {
7542 if (format[i] == 'B' || (cdata->_num_components <= 2 && format[i] != 'A')) {
7543 *dst++ = src[0];
7544 } else if (format[i] == 'G') {
7545 *dst++ = src[1];
7546 } else if (format[i] == 'R') {
7547 *dst++ = src[2];
7548 } else if (format[i] == 'A') {
7549 if (alpha >= 0) {
7550 *dst++ = src[alpha];
7551 } else {
7552 *dst++ = 0xff;
7553 }
7554 } else if (format[i] == '1') {
7555 *dst++ = 0xff;
7556 } else {
7557 *dst++ = 0x00;
7558 }
7559 }
7560 src += cdata->_num_components;
7561 }
7562 return newdata;
7563 }
7564
7565 // The slow and general case.
7566 for (size_t p = 0; p < imgsize; ++p) {
7567 for (size_t i = 0; i < format.size(); ++i) {
7568 int component = 0;
7569 if (format[i] == 'B' || (cdata->_num_components <= 2 && format[i] != 'A')) {
7570 component = 0;
7571 } else if (format[i] == 'G') {
7572 component = 1;
7573 } else if (format[i] == 'R') {
7574 component = 2;
7575 } else if (format[i] == 'A') {
7576 if (alpha >= 0) {
7577 component = alpha;
7578 } else {
7579 memset((void*)(newdata + (p * format.size() + i) * cdata->_component_width), -1, cdata->_component_width);
7580 continue;
7581 }
7582 } else if (format[i] == '1') {
7583 memset((void*)(newdata + (p * format.size() + i) * cdata->_component_width), -1, cdata->_component_width);
7584 continue;
7585 } else {
7586 memset((void*)(newdata + (p * format.size() + i) * cdata->_component_width), 0, cdata->_component_width);
7587 continue;
7588 }
7589 memcpy((void*)(newdata + (p * format.size() + i) * cdata->_component_width),
7590 (void*)(data + (p * cdata->_num_components + component) * cdata->_component_width),
7591 cdata->_component_width);
7592 }
7593 }
7594 return newdata;
7595}
7596
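// A brief usage sketch for get_ram_image_as() above (illustrative only; the
// texture path is hypothetical, and the nullptr check mirrors the convention
// used elsewhere in this file):
//
//   PT(Texture) tex = TexturePool::load_texture("maps/example.png");
//   CPTA_uchar rgba = tex->get_ram_image_as("RGBA");
//   if (rgba == nullptr) {
//     // No uncompressed RAM image was available, and none could be reloaded.
//   }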
7597/**
7598 *
7599 */
7600void Texture::
7601do_set_simple_ram_image(CData *cdata, CPTA_uchar image, int x_size, int y_size) {
7602 nassertv(cdata->_texture_type == TT_2d_texture);
7603 size_t expected_page_size = (size_t)(x_size * y_size * 4);
7604 nassertv(image.size() == expected_page_size);
7605
7606 cdata->_simple_x_size = x_size;
7607 cdata->_simple_y_size = y_size;
7608 cdata->_simple_ram_image._image = image.cast_non_const();
7609 cdata->_simple_ram_image._page_size = image.size();
7610 cdata->_simple_image_date_generated = (int32_t)time(nullptr);
7611 cdata->inc_simple_image_modified();
7612}
7613
7614/**
7615 *
7616 */
7617int Texture::
7618do_get_expected_num_mipmap_levels(const CData *cdata) const {
7619 if (cdata->_texture_type == Texture::TT_buffer_texture) {
7620 return 1;
7621 }
7622 int size = max(cdata->_x_size, cdata->_y_size);
7623 if (cdata->_texture_type == Texture::TT_3d_texture) {
7624 size = max(size, cdata->_z_size);
7625 }
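  // For example, a 256x192 texture has a largest dimension of 256 and
  // therefore 9 expected levels: 256, 128, 64, 32, 16, 8, 4, 2, 1.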
7626 int count = 1;
7627 while (size > 1) {
7628 size >>= 1;
7629 ++count;
7630 }
7631 return count;
7632}
7633
7634/**
7635 *
7636 */
7637size_t Texture::
7638do_get_ram_mipmap_page_size(const CData *cdata, int n) const {
7639 if (cdata->_ram_image_compression != CM_off) {
7640 if (n >= 0 && n < (int)cdata->_ram_images.size()) {
7641 return cdata->_ram_images[n]._page_size;
7642 }
7643 return 0;
7644 } else {
7645 return do_get_expected_ram_mipmap_page_size(cdata, n);
7646 }
7647}
7648
7649/**
7650 *
7651 */
7652int Texture::
7653do_get_expected_mipmap_x_size(const CData *cdata, int n) const {
7654 int size = max(cdata->_x_size, 1);
7655 while (n > 0 && size > 1) {
7656 size >>= 1;
7657 --n;
7658 }
7659 return size;
7660}
7661
7662/**
7663 *
7664 */
7665int Texture::
7666do_get_expected_mipmap_y_size(const CData *cdata, int n) const {
7667 int size = max(cdata->_y_size, 1);
7668 while (n > 0 && size > 1) {
7669 size >>= 1;
7670 --n;
7671 }
7672 return size;
7673}
7674
7675/**
7676 *
7677 */
7678int Texture::
7679do_get_expected_mipmap_z_size(const CData *cdata, int n) const {
7680 // 3-D textures have a different number of pages for each mipmap level.
7681 // Other kinds of textures--especially cube map textures--always have the
7682 // same number.
7683 if (cdata->_texture_type == Texture::TT_3d_texture) {
7684 int size = max(cdata->_z_size, 1);
7685 while (n > 0 && size > 1) {
7686 size >>= 1;
7687 --n;
7688 }
7689 return size;
7690
7691 } else {
7692 return cdata->_z_size;
7693 }
7694}
7695
7696/**
7697 *
7698 */
7699void Texture::
7700do_clear_simple_ram_image(CData *cdata) {
7701 cdata->_simple_x_size = 0;
7702 cdata->_simple_y_size = 0;
7703 cdata->_simple_ram_image._image.clear();
7704 cdata->_simple_ram_image._page_size = 0;
7705 cdata->_simple_image_date_generated = 0;
7706
7707 // We allow this exception: we update the _simple_image_modified here, since
7708 // no one really cares much about that anyway, and it's convenient to do it
7709 // here.
7710 cdata->inc_simple_image_modified();
7711}
7712
7713/**
7714 *
7715 */
7716void Texture::
7717do_clear_ram_mipmap_images(CData *cdata) {
7718 if (!cdata->_ram_images.empty()) {
7719 cdata->_ram_images.erase(cdata->_ram_images.begin() + 1, cdata->_ram_images.end());
7720 }
7721}
7722
7723/**
7724 * Generates the RAM mipmap images for this texture, first uncompressing it as
7725 * required. Will recompress the image if it was originally compressed,
7726 * unless allow_recompress is false.
7727 */
7728void Texture::
7729do_generate_ram_mipmap_images(CData *cdata, bool allow_recompress) {
7730 nassertv(do_has_ram_image(cdata));
7731
7732 if (do_get_expected_num_mipmap_levels(cdata) == 1) {
7733 // Don't bother.
7734 return;
7735 }
7736
7737 RamImage orig_compressed_image;
7738 CompressionMode orig_compression_mode = CM_off;
7739
7740 if (cdata->_ram_image_compression != CM_off) {
7741 // The RAM image is compressed. This means we need to uncompress it in
7742 // order to generate mipmap images. Save the original first, to avoid
7743 // lossy recompression.
7744 orig_compressed_image = cdata->_ram_images[0];
7745 orig_compression_mode = cdata->_ram_image_compression;
7746
7747 // Now try to get the uncompressed source image.
7748 do_get_uncompressed_ram_image(cdata);
7749
7750 if (cdata->_ram_image_compression != CM_off) {
7751 gobj_cat.error()
7752 << "Cannot generate mipmap levels for image with compression "
7753 << cdata->_ram_image_compression << "\n";
7754 return;
7755 }
7756 }
7757
7758 do_clear_ram_mipmap_images(cdata);
7759
7760 if (gobj_cat.is_debug()) {
7761 gobj_cat.debug()
7762 << "Generating mipmap levels for " << *this << "\n";
7763 }
7764
7765 if (cdata->_texture_type == Texture::TT_3d_texture && cdata->_z_size != 1) {
7766 // Eek, a 3-D texture.
7767 int x_size = cdata->_x_size;
7768 int y_size = cdata->_y_size;
7769 int z_size = cdata->_z_size;
7770 int n = 0;
7771 while (x_size > 1 || y_size > 1 || z_size > 1) {
7772 cdata->_ram_images.push_back(RamImage());
7773 do_filter_3d_mipmap_level(cdata, cdata->_ram_images[n + 1], cdata->_ram_images[n],
7774 x_size, y_size, z_size);
7775 x_size = max(x_size >> 1, 1);
7776 y_size = max(y_size >> 1, 1);
7777 z_size = max(z_size >> 1, 1);
7778 ++n;
7779 }
7780
7781 } else {
7782 // A 1-D, 2-D, or cube map texture.
7783 int x_size = cdata->_x_size;
7784 int y_size = cdata->_y_size;
7785 int n = 0;
7786 while (x_size > 1 || y_size > 1) {
7787 cdata->_ram_images.push_back(RamImage());
7788 do_filter_2d_mipmap_pages(cdata, cdata->_ram_images[n + 1], cdata->_ram_images[n],
7789 x_size, y_size);
7790 x_size = max(x_size >> 1, 1);
7791 y_size = max(y_size >> 1, 1);
7792 ++n;
7793 }
7794 }
7795
7796 if (orig_compression_mode != CM_off && allow_recompress) {
7797 // Now attempt to recompress the mipmap images according to the original
7798 // compression mode. We don't need to bother compressing the first image
7799 // (it was already compressed, after all), so temporarily remove it from
7800 // the top of the mipmap stack, and compress all of the rest of them
7801 // instead.
7802 nassertv(cdata->_ram_images.size() > 1);
7803 int l0_x_size = cdata->_x_size;
7804 int l0_y_size = cdata->_y_size;
7805 int l0_z_size = cdata->_z_size;
7806 cdata->_x_size = do_get_expected_mipmap_x_size(cdata, 1);
7807 cdata->_y_size = do_get_expected_mipmap_y_size(cdata, 1);
7808 cdata->_z_size = do_get_expected_mipmap_z_size(cdata, 1);
7809 RamImage uncompressed_image = cdata->_ram_images[0];
7810 cdata->_ram_images.erase(cdata->_ram_images.begin());
7811
7812 bool success = do_compress_ram_image(cdata, orig_compression_mode, QL_default, nullptr);
7813 // Now restore the toplevel image.
7814 if (success) {
7815 if (gobj_cat.is_debug()) {
7816 gobj_cat.debug()
7817 << "Compressed " << get_name() << " generated mipmaps with "
7818 << cdata->_ram_image_compression << "\n";
7819 }
7820 cdata->_ram_images.insert(cdata->_ram_images.begin(), orig_compressed_image);
7821 } else {
7822 cdata->_ram_images.insert(cdata->_ram_images.begin(), uncompressed_image);
7823 }
7824 cdata->_x_size = l0_x_size;
7825 cdata->_y_size = l0_y_size;
7826 cdata->_z_size = l0_z_size;
7827 }
7828}
7829
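// An illustrative call sequence for the method above, through the public
// Texture interface (tex is a hypothetical Texture pointer; the wrappers are
// assumed to forward to the do_* methods shown here):
//
//   tex->generate_ram_mipmap_images();
//   int levels = tex->get_num_ram_mipmap_images();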
7830/**
7831 *
7832 */
7833void Texture::
7834do_set_pad_size(CData *cdata, int x, int y, int z) {
7835 if (x > cdata->_x_size) {
7836 x = cdata->_x_size;
7837 }
7838 if (y > cdata->_y_size) {
7839 y = cdata->_y_size;
7840 }
7841 if (z > cdata->_z_size) {
7842 z = cdata->_z_size;
7843 }
7844
7845 cdata->_pad_x_size = x;
7846 cdata->_pad_y_size = y;
7847 cdata->_pad_z_size = z;
7848}
7849
7850/**
7851 * Returns true if we can safely call do_reload_ram_image() in order to make
7852 * the image available, or false if we shouldn't do this (because we know
7853 * a priori that it wouldn't work anyway).
7854 */
7855bool Texture::
7856do_can_reload(const CData *cdata) const {
7857 return (cdata->_loaded_from_image && !cdata->_fullpath.empty());
7858}
7859
7860/**
7861 *
7862 */
7863bool Texture::
7864do_reload(CData *cdata) {
7865 if (do_can_reload(cdata)) {
7866 do_clear_ram_image(cdata);
7867 do_reload_ram_image(cdata, true);
7868 if (do_has_ram_image(cdata)) {
7869 // An explicit call to reload() should increment image_modified.
7870 cdata->inc_image_modified();
7871 return true;
7872 }
7873 return false;
7874 }
7875
7876 // We don't have a filename to load from.
7877 return false;
7878}
7879
7880/**
7881 * Returns true if there is a rawdata image that we have available to write to
7882 * the bam stream. For a normal Texture, this is the same thing as
7883 * do_has_ram_image(), but a movie texture might define it differently.
7884 */
7885bool Texture::
7886do_has_bam_rawdata(const CData *cdata) const {
7887 return do_has_ram_image(cdata);
7888}
7889
7890/**
7891 * If do_has_bam_rawdata() returned false, this attempts to reload the rawdata
7892 * image if possible.
7893 */
7894void Texture::
7895do_get_bam_rawdata(CData *cdata) {
7896 do_get_ram_image(cdata);
7897}
7898
7899/**
7900 * Internal method to convert pixel data from the indicated PNMImage into the
7901 * given ram_image.
7902 */
7903void Texture::
7904convert_from_pnmimage(PTA_uchar &image, size_t page_size,
7905 int row_stride, int x, int y, int z,
7906 const PNMImage &pnmimage, int num_components,
7907 int component_width) {
7908 int x_size = pnmimage.get_x_size();
7909 int y_size = pnmimage.get_y_size();
7910 xelval maxval = pnmimage.get_maxval();
7911 int pixel_size = num_components * component_width;
7912
7913 int row_skip = 0;
7914 if (row_stride == 0) {
7915 row_stride = x_size;
7916 } else {
7917 row_skip = (row_stride - x_size) * pixel_size;
7918 nassertv(row_skip >= 0);
7919 }
7920
7921 bool is_grayscale = (num_components == 1 || num_components == 2);
7922 bool has_alpha = (num_components == 2 || num_components == 4);
7923 bool img_has_alpha = pnmimage.has_alpha();
7924
7925 int idx = page_size * z;
7926 nassertv(idx + page_size <= image.size());
7927 unsigned char *p = &image[idx];
7928
7929 if (x != 0 || y != 0) {
7930 p += (row_stride * y + x) * pixel_size;
7931 }
7932
7933 if (maxval == 255 && component_width == 1) {
7934 // Most common case: one byte per pixel, and the source image shows a
7935 // maxval of 255. No scaling is necessary. Because this is such a common
7936 // case, we break it out per component for best performance.
7937 const xel *array = pnmimage.get_array();
7938 switch (num_components) {
7939 case 1:
7940 for (int j = y_size-1; j >= 0; j--) {
7941 const xel *row = array + j * x_size;
7942 for (int i = 0; i < x_size; i++) {
7943 *p++ = (uchar)PPM_GETB(row[i]);
7944 }
7945 p += row_skip;
7946 }
7947 break;
7948
7949 case 2:
7950 if (img_has_alpha) {
7951 const xelval *alpha = pnmimage.get_alpha_array();
7952 for (int j = y_size-1; j >= 0; j--) {
7953 const xel *row = array + j * x_size;
7954 const xelval *alpha_row = alpha + j * x_size;
7955 for (int i = 0; i < x_size; i++) {
7956 *p++ = (uchar)PPM_GETB(row[i]);
7957 *p++ = (uchar)alpha_row[i];
7958 }
7959 p += row_skip;
7960 }
7961 } else {
7962 for (int j = y_size-1; j >= 0; j--) {
7963 const xel *row = array + j * x_size;
7964 for (int i = 0; i < x_size; i++) {
7965 *p++ = (uchar)PPM_GETB(row[i]);
7966 *p++ = (uchar)255;
7967 }
7968 p += row_skip;
7969 }
7970 }
7971 break;
7972
7973 case 3:
7974 for (int j = y_size-1; j >= 0; j--) {
7975 const xel *row = array + j * x_size;
7976 for (int i = 0; i < x_size; i++) {
7977 *p++ = (uchar)PPM_GETB(row[i]);
7978 *p++ = (uchar)PPM_GETG(row[i]);
7979 *p++ = (uchar)PPM_GETR(row[i]);
7980 }
7981 p += row_skip;
7982 }
7983 break;
7984
7985 case 4:
7986 if (img_has_alpha) {
7987 const xelval *alpha = pnmimage.get_alpha_array();
7988 for (int j = y_size-1; j >= 0; j--) {
7989 const xel *row = array + j * x_size;
7990 const xelval *alpha_row = alpha + j * x_size;
7991 for (int i = 0; i < x_size; i++) {
7992 *p++ = (uchar)PPM_GETB(row[i]);
7993 *p++ = (uchar)PPM_GETG(row[i]);
7994 *p++ = (uchar)PPM_GETR(row[i]);
7995 *p++ = (uchar)alpha_row[i];
7996 }
7997 p += row_skip;
7998 }
7999 } else {
8000 for (int j = y_size-1; j >= 0; j--) {
8001 const xel *row = array + j * x_size;
8002 for (int i = 0; i < x_size; i++) {
8003 *p++ = (uchar)PPM_GETB(row[i]);
8004 *p++ = (uchar)PPM_GETG(row[i]);
8005 *p++ = (uchar)PPM_GETR(row[i]);
8006 *p++ = (uchar)255;
8007 }
8008 p += row_skip;
8009 }
8010 }
8011 break;
8012
8013 default:
8014 nassertv(num_components >= 1 && num_components <= 4);
8015 break;
8016 }
8017
8018 } else if (maxval == 65535 && component_width == 2) {
8019 // Another possible case: two bytes per pixel, and the source image shows
8020 // a maxval of 65535. Again, no scaling is necessary.
8021 for (int j = y_size-1; j >= 0; j--) {
8022 for (int i = 0; i < x_size; i++) {
8023 if (is_grayscale) {
8024 store_unscaled_short(p, pnmimage.get_gray_val(i, j));
8025 } else {
8026 store_unscaled_short(p, pnmimage.get_blue_val(i, j));
8027 store_unscaled_short(p, pnmimage.get_green_val(i, j));
8028 store_unscaled_short(p, pnmimage.get_red_val(i, j));
8029 }
8030 if (has_alpha) {
8031 if (img_has_alpha) {
8032 store_unscaled_short(p, pnmimage.get_alpha_val(i, j));
8033 } else {
8034 store_unscaled_short(p, 65535);
8035 }
8036 }
8037 }
8038 p += row_skip;
8039 }
8040
8041 } else if (component_width == 1) {
8042 // A less common case: one byte per pixel, but the maxval is something
8043 // other than 255. In this case, we should scale the pixel values up to
8044 // the appropriate amount.
8045 double scale = 255.0 / (double)maxval;
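    // For instance, a source image with a hypothetical maxval of 31 yields
    // scale = 255/31 ~= 8.2, spreading the source range 0..31 across roughly
    // 0..255.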
8046
8047 for (int j = y_size-1; j >= 0; j--) {
8048 for (int i = 0; i < x_size; i++) {
8049 if (is_grayscale) {
8050 store_scaled_byte(p, pnmimage.get_gray_val(i, j), scale);
8051 } else {
8052 store_scaled_byte(p, pnmimage.get_blue_val(i, j), scale);
8053 store_scaled_byte(p, pnmimage.get_green_val(i, j), scale);
8054 store_scaled_byte(p, pnmimage.get_red_val(i, j), scale);
8055 }
8056 if (has_alpha) {
8057 if (img_has_alpha) {
8058 store_scaled_byte(p, pnmimage.get_alpha_val(i, j), scale);
8059 } else {
8060 store_unscaled_byte(p, 255);
8061 }
8062 }
8063 }
8064 p += row_skip;
8065 }
8066
8067 } else { // component_width == 2
8068 // Another uncommon case: two bytes per pixel, and the maxval is something
8069 // other than 65535. Again, we must scale the pixel values.
8070 double scale = 65535.0 / (double)maxval;
8071
8072 for (int j = y_size-1; j >= 0; j--) {
8073 for (int i = 0; i < x_size; i++) {
8074 if (is_grayscale) {
8075 store_scaled_short(p, pnmimage.get_gray_val(i, j), scale);
8076 } else {
8077 store_scaled_short(p, pnmimage.get_blue_val(i, j), scale);
8078 store_scaled_short(p, pnmimage.get_green_val(i, j), scale);
8079 store_scaled_short(p, pnmimage.get_red_val(i, j), scale);
8080 }
8081 if (has_alpha) {
8082 if (img_has_alpha) {
8083 store_scaled_short(p, pnmimage.get_alpha_val(i, j), 1.0);
8084 } else {
8085 store_unscaled_short(p, 65535);
8086 }
8087 }
8088 }
8089 p += row_skip;
8090 }
8091 }
8092}
8093
8094/**
8095 * Internal method to convert pixel data from the indicated PfmFile into the
8096 * given ram_image.
8097 */
8098void Texture::
8099convert_from_pfm(PTA_uchar &image, size_t page_size, int z,
8100 const PfmFile &pfm, int num_components, int component_width) {
8101 nassertv(component_width == 4); // Currently only PN_float32 is expected.
8102 int x_size = pfm.get_x_size();
8103 int y_size = pfm.get_y_size();
8104
8105 int idx = page_size * z;
8106 nassertv(idx + page_size <= image.size());
8107 PN_float32 *p = (PN_float32 *)&image[idx];
8108
8109 switch (num_components) {
8110 case 1:
8111 {
8112 for (int j = y_size-1; j >= 0; j--) {
8113 for (int i = 0; i < x_size; i++) {
8114 p[0] = pfm.get_channel(i, j, 0);
8115 ++p;
8116 }
8117 }
8118 }
8119 break;
8120
8121 case 2:
8122 {
8123 for (int j = y_size-1; j >= 0; j--) {
8124 for (int i = 0; i < x_size; i++) {
8125 p[0] = pfm.get_channel(i, j, 0);
8126 p[1] = pfm.get_channel(i, j, 1);
8127 p += 2;
8128 }
8129 }
8130 }
8131 break;
8132
8133 case 3:
8134 {
8135 // RGB -> BGR
8136 for (int j = y_size-1; j >= 0; j--) {
8137 for (int i = 0; i < x_size; i++) {
8138 p[0] = pfm.get_channel(i, j, 2);
8139 p[1] = pfm.get_channel(i, j, 1);
8140 p[2] = pfm.get_channel(i, j, 0);
8141 p += 3;
8142 }
8143 }
8144 }
8145 break;
8146
8147 case 4:
8148 {
8149 // RGBA -> BGRA
8150 for (int j = y_size-1; j >= 0; j--) {
8151 for (int i = 0; i < x_size; i++) {
8152 p[0] = pfm.get_channel(i, j, 2);
8153 p[1] = pfm.get_channel(i, j, 1);
8154 p[2] = pfm.get_channel(i, j, 0);
8155 p[3] = pfm.get_channel(i, j, 3);
8156 p += 4;
8157 }
8158 }
8159 }
8160 break;
8161
8162 default:
8163 nassert_raise("unexpected channel count");
8164 return;
8165 }
8166
8167 nassertv((unsigned char *)p == &image[idx] + page_size);
8168}
8169
8170/**
8171 * Internal method to convert pixel data to the indicated PNMImage from the
8172 * given ram_image.
8173 */
8174bool Texture::
8175convert_to_pnmimage(PNMImage &pnmimage, int x_size, int y_size,
8176 int num_components, ComponentType component_type,
8177 bool is_srgb, CPTA_uchar image, size_t page_size, int z) {
8178 xelval maxval = 0xff;
8179 if (component_type != T_unsigned_byte && component_type != T_byte) {
8180 maxval = 0xffff;
8181 }
8182 ColorSpace color_space = is_srgb ? CS_sRGB : CS_linear;
8183 pnmimage.clear(x_size, y_size, num_components, maxval, nullptr, color_space);
8184 bool has_alpha = pnmimage.has_alpha();
8185 bool is_grayscale = pnmimage.is_grayscale();
8186
8187 int idx = page_size * z;
8188 nassertr(idx + page_size <= image.size(), false);
8189
8190 xel *array = pnmimage.get_array();
8191 xelval *alpha = pnmimage.get_alpha_array();
8192
8193 switch (component_type) {
8194 case T_unsigned_byte:
8195 if (is_grayscale) {
8196 const unsigned char *p = &image[idx];
8197 if (has_alpha) {
8198 for (int j = y_size-1; j >= 0; j--) {
8199 xel *row = array + j * x_size;
8200 xelval *alpha_row = alpha + j * x_size;
8201 for (int i = 0; i < x_size; i++) {
8202 PPM_PUTB(row[i], *p++);
8203 alpha_row[i] = *p++;
8204 }
8205 }
8206 } else {
8207 for (int j = y_size-1; j >= 0; j--) {
8208 xel *row = array + j * x_size;
8209 for (int i = 0; i < x_size; i++) {
8210 PPM_PUTB(row[i], *p++);
8211 }
8212 }
8213 }
8214 nassertr(p == &image[idx] + page_size, false);
8215 } else {
8216 const unsigned char *p = &image[idx];
8217 if (has_alpha) {
8218 for (int j = y_size-1; j >= 0; j--) {
8219 xel *row = array + j * x_size;
8220 xelval *alpha_row = alpha + j * x_size;
8221 for (int i = 0; i < x_size; i++) {
8222 PPM_PUTB(row[i], *p++);
8223 PPM_PUTG(row[i], *p++);
8224 PPM_PUTR(row[i], *p++);
8225 alpha_row[i] = *p++;
8226 }
8227 }
8228 } else {
8229 for (int j = y_size-1; j >= 0; j--) {
8230 xel *row = array + j * x_size;
8231 for (int i = 0; i < x_size; i++) {
8232 PPM_PUTB(row[i], *p++);
8233 PPM_PUTG(row[i], *p++);
8234 PPM_PUTR(row[i], *p++);
8235 }
8236 }
8237 }
8238 nassertr(p == &image[idx] + page_size, false);
8239 }
8240 break;
8241
8242 case T_unsigned_short:
8243 {
8244 const uint16_t *p = (const uint16_t *)&image[idx];
8245
8246 for (int j = y_size-1; j >= 0; j--) {
8247 xel *row = array + j * x_size;
8248 xelval *alpha_row = alpha + j * x_size;
8249 for (int i = 0; i < x_size; i++) {
8250 PPM_PUTB(row[i], *p++);
8251 if (!is_grayscale) {
8252 PPM_PUTG(row[i], *p++);
8253 PPM_PUTR(row[i], *p++);
8254 }
8255 if (has_alpha) {
8256 alpha_row[i] = *p++;
8257 }
8258 }
8259 }
8260 nassertr((const unsigned char *)p == &image[idx] + page_size, false);
8261 }
8262 break;
8263
8264 case T_unsigned_int:
8265 {
8266 const uint32_t *p = (const uint32_t *)&image[idx];
8267
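      // Each 32-bit channel value is reduced to its high 16 bits, matching
      // the 16-bit maxval selected above for non-byte component types.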
8268 for (int j = y_size-1; j >= 0; j--) {
8269 xel *row = array + j * x_size;
8270 xelval *alpha_row = alpha + j * x_size;
8271 for (int i = 0; i < x_size; i++) {
8272 PPM_PUTB(row[i], (*p++) >> 16u);
8273 if (!is_grayscale) {
8274 PPM_PUTG(row[i], (*p++) >> 16u);
8275 PPM_PUTR(row[i], (*p++) >> 16u);
8276 }
8277 if (has_alpha) {
8278 alpha_row[i] = (*p++) >> 16u;
8279 }
8280 }
8281 }
8282 nassertr((const unsigned char *)p == &image[idx] + page_size, false);
8283 }
8284 break;
8285
8286 case T_half_float:
8287 {
8288 const unsigned char *p = &image[idx];
8289
8290 for (int j = y_size-1; j >= 0; j--) {
8291 for (int i = 0; i < x_size; i++) {
8292 pnmimage.set_blue(i, j, get_half_float(p));
8293 if (!is_grayscale) {
8294 pnmimage.set_green(i, j, get_half_float(p));
8295 pnmimage.set_red(i, j, get_half_float(p));
8296 }
8297 if (has_alpha) {
8298 pnmimage.set_alpha(i, j, get_half_float(p));
8299 }
8300 }
8301 }
8302 nassertr(p == &image[idx] + page_size, false);
8303 }
8304 break;
8305
8306 default:
8307 return false;
8308 }
8309
8310 return true;
8311}
8312
8313/**
8314 * Internal method to convert pixel data to the indicated PfmFile from the
8315 * given ram_image.
8316 */
8317bool Texture::
8318convert_to_pfm(PfmFile &pfm, int x_size, int y_size,
8319 int num_components, int component_width,
8320 CPTA_uchar image, size_t page_size, int z) {
8321 nassertr(component_width == 4, false); // Currently only PN_float32 is expected.
8322 pfm.clear(x_size, y_size, num_components);
8323
8324 int idx = page_size * z;
8325 nassertr(idx + page_size <= image.size(), false);
8326 const PN_float32 *p = (const PN_float32 *)&image[idx];
8327
8328 switch (num_components) {
8329 case 1:
8330 for (int j = y_size-1; j >= 0; j--) {
8331 for (int i = 0; i < x_size; i++) {
8332 pfm.set_channel(i, j, 0, p[0]);
8333 ++p;
8334 }
8335 }
8336 break;
8337
8338 case 2:
8339 for (int j = y_size-1; j >= 0; j--) {
8340 for (int i = 0; i < x_size; i++) {
8341 pfm.set_channel(i, j, 0, p[0]);
8342 pfm.set_channel(i, j, 1, p[1]);
8343 p += 2;
8344 }
8345 }
8346 break;
8347
8348 case 3:
8349 // BGR -> RGB
8350 for (int j = y_size-1; j >= 0; j--) {
8351 for (int i = 0; i < x_size; i++) {
8352 pfm.set_channel(i, j, 2, p[0]);
8353 pfm.set_channel(i, j, 1, p[1]);
8354 pfm.set_channel(i, j, 0, p[2]);
8355 p += 3;
8356 }
8357 }
8358 break;
8359
8360 case 4:
8361 // BGRA -> RGBA
8362 for (int j = y_size-1; j >= 0; j--) {
8363 for (int i = 0; i < x_size; i++) {
8364 pfm.set_channel(i, j, 2, p[0]);
8365 pfm.set_channel(i, j, 1, p[1]);
8366 pfm.set_channel(i, j, 0, p[2]);
8367 pfm.set_channel(i, j, 3, p[3]);
8368 p += 4;
8369 }
8370 }
8371 break;
8372
8373 default:
8374 nassert_raise("unexpected channel count");
8375 return false;
8376 }
8377
8378 nassertr((unsigned char *)p == &image[idx] + page_size, false);
8379 return true;
8380}
8381
8382/**
8383 * Called by read_dds for a DDS file in BGR8 format.
8384 */
8385PTA_uchar Texture::
8386read_dds_level_bgr8(Texture *tex, CData *cdata, const DDSHeader &header, int n, istream &in) {
8387 // This is in order B, G, R.
8388 int x_size = tex->do_get_expected_mipmap_x_size(cdata, n);
8389 int y_size = tex->do_get_expected_mipmap_y_size(cdata, n);
8390
8391 size_t size = tex->do_get_expected_ram_mipmap_page_size(cdata, n);
8392 size_t row_bytes = x_size * 3;
8393 PTA_uchar image = PTA_uchar::empty_array(size);
8394 for (int y = y_size - 1; y >= 0; --y) {
8395 unsigned char *p = image.p() + y * row_bytes;
8396 nassertr(p + row_bytes <= image.p() + size, PTA_uchar());
8397 in.read((char *)p, row_bytes);
8398 }
8399
8400 return image;
8401}
8402
8403/**
8404 * Called by read_dds for a DDS file in RGB8 format.
8405 */
8406PTA_uchar Texture::
8407read_dds_level_rgb8(Texture *tex, CData *cdata, const DDSHeader &header, int n, istream &in) {
8408 // This is in order R, G, B.
8409 int x_size = tex->do_get_expected_mipmap_x_size(cdata, n);
8410 int y_size = tex->do_get_expected_mipmap_y_size(cdata, n);
8411
8412 size_t size = tex->do_get_expected_ram_mipmap_page_size(cdata, n);
8413 size_t row_bytes = x_size * 3;
8414 PTA_uchar image = PTA_uchar::empty_array(size);
8415 for (int y = y_size - 1; y >= 0; --y) {
8416 unsigned char *p = image.p() + y * row_bytes;
8417 nassertr(p + row_bytes <= image.p() + size, PTA_uchar());
8418 in.read((char *)p, row_bytes);
8419
8420 // Now reverse the r, g, b triples.
8421 for (int x = 0; x < x_size; ++x) {
8422 unsigned char r = p[0];
8423 p[0] = p[2];
8424 p[2] = r;
8425 p += 3;
8426 }
8427 nassertr(p <= image.p() + size, PTA_uchar());
8428 }
8429
8430 return image;
8431}
8432
8433/**
8434 * Called by read_dds for a DDS file in ABGR8 format.
8435 */
8436PTA_uchar Texture::
8437read_dds_level_abgr8(Texture *tex, CData *cdata, const DDSHeader &header, int n, istream &in) {
8438 // This is laid out in order R, G, B, A.
8439 int x_size = tex->do_get_expected_mipmap_x_size(cdata, n);
8440 int y_size = tex->do_get_expected_mipmap_y_size(cdata, n);
8441
8442 size_t size = tex->do_get_expected_ram_mipmap_page_size(cdata, n);
8443 size_t row_bytes = x_size * 4;
8444 PTA_uchar image = PTA_uchar::empty_array(size);
8445 for (int y = y_size - 1; y >= 0; --y) {
8446 unsigned char *p = image.p() + y * row_bytes;
8447 in.read((char *)p, row_bytes);
8448
8449 uint32_t *pw = (uint32_t *)p;
8450 for (int x = 0; x < x_size; ++x) {
8451 uint32_t w = *pw;
8452#ifdef WORDS_BIGENDIAN
8453 // big-endian: convert R, G, B, A to B, G, R, A.
8454 w = ((w & 0xff00) << 16) | ((w & 0xff000000U) >> 16) | (w & 0xff00ff);
8455#else
8456 // little-endian: convert A, B, G, R to A, R, G, B.
8457 w = ((w & 0xff) << 16) | ((w & 0xff0000) >> 16) | (w & 0xff00ff00U);
8458#endif
8459 *pw = w;
8460 ++pw;
8461 }
8462 nassertr((unsigned char *)pw <= image.p() + size, PTA_uchar());
8463 }
8464
8465 return image;
8466}
8467
8468/**
8469 * Called by read_dds for a DDS file in RGBA8 format.
8470 */
8471PTA_uchar Texture::
8472read_dds_level_rgba8(Texture *tex, CData *cdata, const DDSHeader &header, int n, istream &in) {
8473 // This is actually laid out in order B, G, R, A.
8474 int x_size = tex->do_get_expected_mipmap_x_size(cdata, n);
8475 int y_size = tex->do_get_expected_mipmap_y_size(cdata, n);
8476
8477 size_t size = tex->do_get_expected_ram_mipmap_page_size(cdata, n);
8478 size_t row_bytes = x_size * 4;
8479 PTA_uchar image = PTA_uchar::empty_array(size);
8480 for (int y = y_size - 1; y >= 0; --y) {
8481 unsigned char *p = image.p() + y * row_bytes;
8482 nassertr(p + row_bytes <= image.p() + size, PTA_uchar());
8483 in.read((char *)p, row_bytes);
8484 }
8485
8486 return image;
8487}
8488
8489/**
8490 * Called by read_dds for a DDS file in ABGR16 format.
8491 */
8492PTA_uchar Texture::
8493read_dds_level_abgr16(Texture *tex, CData *cdata, const DDSHeader &header, int n, istream &in) {
8494 // This is laid out in order R, G, B, A.
8495 int x_size = tex->do_get_expected_mipmap_x_size(cdata, n);
8496 int y_size = tex->do_get_expected_mipmap_y_size(cdata, n);
8497
8498 size_t size = tex->do_get_expected_ram_mipmap_page_size(cdata, n);
8499 size_t row_bytes = x_size * 8;
8500 PTA_uchar image = PTA_uchar::empty_array(size);
8501 for (int y = y_size - 1; y >= 0; --y) {
8502 unsigned char *p = image.p() + y * row_bytes;
8503 in.read((char *)p, row_bytes);
8504
8505 uint16_t *pw = (uint16_t *)p;
8506 for (int x = 0; x < x_size; ++x) {
8507 swap(pw[0], pw[2]);
8508 pw += 4;
8509 }
8510 nassertr((unsigned char *)pw <= image.p() + size, PTA_uchar());
8511 }
8512
8513 return image;
8514}
8515
8516/**
8517 * Called by read_dds for a DDS file in ABGR32 format.
8518 */
8519PTA_uchar Texture::
8520read_dds_level_abgr32(Texture *tex, CData *cdata, const DDSHeader &header, int n, istream &in) {
8521 // This is laid out in order R, G, B, A.
8522 int x_size = tex->do_get_expected_mipmap_x_size(cdata, n);
8523 int y_size = tex->do_get_expected_mipmap_y_size(cdata, n);
8524
8525 size_t size = tex->do_get_expected_ram_mipmap_page_size(cdata, n);
8526 size_t row_bytes = x_size * 16;
8527 nassertr(row_bytes * y_size == size, PTA_uchar());
8528 PTA_uchar image = PTA_uchar::empty_array(size);
8529 for (int y = y_size - 1; y >= 0; --y) {
8530 unsigned char *p = image.p() + y * row_bytes;
8531 in.read((char *)p, row_bytes);
8532
8533 uint32_t *pw = (uint32_t *)p;
8534 for (int x = 0; x < x_size; ++x) {
8535 swap(pw[0], pw[2]);
8536 pw += 4;
8537 }
8538 nassertr((unsigned char *)pw <= image.p() + size, PTA_uchar());
8539 }
8540
8541 return image;
8542}
8543
8544/**
8545 * Called by read_dds for a DDS file that needs no transformations applied.
8546 */
8547PTA_uchar Texture::
8548read_dds_level_raw(Texture *tex, CData *cdata, const DDSHeader &header, int n, istream &in) {
8549 int x_size = tex->do_get_expected_mipmap_x_size(cdata, n);
8550 int y_size = tex->do_get_expected_mipmap_y_size(cdata, n);
8551
8552 size_t size = tex->do_get_expected_ram_mipmap_page_size(cdata, n);
8553 size_t row_bytes = x_size * cdata->_num_components * cdata->_component_width;
8554 nassertr(row_bytes * y_size == size, PTA_uchar());
8555 PTA_uchar image = PTA_uchar::empty_array(size);
8556 for (int y = y_size - 1; y >= 0; --y) {
8557 unsigned char *p = image.p() + y * row_bytes;
8558 in.read((char *)p, row_bytes);
8559 }
8560
8561 return image;
8562}
8563
8564/**
8565 * Called by read_dds for a DDS file whose format isn't one we've specifically
8566 * optimized.
8567 */
8568PTA_uchar Texture::
8569read_dds_level_generic_uncompressed(Texture *tex, CData *cdata, const DDSHeader &header,
8570 int n, istream &in) {
8571 int x_size = tex->do_get_expected_mipmap_x_size(cdata, n);
8572 int y_size = tex->do_get_expected_mipmap_y_size(cdata, n);
8573
8574 int pitch = (x_size * header.pf.rgb_bitcount) / 8;
8575
8576 // MS says the pitch can be supplied in the header file and must be DWORD
8577 // aligned, but this appears to apply to level 0 mipmaps only (where it
8578 // almost always will be anyway). Other mipmap levels seem to be tightly
8579 // packed, but there isn't a separate pitch for each mipmap level. Weird.
8580 if (n == 0) {
8581 pitch = ((pitch + 3) / 4) * 4;
8582 if (header.dds_flags & DDSD_PITCH) {
8583 pitch = header.pitch;
8584 }
8585 }
8586
8587 int bpp = header.pf.rgb_bitcount / 8;
8588 int skip_bytes = pitch - (bpp * x_size);
8589 nassertr(skip_bytes >= 0, PTA_uchar());
8590
8591 unsigned int r_mask = header.pf.r_mask;
8592 unsigned int g_mask = header.pf.g_mask;
8593 unsigned int b_mask = header.pf.b_mask;
8594 unsigned int a_mask = header.pf.a_mask;
8595
8596 // Determine the number of bits to shift each mask to the right so that the
8597 // lowest on bit is at bit 0.
8598 int r_shift = get_lowest_on_bit(r_mask);
8599 int g_shift = get_lowest_on_bit(g_mask);
8600 int b_shift = get_lowest_on_bit(b_mask);
8601 int a_shift = get_lowest_on_bit(a_mask);
8602
8603 // Then determine the scale factor required to raise the highest color value
8604 // to 0xff000000.
8605 unsigned int r_scale = 0;
8606 if (r_mask != 0) {
8607 r_scale = 0xff000000 / (r_mask >> r_shift);
8608 }
8609 unsigned int g_scale = 0;
8610 if (g_mask != 0) {
8611 g_scale = 0xff000000 / (g_mask >> g_shift);
8612 }
8613 unsigned int b_scale = 0;
8614 if (b_mask != 0) {
8615 b_scale = 0xff000000 / (b_mask >> b_shift);
8616 }
8617 unsigned int a_scale = 0;
8618 if (a_mask != 0) {
8619 a_scale = 0xff000000 / (a_mask >> a_shift);
8620 }
8621
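  // Worked example for a hypothetical A8R8G8B8 pixel format: r_mask is
  // 0x00ff0000, so r_shift is 16 and r_scale is 0xff000000 / 0xff =
  // 0x01000000; a raw red value of 0xff then maps to (0xff * 0x01000000)
  // >> 24 = 255.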
8622 bool add_alpha = has_alpha(cdata->_format);
8623
8624 size_t size = tex->do_get_expected_ram_mipmap_page_size(cdata, n);
8625 size_t row_bytes = x_size * cdata->_num_components;
8626 PTA_uchar image = PTA_uchar::empty_array(size);
8627 for (int y = y_size - 1; y >= 0; --y) {
8628 unsigned char *p = image.p() + y * row_bytes;
8629 for (int x = 0; x < x_size; ++x) {
8630
8631 // Read a little-endian numeric value of bpp bytes.
8632 unsigned int pixel = 0;
8633 int shift = 0;
8634 for (int bi = 0; bi < bpp; ++bi) {
8635 unsigned int ch = (unsigned char)in.get();
8636 pixel |= (ch << shift);
8637 shift += 8;
8638 }
8639
8640 // Then break apart that value into its R, G, B, and maybe A components.
8641 unsigned int r = (((pixel & r_mask) >> r_shift) * r_scale) >> 24;
8642 unsigned int g = (((pixel & g_mask) >> g_shift) * g_scale) >> 24;
8643 unsigned int b = (((pixel & b_mask) >> b_shift) * b_scale) >> 24;
8644
8645 // Store the components in the Texture's image data.
8646 store_unscaled_byte(p, b);
8647 store_unscaled_byte(p, g);
8648 store_unscaled_byte(p, r);
8649 if (add_alpha) {
8650 unsigned int a = (((pixel & a_mask) >> a_shift) * a_scale) >> 24;
8651 store_unscaled_byte(p, a);
8652 }
8653 }
8654 nassertr(p <= image.p() + size, PTA_uchar());
8655 for (int bi = 0; bi < skip_bytes; ++bi) {
8656 in.get();
8657 }
8658 }
8659
8660 return image;
8661}
8662
8663/**
8664 * Called by read_dds for a DDS file in uncompressed luminance or luminance-
8665 * alpha format.
8666 */
8667PTA_uchar Texture::
8668read_dds_level_luminance_uncompressed(Texture *tex, CData *cdata, const DDSHeader &header,
8669 int n, istream &in) {
8670 int x_size = tex->do_get_expected_mipmap_x_size(cdata, n);
8671 int y_size = tex->do_get_expected_mipmap_y_size(cdata, n);
8672
8673 int pitch = (x_size * header.pf.rgb_bitcount) / 8;
8674
8675 // MS says the pitch can be supplied in the header file and must be DWORD
8676 // aligned, but this appears to apply to level 0 mipmaps only (where it
8677 // almost always will be anyway). Other mipmap levels seem to be tightly
8678 // packed, but there isn't a separate pitch for each mipmap level. Weird.
8679 if (n == 0) {
8680 pitch = ((pitch + 3) / 4) * 4;
8681 if (header.dds_flags & DDSD_PITCH) {
8682 pitch = header.pitch;
8683 }
8684 }
8685
8686 int bpp = header.pf.rgb_bitcount / 8;
8687 int skip_bytes = pitch - (bpp * x_size);
8688 nassertr(skip_bytes >= 0, PTA_uchar());
8689
8690 unsigned int r_mask = header.pf.r_mask;
8691 unsigned int a_mask = header.pf.a_mask;
8692
8693 // Determine the number of bits to shift each mask to the right so that the
8694 // lowest on bit is at bit 0.
8695 int r_shift = get_lowest_on_bit(r_mask);
8696 int a_shift = get_lowest_on_bit(a_mask);
8697
8698 // Then determine the scale factor required to raise the highest color value
8699 // to 0xff000000.
8700 unsigned int r_scale = 0;
8701 if (r_mask != 0) {
8702 r_scale = 0xff000000 / (r_mask >> r_shift);
8703 }
8704 unsigned int a_scale = 0;
8705 if (a_mask != 0) {
8706 a_scale = 0xff000000 / (a_mask >> a_shift);
8707 }
8708
8709 bool add_alpha = has_alpha(cdata->_format);
8710
8711 size_t size = tex->do_get_expected_ram_mipmap_page_size(cdata, n);
8712 size_t row_bytes = x_size * cdata->_num_components;
8713 PTA_uchar image = PTA_uchar::empty_array(size);
8714 for (int y = y_size - 1; y >= 0; --y) {
8715 unsigned char *p = image.p() + y * row_bytes;
8716 for (int x = 0; x < x_size; ++x) {
8717
8718 // Read a little-endian numeric value of bpp bytes.
8719 unsigned int pixel = 0;
8720 int shift = 0;
8721 for (int bi = 0; bi < bpp; ++bi) {
8722 unsigned int ch = (unsigned char)in.get();
8723 pixel |= (ch << shift);
8724 shift += 8;
8725 }
8726
8727 unsigned int r = (((pixel & r_mask) >> r_shift) * r_scale) >> 24;
8728
8729 // Store the components in the Texture's image data.
8730 store_unscaled_byte(p, r);
8731 if (add_alpha) {
8732 unsigned int a = (((pixel & a_mask) >> a_shift) * a_scale) >> 24;
8733 store_unscaled_byte(p, a);
8734 }
8735 }
8736 nassertr(p <= image.p() + size, PTA_uchar());
8737 for (int bi = 0; bi < skip_bytes; ++bi) {
8738 in.get();
8739 }
8740 }
8741
8742 return image;
8743}
8744
8745/**
8746 * Called by read_dds for DXT1 file format.
8747 */
8748PTA_uchar Texture::
8749read_dds_level_bc1(Texture *tex, CData *cdata, const DDSHeader &header, int n, istream &in) {
8750 int x_size = tex->do_get_expected_mipmap_x_size(cdata, n);
8751 int y_size = tex->do_get_expected_mipmap_y_size(cdata, n);
8752
8753 static const int div = 4;
8754 static const int block_bytes = 8;
8755
8756 // The DXT1 image is divided into num_rows x num_cols blocks, where each
8757 // block represents 4x4 pixels.
8758 int num_cols = max(div, x_size) / div;
8759 int num_rows = max(div, y_size) / div;
8760 int row_length = num_cols * block_bytes;
8761 int linear_size = row_length * num_rows;
8762
8763 if (n == 0) {
8764 if (header.dds_flags & DDSD_LINEARSIZE) {
8765 nassertr(linear_size == (int)header.pitch, PTA_uchar());
8766 }
8767 }
8768
8769 PTA_uchar image = PTA_uchar::empty_array(linear_size);
8770
8771 if (y_size >= 4) {
8772 // We have to flip the image as we read it, because of DirectX's inverted
8773 // sense of up. That means we (a) reverse the order of the rows of blocks
8774 // . . .
8775 for (int ri = num_rows - 1; ri >= 0; --ri) {
8776 unsigned char *p = image.p() + row_length * ri;
8777 in.read((char *)p, row_length);
8778
8779 for (int ci = 0; ci < num_cols; ++ci) {
8780 // . . . and (b) within each block, we reverse the 4 individual rows
8781 // of 4 pixels.
8782 uint32_t *cells = (uint32_t *)p;
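        // In a DXT1 block, cells[0] holds the two 16-bit color endpoints and
        // cells[1] holds the 2-bit pixel indices, one byte per row of four
        // pixels; reversing the byte order of cells[1] therefore flips the
        // block vertically.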
8783 uint32_t w = cells[1];
8784 w = ((w & 0xff) << 24) | ((w & 0xff00) << 8) | ((w & 0xff0000) >> 8) | ((w & 0xff000000U) >> 24);
8785 cells[1] = w;
8786
8787 p += block_bytes;
8788 }
8789 }
8790
8791 } else if (y_size >= 2) {
8792 // To invert a two-pixel high image, we just flip two rows within a cell.
8793 unsigned char *p = image.p();
8794 in.read((char *)p, row_length);
8795
8796 for (int ci = 0; ci < num_cols; ++ci) {
8797 uint32_t *cells = (uint32_t *)p;
8798 uint32_t w = cells[1];
8799 w = ((w & 0xff) << 8) | ((w & 0xff00) >> 8);
8800 cells[1] = w;
8801
8802 p += block_bytes;
8803 }
8804
8805 } else if (y_size >= 1) {
8806 // No need to invert a one-pixel-high image.
8807 unsigned char *p = image.p();
8808 in.read((char *)p, row_length);
8809 }
8810
8811 return image;
8812}
8813
8814/**
8815 * Called by read_dds for DXT2 or DXT3 file format.
8816 */
8817PTA_uchar Texture::
8818read_dds_level_bc2(Texture *tex, CData *cdata, const DDSHeader &header, int n, istream &in) {
8819 int x_size = tex->do_get_expected_mipmap_x_size(cdata, n);
8820 int y_size = tex->do_get_expected_mipmap_y_size(cdata, n);
8821
8822 static const int div = 4;
8823 static const int block_bytes = 16;
8824
8825 // The DXT3 image is divided into num_rows x num_cols blocks, where each
8826 // block represents 4x4 pixels. Unlike DXT1, each block consists of two
8827 // 8-byte chunks, representing the alpha and color separately.
8828 int num_cols = max(div, x_size) / div;
8829 int num_rows = max(div, y_size) / div;
8830 int row_length = num_cols * block_bytes;
8831 int linear_size = row_length * num_rows;
8832
8833 if (n == 0) {
8834 if (header.dds_flags & DDSD_LINEARSIZE) {
8835 nassertr(linear_size == (int)header.pitch, PTA_uchar());
8836 }
8837 }
8838
8839 PTA_uchar image = PTA_uchar::empty_array(linear_size);
8840
8841 if (y_size >= 4) {
8842 // We have to flip the image as we read it, because of DirectX's inverted
8843 // sense of up. That means we (a) reverse the order of the rows of blocks
8844 // . . .
8845 for (int ri = num_rows - 1; ri >= 0; --ri) {
8846 unsigned char *p = image.p() + row_length * ri;
8847 in.read((char *)p, row_length);
8848
8849 for (int ci = 0; ci < num_cols; ++ci) {
8850 // . . . and (b) within each block, we reverse the 4 individual rows
8851 // of 4 pixels.
8852 uint32_t *cells = (uint32_t *)p;
8853
8854 // Alpha. The block is four 16-bit words of pixel data.
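        // Each 16-bit word holds one row of four 4-bit alpha values, so
        // swapping the halves of each dword and then exchanging the two
        // dwords reverses the four rows.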
8855 uint32_t w0 = cells[0];
8856 uint32_t w1 = cells[1];
8857 w0 = ((w0 & 0xffff) << 16) | ((w0 & 0xffff0000U) >> 16);
8858 w1 = ((w1 & 0xffff) << 16) | ((w1 & 0xffff0000U) >> 16);
8859 cells[0] = w1;
8860 cells[1] = w0;
8861
8862 // Color. Only the second 32-bit dword of the color block represents
8863 // the pixel data.
8864 uint32_t w = cells[3];
8865 w = ((w & 0xff) << 24) | ((w & 0xff00) << 8) | ((w & 0xff0000) >> 8) | ((w & 0xff000000U) >> 24);
8866 cells[3] = w;
8867
8868 p += block_bytes;
8869 }
8870 }
8871
8872 } else if (y_size >= 2) {
8873 // To invert a two-pixel high image, we just flip two rows within a cell.
8874 unsigned char *p = image.p();
8875 in.read((char *)p, row_length);
8876
8877 for (int ci = 0; ci < num_cols; ++ci) {
8878 uint32_t *cells = (uint32_t *)p;
8879
8880 uint32_t w0 = cells[0];
8881 w0 = ((w0 & 0xffff) << 16) | ((w0 & 0xffff0000U) >> 16);
8882 cells[0] = w0;
8883
8884 uint32_t w = cells[3];
8885 w = ((w & 0xff) << 8) | ((w & 0xff00) >> 8);
8886 cells[3] = w;
8887
8888 p += block_bytes;
8889 }
8890
8891 } else if (y_size >= 1) {
8892 // No need to invert a one-pixel-high image.
8893 unsigned char *p = image.p();
8894 in.read((char *)p, row_length);
8895 }
8896
8897 return image;
8898}
8899
8900/**
8901 * Called by read_dds for DXT4 or DXT5 file format.
8902 */
8903PTA_uchar Texture::
8904read_dds_level_bc3(Texture *tex, CData *cdata, const DDSHeader &header, int n, istream &in) {
8905 int x_size = tex->do_get_expected_mipmap_x_size(cdata, n);
8906 int y_size = tex->do_get_expected_mipmap_y_size(cdata, n);
8907
8908 static const int div = 4;
8909 static const int block_bytes = 16;
8910
8911 // The DXT5 image is similar to DXT3, in that each 4x4 block of pixels
8912 // consists of an alpha block and a color block, but the layout of the alpha
8913 // block is different.
8914 int num_cols = max(div, x_size) / div;
8915 int num_rows = max(div, y_size) / div;
8916 int row_length = num_cols * block_bytes;
8917 int linear_size = row_length * num_rows;
8918
8919 if (n == 0) {
8920 if (header.dds_flags & DDSD_LINEARSIZE) {
8921 nassertr(linear_size == (int)header.pitch, PTA_uchar());
8922 }
8923 }
8924
8925 PTA_uchar image = PTA_uchar::empty_array(linear_size);
8926
8927 if (y_size >= 4) {
8928 // We have to flip the image as we read it, because of DirectX's inverted
8929 // sense of up. That means we (a) reverse the order of the rows of blocks
8930 // . . .
8931 for (int ri = num_rows - 1; ri >= 0; --ri) {
8932 unsigned char *p = image.p() + row_length * ri;
8933 in.read((char *)p, row_length);
8934
8935 for (int ci = 0; ci < num_cols; ++ci) {
8936 // . . . and (b) within each block, we reverse the 4 individual rows
8937 // of 4 pixels.
8938 uint32_t *cells = (uint32_t *)p;
8939
8940 // Alpha. The block is one 16-bit word of reference values, followed
8941 // by six bytes of pixel values, in 12-bit rows. Tricky to invert.
8942 unsigned char p2 = p[2];
8943 unsigned char p3 = p[3];
8944 unsigned char p4 = p[4];
8945 unsigned char p5 = p[5];
8946 unsigned char p6 = p[6];
8947 unsigned char p7 = p[7];
8948
8949 p[2] = ((p7 & 0xf) << 4) | ((p6 & 0xf0) >> 4);
8950 p[3] = ((p5 & 0xf) << 4) | ((p7 & 0xf0) >> 4);
8951 p[4] = ((p6 & 0xf) << 4) | ((p5 & 0xf0) >> 4);
8952 p[5] = ((p4 & 0xf) << 4) | ((p3 & 0xf0) >> 4);
8953 p[6] = ((p2 & 0xf) << 4) | ((p4 & 0xf0) >> 4);
8954 p[7] = ((p3 & 0xf) << 4) | ((p2 & 0xf0) >> 4);
8955
8956 // Color. Only the second 32-bit dword of the color block represents
8957 // the pixel data.
8958 uint32_t w = cells[3];
8959 w = ((w & 0xff) << 24) | ((w & 0xff00) << 8) | ((w & 0xff0000) >> 8) | ((w & 0xff000000U) >> 24);
8960 cells[3] = w;
8961
8962 p += block_bytes;
8963 }
8964 }
8965
8966 } else if (y_size >= 2) {
8967 // To invert a two-pixel high image, we just flip two rows within a cell.
8968 unsigned char *p = image.p();
8969 in.read((char *)p, row_length);
8970
8971 for (int ci = 0; ci < num_cols; ++ci) {
8972 uint32_t *cells = (uint32_t *)p;
8973
8974 unsigned char p2 = p[2];
8975 unsigned char p3 = p[3];
8976 unsigned char p4 = p[4];
8977
8978 p[2] = ((p4 & 0xf) << 4) | ((p3 & 0xf0) >> 4);
8979 p[3] = ((p2 & 0xf) << 4) | ((p4 & 0xf0) >> 4);
8980 p[4] = ((p3 & 0xf) << 4) | ((p2 & 0xf0) >> 4);
8981
8982 uint32_t w0 = cells[0];
8983 w0 = ((w0 & 0xffff) << 16) | ((w0 & 0xffff0000U) >> 16);
8984 cells[0] = w0;
8985
8986 uint32_t w = cells[3];
8987 w = ((w & 0xff) << 8) | ((w & 0xff00) >> 8);
8988 cells[3] = w;
8989
8990 p += block_bytes;
8991 }
8992
8993 } else if (y_size >= 1) {
8994 // No need to invert a one-pixel-high image.
8995 unsigned char *p = image.p();
8996 in.read((char *)p, row_length);
8997 }
8998
8999 return image;
9000}
9001
9002/**
9003 * Called by read_dds for ATI1 compression.
9004 */
9005PTA_uchar Texture::
9006read_dds_level_bc4(Texture *tex, CData *cdata, const DDSHeader &header, int n, istream &in) {
9007 int x_size = tex->do_get_expected_mipmap_x_size(cdata, n);
9008 int y_size = tex->do_get_expected_mipmap_y_size(cdata, n);
9009
9010 static const int div = 4;
9011 static const int block_bytes = 8;
9012
9013 // The ATI1 (BC4) format uses the same compression mechanism as the alpha
9014 // channel of DXT5.
9015 int num_cols = max(div, x_size) / div;
9016 int num_rows = max(div, y_size) / div;
9017 int row_length = num_cols * block_bytes;
9018 int linear_size = row_length * num_rows;
9019
9020 if (n == 0) {
9021 if (header.dds_flags & DDSD_LINEARSIZE) {
9022 nassertr(linear_size == (int)header.pitch, PTA_uchar());
9023 }
9024 }
9025
9026 PTA_uchar image = PTA_uchar::empty_array(linear_size);
9027
9028 if (y_size >= 4) {
9029 // We have to flip the image as we read it, because of DirectX's inverted
9030 // sense of up. That means we (a) reverse the order of the rows of blocks
9031 // . . .
9032 for (int ri = num_rows - 1; ri >= 0; --ri) {
9033 unsigned char *p = image.p() + row_length * ri;
9034 in.read((char *)p, row_length);
9035
9036 for (int ci = 0; ci < num_cols; ++ci) {
9037 // . . . and (b) within each block, we reverse the 4 individual rows
9038 // of 4 pixels. The block is one 16-bit word of reference values,
9039 // followed by six bytes of pixel values, in 12-bit rows. Tricky to
9040 // invert.
9041 unsigned char p2 = p[2];
9042 unsigned char p3 = p[3];
9043 unsigned char p4 = p[4];
9044 unsigned char p5 = p[5];
9045 unsigned char p6 = p[6];
9046 unsigned char p7 = p[7];
9047
9048 p[2] = ((p7 & 0xf) << 4) | ((p6 & 0xf0) >> 4);
9049 p[3] = ((p5 & 0xf) << 4) | ((p7 & 0xf0) >> 4);
9050 p[4] = ((p6 & 0xf) << 4) | ((p5 & 0xf0) >> 4);
9051 p[5] = ((p4 & 0xf) << 4) | ((p3 & 0xf0) >> 4);
9052 p[6] = ((p2 & 0xf) << 4) | ((p4 & 0xf0) >> 4);
9053 p[7] = ((p3 & 0xf) << 4) | ((p2 & 0xf0) >> 4);
9054
9055 p += block_bytes;
9056 }
9057 }
9058
9059 } else if (y_size >= 2) {
9060 // To invert a two-pixel high image, we just flip two rows within a cell.
9061 unsigned char *p = image.p();
9062 in.read((char *)p, row_length);
9063
9064 for (int ci = 0; ci < num_cols; ++ci) {
9065 unsigned char p2 = p[2];
9066 unsigned char p3 = p[3];
9067 unsigned char p4 = p[4];
9068
9069 p[2] = ((p4 & 0xf) << 4) | ((p3 & 0xf0) >> 4);
9070 p[3] = ((p2 & 0xf) << 4) | ((p4 & 0xf0) >> 4);
9071 p[4] = ((p3 & 0xf) << 4) | ((p2 & 0xf0) >> 4);
9072
9073 p += block_bytes;
9074 }
9075
9076 } else if (y_size >= 1) {
9077 // No need to invert a one-pixel-high image.
9078 unsigned char *p = image.p();
9079 in.read((char *)p, row_length);
9080 }
9081
9082 return image;
9083}
9084
9085/**
9086 * Called by read_dds for ATI2 compression.
9087 */
9088PTA_uchar Texture::
9089read_dds_level_bc5(Texture *tex, CData *cdata, const DDSHeader &header, int n, istream &in) {
9090 int x_size = tex->do_get_expected_mipmap_x_size(cdata, n);
9091 int y_size = tex->do_get_expected_mipmap_y_size(cdata, n);
9092
9093 // The ATI2 (BC5) format uses the same compression mechanism as the ATI1
9094 // (BC4) format, but doubles the channels.
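  // Each BC5 block is therefore 16 bytes: two consecutive 8-byte BC4-style
  // channel blocks.  Doubling num_cols and stepping 8 bytes at a time lets
  // the flipping code below treat each half-block exactly like a BC4 block.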
9095 int num_cols = max(4, x_size) / 2;
9096 int num_rows = max(4, y_size) / 4;
9097 int row_length = num_cols * 8;
9098 int linear_size = row_length * num_rows;
9099
9100 if (n == 0) {
9101 if (header.dds_flags & DDSD_LINEARSIZE) {
9102 nassertr(linear_size == (int)header.pitch, PTA_uchar());
9103 }
9104 }
9105
9106 PTA_uchar image = PTA_uchar::empty_array(linear_size);
9107
9108 if (y_size >= 4) {
9109 // We have to flip the image as we read it, because of DirectX's inverted
9110 // sense of up. That means we (a) reverse the order of the rows of blocks
9111 // . . .
9112 for (int ri = num_rows - 1; ri >= 0; --ri) {
9113 unsigned char *p = image.p() + row_length * ri;
9114 in.read((char *)p, row_length);
9115
9116 for (int ci = 0; ci < num_cols; ++ci) {
9117 // . . . and (b) within each block, we reverse the 4 individual rows
9118 // of 4 pixels. The block is one 16-bit word of reference values,
9119 // followed by six bytes of pixel values, in 12-bit rows. Tricky to
9120 // invert.
9121 unsigned char p2 = p[2];
9122 unsigned char p3 = p[3];
9123 unsigned char p4 = p[4];
9124 unsigned char p5 = p[5];
9125 unsigned char p6 = p[6];
9126 unsigned char p7 = p[7];
9127
9128 p[2] = ((p7 & 0xf) << 4) | ((p6 & 0xf0) >> 4);
9129 p[3] = ((p5 & 0xf) << 4) | ((p7 & 0xf0) >> 4);
9130 p[4] = ((p6 & 0xf) << 4) | ((p5 & 0xf0) >> 4);
9131 p[5] = ((p4 & 0xf) << 4) | ((p3 & 0xf0) >> 4);
9132 p[6] = ((p2 & 0xf) << 4) | ((p4 & 0xf0) >> 4);
9133 p[7] = ((p3 & 0xf) << 4) | ((p2 & 0xf0) >> 4);
9134
9135 p += 8;
9136 }
9137 }
9138
9139 } else if (y_size >= 2) {
9140 // To invert a two-pixel high image, we just flip two rows within a cell.
9141 unsigned char *p = image.p();
9142 in.read((char *)p, row_length);
9143
9144 for (int ci = 0; ci < num_cols; ++ci) {
9145 unsigned char p2 = p[2];
9146 unsigned char p3 = p[3];
9147 unsigned char p4 = p[4];
9148
9149 p[2] = ((p4 & 0xf) << 4) | ((p3 & 0xf0) >> 4);
9150 p[3] = ((p2 & 0xf) << 4) | ((p4 & 0xf0) >> 4);
9151 p[4] = ((p3 & 0xf) << 4) | ((p2 & 0xf0) >> 4);
9152
9153 p += 8;
9154 }
9155
9156 } else if (y_size >= 1) {
9157 // No need to invert a one-pixel-high image.
9158 unsigned char *p = image.p();
9159 in.read((char *)p, row_length);
9160 }
9161
9162 return image;
9163}
9164
9165/**
9166 * Removes the indicated PreparedGraphicsObjects table from the Texture's
9167 * table, without actually releasing the texture. This is intended to be
9168 * called only from PreparedGraphicsObjects::release_texture(); it should
9169 * never be called by user code.
9170 */
9171void Texture::
9172clear_prepared(int view, PreparedGraphicsObjects *prepared_objects) {
9173 PreparedViews::iterator pvi;
9174 pvi = _prepared_views.find(prepared_objects);
9175 if (pvi != _prepared_views.end()) {
9176 Contexts &contexts = (*pvi).second;
9177 Contexts::iterator ci;
9178 ci = contexts.find(view);
9179 if (ci != contexts.end()) {
9180 contexts.erase(ci);
9181 }
9182
9183 if (contexts.empty()) {
9184 _prepared_views.erase(pvi);
9185 }
9186 }
9187}
9188
9189/**
9190 * Reduces the number of channels in the texture, if necessary, according to
9191 * num_channels.
9192 */
9193void Texture::
9194consider_downgrade(PNMImage &pnmimage, int num_channels, const string &name) {
9195 if (num_channels != 0 && num_channels < pnmimage.get_num_channels()) {
9196 // One special case: we can't reduce from 3 to 2 components, since that
9197 // would require adding an alpha channel.
9198 if (pnmimage.get_num_channels() == 3 && num_channels == 2) {
9199 return;
9200 }
9201
9202 gobj_cat.info()
9203 << "Downgrading " << name << " from "
9204 << pnmimage.get_num_channels() << " components to "
9205 << num_channels << ".\n";
9206 pnmimage.set_num_channels(num_channels);
9207 }
9208}
9209
9210/**
9211 * Called by generate_simple_ram_image(), this compares the two PNMImages
9212 * pixel-by-pixel. If they're similar enough (within a given threshold),
9213 * returns true.
9214 */
9215bool Texture::
9216compare_images(const PNMImage &a, const PNMImage &b) {
9217 nassertr(a.get_maxval() == 255 && b.get_maxval() == 255, false);
9218 nassertr(a.get_num_channels() == 4 && b.get_num_channels() == 4, false);
9219 nassertr(a.get_x_size() == b.get_x_size() &&
9220 a.get_y_size() == b.get_y_size(), false);
9221
9222 const xel *a_array = a.get_array();
9223 const xel *b_array = b.get_array();
9224 const xelval *a_alpha = a.get_alpha_array();
9225 const xelval *b_alpha = b.get_alpha_array();
9226
9227 int x_size = a.get_x_size();
9228
9229 int delta = 0;
9230 for (int yi = 0; yi < a.get_y_size(); ++yi) {
9231 const xel *a_row = a_array + yi * x_size;
9232 const xel *b_row = b_array + yi * x_size;
9233 const xelval *a_alpha_row = a_alpha + yi * x_size;
9234 const xelval *b_alpha_row = b_alpha + yi * x_size;
9235 for (int xi = 0; xi < x_size; ++xi) {
9236 delta += abs(PPM_GETR(a_row[xi]) - PPM_GETR(b_row[xi]));
9237 delta += abs(PPM_GETG(a_row[xi]) - PPM_GETG(b_row[xi]));
9238 delta += abs(PPM_GETB(a_row[xi]) - PPM_GETB(b_row[xi]));
9239 delta += abs(a_alpha_row[xi] - b_alpha_row[xi]);
9240 }
9241 }
9242
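  // delta is the sum of absolute per-channel differences over the whole image;
  // dividing by the pixel count and maxval normalizes it so that 0 means the
  // images are identical, and the result is compared against the configurable
  // simple_image_threshold.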
9243 double average_delta = (double)delta / ((double)a.get_x_size() * (double)b.get_y_size() * (double)a.get_maxval());
9244 return (average_delta <= simple_image_threshold);
9245}
9246
9247/**
9248 * Generates the next mipmap level from the previous one. If there are
9249 * multiple pages (e.g. a cube map), generates each page independently.
9250 *
9251 * x_size and y_size are the size of the previous level. They need not be a
9252 * power of 2, or even a multiple of 2.
9253 *
9254 * Assumes the lock is already held.
9255 */
9256void Texture::
9257do_filter_2d_mipmap_pages(const CData *cdata,
9258 Texture::RamImage &to, const Texture::RamImage &from,
9259 int x_size, int y_size) const {
9260 Filter2DComponent *filter_component;
9261 Filter2DComponent *filter_alpha;
9262
9263 if (is_srgb(cdata->_format)) {
9264 // We currently only support sRGB mipmap generation for unsigned byte
9265 // textures, due to our use of a lookup table.
9266 nassertv(cdata->_component_type == T_unsigned_byte);
9267
9268 if (has_sse2_sRGB_encode()) {
9269 filter_component = &filter_2d_unsigned_byte_srgb_sse2;
9270 } else {
9271 filter_component = &filter_2d_unsigned_byte_srgb;
9272 }
9273
9274 // Alpha is always linear.
9275 filter_alpha = &filter_2d_unsigned_byte;
9276
9277 } else {
9278 switch (cdata->_component_type) {
9279 case T_unsigned_byte:
9280 filter_component = &filter_2d_unsigned_byte;
9281 break;
9282
9283 case T_unsigned_short:
9284 filter_component = &filter_2d_unsigned_short;
9285 break;
9286
9287 case T_float:
9288 filter_component = &filter_2d_float;
9289 break;
9290
9291 default:
9292 gobj_cat.error()
9293 << "Unable to generate mipmaps for 2D texture with component type "
9294 << cdata->_component_type << "!";
9295 return;
9296 }
9297 filter_alpha = filter_component;
9298 }
9299
9300 size_t pixel_size = cdata->_num_components * cdata->_component_width;
9301 size_t row_size = (size_t)x_size * pixel_size;
9302
9303 int to_x_size = max(x_size >> 1, 1);
9304 int to_y_size = max(y_size >> 1, 1);
9305
9306 size_t to_row_size = (size_t)to_x_size * pixel_size;
9307 to._page_size = (size_t)to_y_size * to_row_size;
9308 to._image = PTA_uchar::empty_array(to._page_size * cdata->_z_size * cdata->_num_views, get_class_type());
9309
9310 bool alpha = has_alpha(cdata->_format);
9311 int num_color_components = cdata->_num_components;
9312 if (alpha) {
9313 --num_color_components;
9314 }
9315
9316 int num_pages = cdata->_z_size * cdata->_num_views;
9317 for (int z = 0; z < num_pages; ++z) {
9318 // For each level.
9319 unsigned char *p = to._image.p() + z * to._page_size;
9320 nassertv(p <= to._image.p() + to._image.size() + to._page_size);
9321 const unsigned char *q = from._image.p() + z * from._page_size;
9322 nassertv(q <= from._image.p() + from._image.size() + from._page_size);
9323 if (y_size != 1) {
9324 int y;
9325 for (y = 0; y < y_size - 1; y += 2) {
9326 // For each row.
9327 nassertv(p == to._image.p() + z * to._page_size + (y / 2) * to_row_size);
9328 nassertv(q == from._image.p() + z * from._page_size + y * row_size);
9329 if (x_size != 1) {
9330 int x;
9331 for (x = 0; x < x_size - 1; x += 2) {
9332 // For each pixel.
9333 for (int c = 0; c < num_color_components; ++c) {
9334 // For each component.
9335 filter_component(p, q, pixel_size, row_size);
9336 }
9337 if (alpha) {
9338 filter_alpha(p, q, pixel_size, row_size);
9339 }
9340 q += pixel_size;
9341 }
9342 if (x < x_size) {
9343 // Skip the last odd pixel.
9344 q += pixel_size;
9345 }
9346 } else {
9347 // Just one pixel.
9348 for (int c = 0; c < num_color_components; ++c) {
9349 // For each component.
9350 filter_component(p, q, 0, row_size);
9351 }
9352 if (alpha) {
9353 filter_alpha(p, q, 0, row_size);
9354 }
9355 }
9356 q += row_size;
9358 }
9359 if (y < y_size) {
9360 // Skip the last odd row.
9361 q += row_size;
9362 }
9363 } else {
9364 // Just one row.
9365 if (x_size != 1) {
9366 int x;
9367 for (x = 0; x < x_size - 1; x += 2) {
9368 // For each pixel.
9369 for (int c = 0; c < num_color_components; ++c) {
9370 // For each component.
9371 filter_component(p, q, pixel_size, 0);
9372 }
9373 if (alpha) {
9374 filter_alpha(p, q, pixel_size, 0);
9375 }
9376 q += pixel_size;
9377 }
9378 if (x < x_size) {
9379 // Skip the last odd pixel.
9380 q += pixel_size;
9381 }
9382 } else {
9383 // Just one pixel.
9384 for (int c = 0; c < num_color_components; ++c) {
9385 // For each component.
9386 filter_component(p, q, 0, 0);
9387 }
9388 if (alpha) {
9389 filter_alpha(p, q, 0, 0);
9390 }
9391 }
9392 }
9393
9394 nassertv(p == to._image.p() + (z + 1) * to._page_size);
9395 nassertv(q == from._image.p() + (z + 1) * from._page_size);
9396 }
9397}
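// The max(size >> 1, 1) halving above determines how long a full chain is; a
// standalone sketch of the level-size progression for an arbitrary,
// non-power-of-two base size.
#include <algorithm>
#include <cstdio>

void print_mipmap_chain_example() {
  int x = 37, y = 9;  // arbitrary example dimensions
  int level = 0;
  std::printf("level %d: %d x %d\n", level, x, y);
  while (x > 1 || y > 1) {
    x = std::max(x >> 1, 1);
    y = std::max(y >> 1, 1);
    std::printf("level %d: %d x %d\n", ++level, x, y);
  }
  // 37x9 -> 18x4 -> 9x2 -> 4x1 -> 2x1 -> 1x1: six levels in total.
}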
9398
9399/**
9400 * Generates the next mipmap level from the previous one, treating all the
9401 * pages of the level as a single 3-d block of pixels.
9402 *
9403 * x_size, y_size, and z_size are the size of the previous level. They need
9404 * not be a power of 2, or even a multiple of 2.
9405 *
9406 * Assumes the lock is already held.
9407 */
9408void Texture::
9409do_filter_3d_mipmap_level(const CData *cdata,
9410 Texture::RamImage &to, const Texture::RamImage &from,
9411 int x_size, int y_size, int z_size) const {
9412 Filter3DComponent *filter_component;
9413 Filter3DComponent *filter_alpha;
9414
9415 if (is_srgb(cdata->_format)) {
9416 // We currently only support sRGB mipmap generation for unsigned byte
9417 // textures, due to our use of a lookup table.
9418 nassertv(cdata->_component_type == T_unsigned_byte);
9419
9420 if (has_sse2_sRGB_encode()) {
9421 filter_component = &filter_3d_unsigned_byte_srgb_sse2;
9422 } else {
9423 filter_component = &filter_3d_unsigned_byte_srgb;
9424 }
9425
9426 // Alpha is always linear.
9427 filter_alpha = &filter_3d_unsigned_byte;
9428
9429 } else {
9430 switch (cdata->_component_type) {
9431 case T_unsigned_byte:
9432 filter_component = &filter_3d_unsigned_byte;
9433 break;
9434
9435 case T_unsigned_short:
9436 filter_component = &filter_3d_unsigned_short;
9437 break;
9438
9439 case T_float:
9440 filter_component = &filter_3d_float;
9441 break;
9442
9443 default:
9444 gobj_cat.error()
9445 << "Unable to generate mipmaps for 3D texture with component type "
9446 << cdata->_component_type << "!";
9447 return;
9448 }
9449 filter_alpha = filter_component;
9450 }
9451
9452 size_t pixel_size = cdata->_num_components * cdata->_component_width;
9453 size_t row_size = (size_t)x_size * pixel_size;
9454 size_t page_size = (size_t)y_size * row_size;
9455 size_t view_size = (size_t)z_size * page_size;
9456
9457 int to_x_size = max(x_size >> 1, 1);
9458 int to_y_size = max(y_size >> 1, 1);
9459 int to_z_size = max(z_size >> 1, 1);
9460
9461 size_t to_row_size = (size_t)to_x_size * pixel_size;
9462 size_t to_page_size = (size_t)to_y_size * to_row_size;
9463 size_t to_view_size = (size_t)to_z_size * to_page_size;
9464 to._page_size = to_page_size;
9465 to._image = PTA_uchar::empty_array(to_page_size * to_z_size * cdata->_num_views, get_class_type());
9466
9467 bool alpha = has_alpha(cdata->_format);
9468 int num_color_components = cdata->_num_components;
9469 if (alpha) {
9470 --num_color_components;
9471 }
9472
9473 for (int view = 0; view < cdata->_num_views; ++view) {
9474 unsigned char *start_to = to._image.p() + view * to_view_size;
9475 const unsigned char *start_from = from._image.p() + view * view_size;
9476 nassertv(start_to + to_view_size <= to._image.p() + to._image.size());
9477 nassertv(start_from + view_size <= from._image.p() + from._image.size());
9478 unsigned char *p = start_to;
9479 const unsigned char *q = start_from;
9480 if (z_size != 1) {
9481 int z;
9482 for (z = 0; z < z_size - 1; z += 2) {
9483 // For each level.
9484 nassertv(p == start_to + (z / 2) * to_page_size);
9485 nassertv(q == start_from + z * page_size);
9486 if (y_size != 1) {
9487 int y;
9488 for (y = 0; y < y_size - 1; y += 2) {
9489 // For each row.
9490 nassertv(p == start_to + (z / 2) * to_page_size + (y / 2) * to_row_size);
9491 nassertv(q == start_from + z * page_size + y * row_size);
9492 if (x_size != 1) {
9493 int x;
9494 for (x = 0; x < x_size - 1; x += 2) {
9495 // For each pixel.
9496 for (int c = 0; c < num_color_components; ++c) {
9497 // For each component.
9498 filter_component(p, q, pixel_size, row_size, page_size);
9499 }
9500 if (alpha) {
9501 filter_alpha(p, q, pixel_size, row_size, page_size);
9502 }
9503 q += pixel_size;
9504 }
9505 if (x < x_size) {
9506 // Skip the last odd pixel.
9507 q += pixel_size;
9508 }
9509 } else {
9510 // Just one pixel.
9511 for (int c = 0; c < num_color_components; ++c) {
9512 // For each component.
9513 filter_component(p, q, 0, row_size, page_size);
9514 }
9515 if (alpha) {
9516 filter_alpha(p, q, 0, row_size, page_size);
9517 }
9518 }
9519 q += row_size;
9521 }
9522 if (y < y_size) {
9523 // Skip the last odd row.
9524 q += row_size;
9525 }
9526 } else {
9527 // Just one row.
9528 if (x_size != 1) {
9529 int x;
9530 for (x = 0; x < x_size - 1; x += 2) {
9531 // For each pixel.
9532 for (int c = 0; c < num_color_components; ++c) {
9533 // For each component.
9534 filter_component(p, q, pixel_size, 0, page_size);
9535 }
9536 if (alpha) {
9537 filter_alpha(p, q, pixel_size, 0, page_size);
9538 }
9539 q += pixel_size;
9540 }
9541 if (x < x_size) {
9542 // Skip the last odd pixel.
9543 q += pixel_size;
9544 }
9545 } else {
9546 // Just one pixel.
9547 for (int c = 0; c < num_color_components; ++c) {
9548 // For each component.
9549 filter_component(p, q, 0, 0, page_size);
9550 }
9551 if (alpha) {
9552 filter_alpha(p, q, 0, 0, page_size);
9553 }
9554 }
9555 }
9556 q += page_size;
9557 }
9558 if (z < z_size) {
9559 // Skip the last odd page.
9560 q += page_size;
9561 }
9562 } else {
9563 // Just one page.
9564 if (y_size != 1) {
9565 int y;
9566 for (y = 0; y < y_size - 1; y += 2) {
9567 // For each row.
9568 nassertv(p == start_to + (y / 2) * to_row_size);
9569 nassertv(q == start_from + y * row_size);
9570 if (x_size != 1) {
9571 int x;
9572 for (x = 0; x < x_size - 1; x += 2) {
9573 // For each pixel.
9574 for (int c = 0; c < num_color_components; ++c) {
9575 // For each component.
9576 filter_component(p, q, pixel_size, row_size, 0);
9577 }
9578 if (alpha) {
9579 filter_alpha(p, q, pixel_size, row_size, 0);
9580 }
9581 q += pixel_size;
9582 }
9583 if (x < x_size) {
9584 // Skip the last odd pixel.
9585 q += pixel_size;
9586 }
9587 } else {
9588 // Just one pixel.
9589 for (int c = 0; c < num_color_components; ++c) {
9590 // For each component.
9591 filter_component(p, q, 0, row_size, 0);
9592 }
9593 if (alpha) {
9594 filter_alpha(p, q, 0, row_size, 0);
9595 }
9596 }
9597 q += row_size;
9599 }
9600 if (y < y_size) {
9601 // Skip the last odd row.
9602 q += row_size;
9603 }
9604 } else {
9605 // Just one row.
9606 if (x_size != 1) {
9607 int x;
9608 for (x = 0; x < x_size - 1; x += 2) {
9609 // For each pixel.
9610 for (int c = 0; c < num_color_components; ++c) {
9611 // For each component.
9612 filter_component(p, q, pixel_size, 0, 0);
9613 }
9614 if (alpha) {
9615 filter_alpha(p, q, pixel_size, 0, 0);
9616 }
9617 q += pixel_size;
9618 }
9619 if (x < x_size) {
9620 // Skip the last odd pixel.
9621 q += pixel_size;
9622 }
9623 } else {
9624 // Just one pixel.
9625 for (int c = 0; c < num_color_components; ++c) {
9626 // For each component.
9627 filter_component(p, q, 0, 0, 0);
9628 }
9629 if (alpha) {
9630 filter_alpha(p, q, 0, 0, 0);
9631 }
9632 }
9633 }
9634 }
9635
9636 nassertv(p == start_to + to_z_size * to_page_size);
9637 nassertv(q == start_from + z_size * page_size);
9638 }
9639}
9640
9641/**
9642 * Averages a 2x2 block of pixel components into a single pixel component, for
9643 * producing the next mipmap level. Increments p and q to the next component.
9644 */
9645void Texture::
9646filter_2d_unsigned_byte(unsigned char *&p, const unsigned char *&q,
9647 size_t pixel_size, size_t row_size) {
9648 unsigned int result = ((unsigned int)q[0] +
9649 (unsigned int)q[pixel_size] +
9650 (unsigned int)q[row_size] +
9651 (unsigned int)q[pixel_size + row_size]) >> 2;
9652 *p = (unsigned char)result;
9653 ++p;
9654 ++q;
9655}
9656
9657/**
9658 * Averages a 2x2 block of pixel components into a single pixel component, for
9659 * producing the next mipmap level. Increments p and q to the next component.
9660 */
9661void Texture::
9662filter_2d_unsigned_byte_srgb(unsigned char *&p, const unsigned char *&q,
9663 size_t pixel_size, size_t row_size) {
9664 float result = (decode_sRGB_float(q[0]) +
9665 decode_sRGB_float(q[pixel_size]) +
9666 decode_sRGB_float(q[row_size]) +
9667 decode_sRGB_float(q[pixel_size + row_size]));
9668
9669 *p = encode_sRGB_uchar(result * 0.25f);
9670 ++p;
9671 ++q;
9672}
9673
9674/**
9675 * Averages a 2x2 block of pixel components into a single pixel component, for
9676 * producing the next mipmap level. Increments p and q to the next component.
9677 */
9678void Texture::
9679filter_2d_unsigned_byte_srgb_sse2(unsigned char *&p, const unsigned char *&q,
9680 size_t pixel_size, size_t row_size) {
9681 float result = (decode_sRGB_float(q[0]) +
9682 decode_sRGB_float(q[pixel_size]) +
9683 decode_sRGB_float(q[row_size]) +
9684 decode_sRGB_float(q[pixel_size + row_size]));
9685
9686 *p = encode_sRGB_uchar_sse2(result * 0.25f);
9687 ++p;
9688 ++q;
9689}
9690
9691/**
9692 * Averages a 2x2 block of pixel components into a single pixel component, for
9693 * producing the next mipmap level. Increments p and q to the next component.
9694 */
9695void Texture::
9696filter_2d_unsigned_short(unsigned char *&p, const unsigned char *&q,
9697 size_t pixel_size, size_t row_size) {
9698 unsigned int result = ((unsigned int)*(unsigned short *)&q[0] +
9699 (unsigned int)*(unsigned short *)&q[pixel_size] +
9700 (unsigned int)*(unsigned short *)&q[row_size] +
9701 (unsigned int)*(unsigned short *)&q[pixel_size + row_size]) >> 2;
9702 store_unscaled_short(p, result);
9703 q += 2;
9704}
9705
9706/**
9707 * Averages a 2x2 block of pixel components into a single pixel component, for
9708 * producing the next mipmap level. Increments p and q to the next component.
9709 */
9710void Texture::
9711filter_2d_float(unsigned char *&p, const unsigned char *&q,
9712 size_t pixel_size, size_t row_size) {
9713 *(float *)p = (*(float *)&q[0] +
9714 *(float *)&q[pixel_size] +
9715 *(float *)&q[row_size] +
9716 *(float *)&q[pixel_size + row_size]) / 4.0f;
9717 p += 4;
9718 q += 4;
9719}
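// A standalone illustration of the 2x2 box filter these helpers implement,
// applied to a single-channel 2x2 byte image (so pixel_size == 1 and
// row_size == 2); the pixel values are arbitrary.
#include <cstdio>

void box_filter_example() {
  const unsigned char src[4] = {10, 20, 30, 40};  // 2x2 image, row-major
  const size_t pixel_size = 1, row_size = 2;
  const unsigned char *q = src;
  unsigned int result = ((unsigned int)q[0] +
                         (unsigned int)q[pixel_size] +
                         (unsigned int)q[row_size] +
                         (unsigned int)q[pixel_size + row_size]) >> 2;
  std::printf("%u\n", result);  // prints 25, the average of the four samples
}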
9720
9721/**
9722 * Averages a 2x2x2 block of pixel components into a single pixel component,
9723 * for producing the next mipmap level. Increments p and q to the next
9724 * component.
9725 */
9726void Texture::
9727filter_3d_unsigned_byte(unsigned char *&p, const unsigned char *&q,
9728 size_t pixel_size, size_t row_size, size_t page_size) {
9729 unsigned int result = ((unsigned int)q[0] +
9730 (unsigned int)q[pixel_size] +
9731 (unsigned int)q[row_size] +
9732 (unsigned int)q[pixel_size + row_size] +
9733 (unsigned int)q[page_size] +
9734 (unsigned int)q[pixel_size + page_size] +
9735 (unsigned int)q[row_size + page_size] +
9736 (unsigned int)q[pixel_size + row_size + page_size]) >> 3;
9737 *p = (unsigned char)result;
9738 ++p;
9739 ++q;
9740}
9741
9742/**
9743 * Averages a 2x2x2 block of pixel components into a single pixel component,
9744 * for producing the next mipmap level. Increments p and q to the next
9745 * component.
9746 */
9747void Texture::
9748filter_3d_unsigned_byte_srgb(unsigned char *&p, const unsigned char *&q,
9749 size_t pixel_size, size_t row_size, size_t page_size) {
9750 float result = (decode_sRGB_float(q[0]) +
9751 decode_sRGB_float(q[pixel_size]) +
9752 decode_sRGB_float(q[row_size]) +
9753 decode_sRGB_float(q[pixel_size + row_size]) +
9754 decode_sRGB_float(q[page_size]) +
9755 decode_sRGB_float(q[pixel_size + page_size]) +
9756 decode_sRGB_float(q[row_size + page_size]) +
9757 decode_sRGB_float(q[pixel_size + row_size + page_size]));
9758
9759 *p = encode_sRGB_uchar(result * 0.125f);
9760 ++p;
9761 ++q;
9762}
9763
9764/**
9765 * Averages a 2x2x2 block of pixel components into a single pixel component,
9766 * for producing the next mipmap level. Increments p and q to the next
9767 * component.
9768 */
9769void Texture::
9770filter_3d_unsigned_byte_srgb_sse2(unsigned char *&p, const unsigned char *&q,
9771 size_t pixel_size, size_t row_size, size_t page_size) {
9772 float result = (decode_sRGB_float(q[0]) +
9773 decode_sRGB_float(q[pixel_size]) +
9774 decode_sRGB_float(q[row_size]) +
9775 decode_sRGB_float(q[pixel_size + row_size]) +
9776 decode_sRGB_float(q[page_size]) +
9777 decode_sRGB_float(q[pixel_size + page_size]) +
9778 decode_sRGB_float(q[row_size + page_size]) +
9779 decode_sRGB_float(q[pixel_size + row_size + page_size]));
9780
9781 *p = encode_sRGB_uchar_sse2(result * 0.125f);
9782 ++p;
9783 ++q;
9784}
9785
9786/**
9787 * Averages a 2x2x2 block of pixel components into a single pixel component,
9788 * for producing the next mipmap level. Increments p and q to the next
9789 * component.
9790 */
9791void Texture::
9792filter_3d_unsigned_short(unsigned char *&p, const unsigned char *&q,
9793 size_t pixel_size, size_t row_size,
9794 size_t page_size) {
9795 unsigned int result = ((unsigned int)*(unsigned short *)&q[0] +
9796 (unsigned int)*(unsigned short *)&q[pixel_size] +
9797 (unsigned int)*(unsigned short *)&q[row_size] +
9798 (unsigned int)*(unsigned short *)&q[pixel_size + row_size] +
9799 (unsigned int)*(unsigned short *)&q[page_size] +
9800 (unsigned int)*(unsigned short *)&q[pixel_size + page_size] +
9801 (unsigned int)*(unsigned short *)&q[row_size + page_size] +
9802 (unsigned int)*(unsigned short *)&q[pixel_size + row_size + page_size]) >> 3;
9803 store_unscaled_short(p, result);
9804 q += 2;
9805}
9806
9807/**
9808 * Averages a 2x2x2 block of pixel components into a single pixel component,
9809 * for producing the next mipmap level. Increments p and q to the next
9810 * component.
9811 */
9812void Texture::
9813filter_3d_float(unsigned char *&p, const unsigned char *&q,
9814 size_t pixel_size, size_t row_size, size_t page_size) {
9815 *(float *)p = (*(float *)&q[0] +
9816 *(float *)&q[pixel_size] +
9817 *(float *)&q[row_size] +
9818 *(float *)&q[pixel_size + row_size] +
9819 *(float *)&q[page_size] +
9820 *(float *)&q[pixel_size + page_size] +
9821 *(float *)&q[row_size + page_size] +
9822 *(float *)&q[pixel_size + row_size + page_size]) / 8.0f;
9823 p += 4;
9824 q += 4;
9825}
9826
9827/**
9828 * Invokes the squish library to compress the RAM image(s).
9829 */
9830bool Texture::
9831do_squish(CData *cdata, Texture::CompressionMode compression, int squish_flags) {
9832#ifdef HAVE_SQUISH
9833 if (!do_has_all_ram_mipmap_images(cdata)) {
9834 // If we're about to compress the RAM image, we should ensure that we have
9835 // all of the mipmap levels first.
9836 do_generate_ram_mipmap_images(cdata, false);
9837 }
9838
9839 RamImages compressed_ram_images;
9840 compressed_ram_images.reserve(cdata->_ram_images.size());
9841 for (size_t n = 0; n < cdata->_ram_images.size(); ++n) {
9842 RamImage compressed_image;
9843 int x_size = do_get_expected_mipmap_x_size(cdata, n);
9844 int y_size = do_get_expected_mipmap_y_size(cdata, n);
9845 int num_pages = do_get_expected_mipmap_num_pages(cdata, n);
9846 int page_size = squish::GetStorageRequirements(x_size, y_size, squish_flags);
9847 int cell_size = squish::GetStorageRequirements(4, 4, squish_flags);
9848
9849 compressed_image._page_size = page_size;
9850 compressed_image._image = PTA_uchar::empty_array(page_size * num_pages);
9851 for (int z = 0; z < num_pages; ++z) {
9852 unsigned char *dest_page = compressed_image._image.p() + z * page_size;
9853 unsigned const char *source_page = cdata->_ram_images[n]._image.p() + z * cdata->_ram_images[n]._page_size;
9854 unsigned const char *source_page_end = source_page + cdata->_ram_images[n]._page_size;
9855 // Convert one 4 x 4 cell at a time.
9856 unsigned char *d = dest_page;
9857 for (int y = 0; y < y_size; y += 4) {
9858 for (int x = 0; x < x_size; x += 4) {
9859 unsigned char tb[16 * 4];
9860 int mask = 0;
9861 unsigned char *t = tb;
9862 for (int i = 0; i < 16; ++i) {
9863 int xi = x + i % 4;
9864 int yi = y + i / 4;
9865 unsigned const char *s = source_page + (yi * x_size + xi) * cdata->_num_components;
9866 if (s < source_page_end) {
9867 switch (cdata->_num_components) {
9868 case 1:
9869 t[0] = s[0]; // r
9870 t[1] = s[0]; // g
9871 t[2] = s[0]; // b
9872 t[3] = 255; // a
9873 break;
9874
9875 case 2:
9876 t[0] = s[0]; // r
9877 t[1] = s[0]; // g
9878 t[2] = s[0]; // b
9879 t[3] = s[1]; // a
9880 break;
9881
9882 case 3:
9883 t[0] = s[2]; // r
9884 t[1] = s[1]; // g
9885 t[2] = s[0]; // b
9886 t[3] = 255; // a
9887 break;
9888
9889 case 4:
9890 t[0] = s[2]; // r
9891 t[1] = s[1]; // g
9892 t[2] = s[0]; // b
9893 t[3] = s[3]; // a
9894 break;
9895 }
9896 mask |= (1 << i);
9897 }
9898 t += 4;
9899 }
9900 squish::CompressMasked(tb, mask, d, squish_flags);
9901 d += cell_size;
9903 }
9904 }
9905 }
9906 compressed_ram_images.push_back(compressed_image);
9907 }
9908 cdata->_ram_images.swap(compressed_ram_images);
9909 cdata->_ram_image_compression = compression;
9910 return true;
9911
9912#else // HAVE_SQUISH
9913 return false;
9914
9915#endif // HAVE_SQUISH
9916}
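// For reference, the libsquish call pattern the cell loop above depends on: a
// standalone sketch that compresses one fully-covered 4x4 RGBA block, using
// kDxt1 only as an example flag.  Meaningful only when built with HAVE_SQUISH;
// the flat gray fill is arbitrary.
#ifdef HAVE_SQUISH
#include <squish.h>
#include <cstring>

bool compress_one_cell_example() {
  unsigned char rgba[16 * 4];
  std::memset(rgba, 0x80, sizeof(rgba));  // a flat mid-gray 4x4 block
  unsigned char cell[16];                 // DXT1 actually needs only 8 bytes
  const int flags = squish::kDxt1;
  const int cell_size = squish::GetStorageRequirements(4, 4, flags);
  squish::CompressMasked(rgba, 0xffff, cell, flags);  // all 16 pixels present
  return cell_size <= (int)sizeof(cell);
}
#endif  // HAVE_SQUISH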
9917
9918/**
9919 * Invokes the squish library to uncompress the RAM image(s).
9920 */
9921bool Texture::
9922do_unsquish(CData *cdata, int squish_flags) {
9923#ifdef HAVE_SQUISH
9924 RamImages uncompressed_ram_images;
9925 uncompressed_ram_images.reserve(cdata->_ram_images.size());
9926 for (size_t n = 0; n < cdata->_ram_images.size(); ++n) {
9927 RamImage uncompressed_image;
9928 int x_size = do_get_expected_mipmap_x_size(cdata, n);
9929 int y_size = do_get_expected_mipmap_y_size(cdata, n);
9930 int num_pages = do_get_expected_mipmap_num_pages(cdata, n);
9931 int page_size = squish::GetStorageRequirements(x_size, y_size, squish_flags);
9932 int cell_size = squish::GetStorageRequirements(4, 4, squish_flags);
9933
9934 uncompressed_image._page_size = do_get_expected_ram_mipmap_page_size(cdata, n);
9935 uncompressed_image._image = PTA_uchar::empty_array(uncompressed_image._page_size * num_pages);
9936 for (int z = 0; z < num_pages; ++z) {
9937 unsigned char *dest_page = uncompressed_image._image.p() + z * uncompressed_image._page_size;
9938 unsigned char *dest_page_end = dest_page + uncompressed_image._page_size;
9939 unsigned const char *source_page = cdata->_ram_images[n]._image.p() + z * page_size;
9940 // Unconvert one 4 x 4 cell at a time.
9941 unsigned const char *s = source_page;
9942 for (int y = 0; y < y_size; y += 4) {
9943 for (int x = 0; x < x_size; x += 4) {
9944 unsigned char tb[16 * 4];
9945 squish::Decompress(tb, s, squish_flags);
9946 s += cell_size;
9947
9948 unsigned char *t = tb;
9949 for (int i = 0; i < 16; ++i) {
9950 int xi = x + i % 4;
9951 int yi = y + i / 4;
9952 unsigned char *d = dest_page + (yi * x_size + xi) * cdata->_num_components;
9953 if (d < dest_page_end) {
9954 switch (cdata->_num_components) {
9955 case 1:
9956 d[0] = t[1]; // g
9957 break;
9958
9959 case 2:
9960 d[0] = t[1]; // g
9961 d[1] = t[3]; // a
9962 break;
9963
9964 case 3:
9965 d[2] = t[0]; // r
9966 d[1] = t[1]; // g
9967 d[0] = t[2]; // b
9968 break;
9969
9970 case 4:
9971 d[2] = t[0]; // r
9972 d[1] = t[1]; // g
9973 d[0] = t[2]; // b
9974 d[3] = t[3]; // a
9975 break;
9976 }
9977 }
9978 t += 4;
9979 }
9980 }
9982 }
9983 }
9984 uncompressed_ram_images.push_back(uncompressed_image);
9985 }
9986 cdata->_ram_images.swap(uncompressed_ram_images);
9987 cdata->_ram_image_compression = CM_off;
9988 return true;
9989
9990#else // HAVE_SQUISH
9991 return false;
9992
9993#endif // HAVE_SQUISH
9994}
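// The reverse direction, likewise as a standalone sketch: one compressed cell
// expands back into a 4x4 RGBA block (64 bytes), which the loop above then
// scatters into the uncompressed page.  kDxt1 is only an example flag.
#ifdef HAVE_SQUISH
#include <squish.h>

void decompress_one_cell_example(const unsigned char *cell,
                                 unsigned char rgba_out[16 * 4]) {
  squish::Decompress(rgba_out, cell, squish::kDxt1);
}
#endif  // HAVE_SQUISH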
9995
9996/**
9997 * Factory method to generate a Texture object
9998 */
9999void Texture::
10000register_with_read_factory() {
10001 BamReader::get_factory()->register_factory(get_class_type(), make_from_bam);
10002}
10003
10004/**
10005 * Function to write the important information in the particular object to a
10006 * Datagram
10007 */
10007 */
10008void Texture::
10009write_datagram(BamWriter *manager, Datagram &me) {
10010 CDWriter cdata(_cycler, false);
10011
10012 bool has_rawdata = false;
10013 do_write_datagram_header(cdata, manager, me, has_rawdata);
10014 do_write_datagram_body(cdata, manager, me);
10015
10016 // If we are also including the texture's image data, then stuff it in here.
10017 if (has_rawdata) {
10018 do_write_datagram_rawdata(cdata, manager, me);
10019 }
10020}
10021
10022/**
10023 * Called by the BamReader to perform any final actions needed for setting up
10024 * the object after all objects have been read and all pointers have been
10025 * completed.
10026 */
10027void Texture::
10028finalize(BamReader *manager) {
10029 // Unref the pointer that we explicitly reffed in make_from_bam().
10030 unref();
10031
10032 // We should never get back to zero after unreffing our own count, because
10033 // we expect to have been stored in a pointer somewhere. If we do get to
10034 // zero, it's a memory leak; the way to avoid this is to call unref_delete()
10035 // above instead of unref(), but this is dangerous to do from within a
10036 // virtual function.
10037 nassertv(get_ref_count() != 0);
10038}
10039
10040
10041/**
10042 * Writes the header part of the texture to the Datagram. This is the common
10043 * part that is shared by all Texture subclasses, and contains the filename
10044 * and rawdata flags. This method is not virtual because all Texture
10045 * subclasses must write the same data at this step.
10046 *
10047 * This part must be read first before calling do_fillin_body() to determine
10048 * whether to load the Texture from the TexturePool or directly from the bam
10049 * stream.
10050 *
10051 * After this call, has_rawdata will be filled with either true or false,
10052 * according to whether we expect to write the texture rawdata to the bam
10053 * stream following the texture body.
10054 */
10055void Texture::
10056do_write_datagram_header(CData *cdata, BamWriter *manager, Datagram &me, bool &has_rawdata) {
10057 // Write out the texture's raw pixel data if (a) the current Bam Texture
10058 // Mode requires that, or (b) there's no filename, so the file can't be
10059 // loaded up from disk, but the raw pixel data is currently available in
10060 // RAM.
10061
10062 // Otherwise, we just write out the filename, and assume whoever loads the
10063 // bam file later will have access to the image file on disk.
10064 BamWriter::BamTextureMode file_texture_mode = manager->get_file_texture_mode();
10065 has_rawdata = (file_texture_mode == BamWriter::BTM_rawdata ||
10066 (cdata->_filename.empty() && do_has_bam_rawdata(cdata)));
10067 if (has_rawdata && !do_has_bam_rawdata(cdata)) {
10068 do_get_bam_rawdata(cdata);
10069 if (!do_has_bam_rawdata(cdata)) {
10070 // No image data after all.
10071 has_rawdata = false;
10072 }
10073 }
10074
10075 bool has_bam_dir = !manager->get_filename().empty();
10076 Filename bam_dir = manager->get_filename().get_dirname();
10077 Filename filename = cdata->_filename;
10078 Filename alpha_filename = cdata->_alpha_filename;
10079
10080 VirtualFileSystem *vfs = VirtualFileSystem::get_global_ptr();
10081
10082 switch (file_texture_mode) {
10083 case BamWriter::BTM_unchanged:
10084 case BamWriter::BTM_rawdata:
10085 break;
10086
10087 case BamWriter::BTM_fullpath:
10088 filename = cdata->_fullpath;
10089 alpha_filename = cdata->_alpha_fullpath;
10090 break;
10091
10092 case BamWriter::BTM_relative:
10093 filename = cdata->_fullpath;
10094 alpha_filename = cdata->_alpha_fullpath;
10095 bam_dir.make_absolute(vfs->get_cwd());
10096 if (!has_bam_dir || !filename.make_relative_to(bam_dir, true)) {
10097 filename.find_on_searchpath(get_model_path());
10098 }
10099 if (gobj_cat.is_debug()) {
10100 gobj_cat.debug()
10101 << "Texture file " << cdata->_fullpath
10102 << " found as " << filename << "\n";
10103 }
10104 if (!has_bam_dir || !alpha_filename.make_relative_to(bam_dir, true)) {
10105 alpha_filename.find_on_searchpath(get_model_path());
10106 }
10107 if (gobj_cat.is_debug()) {
10108 gobj_cat.debug()
10109 << "Alpha image " << cdata->_alpha_fullpath
10110 << " found as " << alpha_filename << "\n";
10111 }
10112 break;
10113
10114 case BamWriter::BTM_basename:
10115 filename = cdata->_fullpath.get_basename();
10116 alpha_filename = cdata->_alpha_fullpath.get_basename();
10117 break;
10118
10119 default:
10120 gobj_cat.error()
10121 << "Unsupported bam-texture-mode: " << (int)file_texture_mode << "\n";
10122 }
10123
10124 if (filename.empty()) {
10125 if (do_has_bam_rawdata(cdata) || cdata->_has_clear_color) {
10126 // If we don't have a filename, we have to store rawdata anyway.
10127 has_rawdata = true;
10128 }
10129 }
10130
10131 me.add_string(get_name());
10132 me.add_string(filename);
10133 me.add_string(alpha_filename);
10134 me.add_uint8(cdata->_primary_file_num_channels);
10135 me.add_uint8(cdata->_alpha_file_channel);
10136 me.add_bool(has_rawdata);
10137
10138 if (manager->get_file_minor_ver() < 25 &&
10139 cdata->_texture_type == TT_cube_map) {
10140 // Between Panda3D releases 1.7.2 and 1.8.0 (bam versions 6.24 and 6.25),
10141 // we added TT_2d_texture_array, shifting the definition for TT_cube_map.
10142 me.add_uint8(TT_2d_texture_array);
10143 } else {
10144 me.add_uint8(cdata->_texture_type);
10145 }
10146
10147 if (manager->get_file_minor_ver() >= 32) {
10148 me.add_bool(cdata->_has_read_mipmaps);
10149 }
10150}
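// The header logic above boils down to one predicate; a standalone sketch of
// the same decision, illustrative only and ignoring the clear-color case
// handled at the end of the function.
#include "bamWriter.h"

static bool should_write_rawdata_example(BamWriter::BamTextureMode mode,
                                         bool has_filename, bool has_ram_image) {
  return mode == BamWriter::BTM_rawdata || (!has_filename && has_ram_image);
}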
10151
10152/**
10153 * Writes the body part of the texture to the Datagram. This is generally all
10154 * of the texture parameters except for the header and the rawdata.
10155 */
10156void Texture::
10157do_write_datagram_body(CData *cdata, BamWriter *manager, Datagram &me) {
10158 if (manager->get_file_minor_ver() >= 36) {
10159 cdata->_default_sampler.write_datagram(me);
10160 } else {
10161 const SamplerState &s = cdata->_default_sampler;
10162 me.add_uint8(s.get_wrap_u());
10163 me.add_uint8(s.get_wrap_v());
10164 me.add_uint8(s.get_wrap_w());
10165 me.add_uint8(s.get_minfilter());
10166 me.add_uint8(s.get_magfilter());
10167 me.add_int16(s.get_anisotropic_degree());
10168 s.get_border_color().write_datagram(me);
10169 }
10170
10171 me.add_uint8(cdata->_compression);
10172 me.add_uint8(cdata->_quality_level);
10173
10174 me.add_uint8(cdata->_format);
10175 me.add_uint8(cdata->_num_components);
10176
10177 if (cdata->_texture_type == TT_buffer_texture) {
10178 me.add_uint8(cdata->_usage_hint);
10179 }
10180
10181 if (manager->get_file_minor_ver() >= 28) {
10182 me.add_uint8(cdata->_auto_texture_scale);
10183 }
10184 me.add_uint32(cdata->_orig_file_x_size);
10185 me.add_uint32(cdata->_orig_file_y_size);
10186
10187 bool has_simple_ram_image = !cdata->_simple_ram_image._image.empty();
10188 me.add_bool(has_simple_ram_image);
10189
10190 // Write out the simple image too, so it will be available later.
10191 if (has_simple_ram_image) {
10192 me.add_uint32(cdata->_simple_x_size);
10193 me.add_uint32(cdata->_simple_y_size);
10194 me.add_int32(cdata->_simple_image_date_generated);
10195 me.add_uint32(cdata->_simple_ram_image._image.size());
10196 me.append_data(cdata->_simple_ram_image._image, cdata->_simple_ram_image._image.size());
10197 }
10198
10199 if (manager->get_file_minor_ver() >= 45) {
10200 me.add_bool(cdata->_has_clear_color);
10201 if (cdata->_has_clear_color) {
10202 cdata->_clear_color.write_datagram(me);
10203 }
10204 }
10205}
10206
10207/**
10208 * Writes the rawdata part of the texture to the Datagram.
10209 */
10210void Texture::
10211do_write_datagram_rawdata(CData *cdata, BamWriter *manager, Datagram &me) {
10212 me.add_uint32(cdata->_x_size);
10213 me.add_uint32(cdata->_y_size);
10214 me.add_uint32(cdata->_z_size);
10215
10216 if (manager->get_file_minor_ver() >= 30) {
10217 me.add_uint32(cdata->_pad_x_size);
10218 me.add_uint32(cdata->_pad_y_size);
10219 me.add_uint32(cdata->_pad_z_size);
10220 }
10221
10222 if (manager->get_file_minor_ver() >= 26) {
10223 me.add_uint32(cdata->_num_views);
10224 }
10225 me.add_uint8(cdata->_component_type);
10226 me.add_uint8(cdata->_component_width);
10227 me.add_uint8(cdata->_ram_image_compression);
10228
10229 if (cdata->_ram_images.empty() && cdata->_has_clear_color &&
10230 manager->get_file_minor_ver() < 45) {
10231 // For older .bam versions that don't support clear colors, make up a RAM
10232 // image.
10233 int image_size = do_get_expected_ram_image_size(cdata);
10234 me.add_uint8(1);
10235 me.add_uint32(do_get_expected_ram_page_size(cdata));
10236 me.add_uint32(image_size);
10237
10238 // Fill the image with the clear color.
10239 unsigned char pixel[16];
10240 const int pixel_size = do_get_clear_data(cdata, pixel);
10241 nassertv(pixel_size > 0);
10242
10243 for (int i = 0; i < image_size; i += pixel_size) {
10244 me.append_data(pixel, pixel_size);
10245 }
10246 } else {
10247 me.add_uint8(cdata->_ram_images.size());
10248 for (size_t n = 0; n < cdata->_ram_images.size(); ++n) {
10249 me.add_uint32(cdata->_ram_images[n]._page_size);
10250 me.add_uint32(cdata->_ram_images[n]._image.size());
10251 me.append_data(cdata->_ram_images[n]._image, cdata->_ram_images[n]._image.size());
10252 }
10253 }
10254}
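// A standalone sketch of the clear-color fallback above: the expected image
// size is filled by tiling a single pixel's bytes, which is what the
// append_data loop does one pixel at a time.
#include <cstring>

void fill_with_clear_pixel_example(unsigned char *image, size_t image_size,
                                   const unsigned char *pixel, size_t pixel_size) {
  for (size_t i = 0; i + pixel_size <= image_size; i += pixel_size) {
    std::memcpy(image + i, pixel, pixel_size);
  }
}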
10255
10256/**
10257 * Factory method to generate a Texture object
10258 */
10259TypedWritable *Texture::
10260make_from_bam(const FactoryParams &params) {
10261 PT(Texture) dummy = new Texture;
10262 return dummy->make_this_from_bam(params);
10263}
10264
10265/**
10266 * Called by make_from_bam() once the particular subclass of Texture is known.
10267 * This is called on a newly-constructed Texture object of the appropriate
10268 * subclass. It will return either the same Texture object (e.g. this), or a
10269 * different Texture object loaded via the TexturePool, as appropriate.
10270 */
10271TypedWritable *Texture::
10272make_this_from_bam(const FactoryParams &params) {
10273 // The process of making a texture is slightly different than making other
10274 // TypedWritable objects. That is because all creation of Textures should
10275 // be done through calls to TexturePool, which ensures that any loads of the
10276 // same filename refer to the same memory.
10277
10278 DatagramIterator scan;
10279 BamReader *manager;
10280
10281 parse_params(params, scan, manager);
10282
10283 // Get the header information--the filenames and texture type--so we can
10284 // look up the file on disk first.
10285 string name = scan.get_string();
10286 Filename filename = scan.get_string();
10287 Filename alpha_filename = scan.get_string();
10288
10289 int primary_file_num_channels = scan.get_uint8();
10290 int alpha_file_channel = scan.get_uint8();
10291 bool has_rawdata = scan.get_bool();
10292 TextureType texture_type = (TextureType)scan.get_uint8();
10293 if (manager->get_file_minor_ver() < 25) {
10294 // Between Panda3D releases 1.7.2 and 1.8.0 (bam versions 6.24 and 6.25),
10295 // we added TT_2d_texture_array, shifting the definition for TT_cube_map.
10296 if (texture_type == TT_2d_texture_array) {
10297 texture_type = TT_cube_map;
10298 }
10299 }
10300 bool has_read_mipmaps = false;
10301 if (manager->get_file_minor_ver() >= 32) {
10302 has_read_mipmaps = scan.get_bool();
10303 }
10304
10305 Texture *me = nullptr;
10306 if (has_rawdata) {
10307 // If the raw image data is included, then just load the texture directly
10308 // from the stream, and return it. In this case we return the "this"
10309 // pointer, since it's a newly-created Texture object of the appropriate
10310 // type.
10311 me = this;
10312 me->set_name(name);
10313 CDWriter cdata_me(me->_cycler, true);
10314 cdata_me->_filename = filename;
10315 cdata_me->_alpha_filename = alpha_filename;
10316 cdata_me->_primary_file_num_channels = primary_file_num_channels;
10317 cdata_me->_alpha_file_channel = alpha_file_channel;
10318 cdata_me->_texture_type = texture_type;
10319 cdata_me->_has_read_mipmaps = has_read_mipmaps;
10320
10321 // Read the texture attributes directly from the bam stream.
10322 me->do_fillin_body(cdata_me, scan, manager);
10323 me->do_fillin_rawdata(cdata_me, scan, manager);
10324
10325 // To manage the reference count, explicitly ref it now, then unref it in
10326 // the finalize callback.
10327 me->ref();
10328 manager->register_finalize(me);
10329
10330 } else {
10331 // The raw image data isn't included, so we'll be loading the Texture via
10332 // the TexturePool. In this case we use the "this" pointer as a temporary
10333 // object to read all of the attributes from the bam stream.
10334 Texture *dummy = this;
10335 AutoTextureScale auto_texture_scale = ATS_unspecified;
10336 bool has_simple_ram_image = false;
10337 {
10338 CDWriter cdata_dummy(dummy->_cycler, true);
10339 dummy->do_fillin_body(cdata_dummy, scan, manager);
10340 auto_texture_scale = cdata_dummy->_auto_texture_scale;
10341 has_simple_ram_image = !cdata_dummy->_simple_ram_image._image.empty();
10342 }
10343
10344 if (filename.empty()) {
10345 // This texture has no filename; since we don't have an image to load,
10346 // we can't actually create the texture.
10347 gobj_cat.info()
10348 << "Cannot create texture '" << name << "' with no filename.\n";
10349
10350 } else {
10351 // This texture does have a filename, so try to load it from disk.
10352 VirtualFileSystem *vfs = VirtualFileSystem::get_global_ptr();
10353 if (!manager->get_filename().empty()) {
10354 // If texture filename was given relative to the bam filename, expand
10355 // it now.
10356 Filename bam_dir = manager->get_filename().get_dirname();
10357 vfs->resolve_filename(filename, bam_dir);
10358 if (!alpha_filename.empty()) {
10359 vfs->resolve_filename(alpha_filename, bam_dir);
10360 }
10361 }
10362
10363 LoaderOptions options = manager->get_loader_options();
10364 if (dummy->uses_mipmaps()) {
10365 options.set_texture_flags(options.get_texture_flags() | LoaderOptions::TF_generate_mipmaps);
10366 }
10367 options.set_auto_texture_scale(auto_texture_scale);
10368
10369 switch (texture_type) {
10370 case TT_buffer_texture:
10371 case TT_1d_texture:
10372 case TT_2d_texture:
10373 case TT_1d_texture_array:
10374 // If we don't want to preload textures, and we already have a simple
10375 // RAM image (or don't need one), we don't need to load it from disk.
10376 // We do check for it in the texture pool first, though, in case it has
10377 // already been loaded.
10378 if ((options.get_texture_flags() & LoaderOptions::TF_preload) == 0 &&
10379 (has_simple_ram_image || (options.get_texture_flags() & LoaderOptions::TF_preload_simple) == 0)) {
10380 if (alpha_filename.empty()) {
10381 me = TexturePool::get_texture(filename, primary_file_num_channels,
10382 has_read_mipmaps);
10383 } else {
10384 me = TexturePool::get_texture(filename, alpha_filename,
10385 primary_file_num_channels,
10386 alpha_file_channel,
10387 has_read_mipmaps);
10388 }
10389 if (me != nullptr && me->get_texture_type() == texture_type) {
10390 // We can use this.
10391 break;
10392 }
10393
10394 // We don't have a texture, but we didn't need to preload it, so we
10395 // can just use this one. We just need to know where we can find it
10396 // when we do need to reload it.
10397 Filename fullpath = filename;
10398 Filename alpha_fullpath = alpha_filename;
10399 const DSearchPath &model_path = get_model_path();
10400 if (vfs->resolve_filename(fullpath, model_path) &&
10401 (alpha_fullpath.empty() || vfs->resolve_filename(alpha_fullpath, model_path))) {
10402 me = dummy;
10403 me->set_name(name);
10404
10405 {
10406 CDWriter cdata_me(me->_cycler, true);
10407 cdata_me->_filename = filename;
10408 cdata_me->_alpha_filename = alpha_filename;
10409 cdata_me->_fullpath = fullpath;
10410 cdata_me->_alpha_fullpath = alpha_fullpath;
10411 cdata_me->_primary_file_num_channels = primary_file_num_channels;
10412 cdata_me->_alpha_file_channel = alpha_file_channel;
10413 cdata_me->_texture_type = texture_type;
10414 cdata_me->_loaded_from_image = true;
10415 cdata_me->_has_read_mipmaps = has_read_mipmaps;
10416 }
10417
10418 // To manage the reference count, explicitly ref it now, then unref
10419 // it in the finalize callback.
10420 me->ref();
10421 manager->register_finalize(me);
10422
10423 // Do add it to the cache now, so that future uses of this same
10424 // texture are unified.
10425 TexturePool::add_texture(me);
10426 return me;
10427 }
10428 }
10429 if (alpha_filename.empty()) {
10430 me = TexturePool::load_texture(filename, primary_file_num_channels,
10431 has_read_mipmaps, options);
10432 } else {
10433 me = TexturePool::load_texture(filename, alpha_filename,
10434 primary_file_num_channels,
10435 alpha_file_channel,
10436 has_read_mipmaps, options);
10437 }
10438 break;
10439
10440 case TT_3d_texture:
10441 me = TexturePool::load_3d_texture(filename, has_read_mipmaps, options);
10442 break;
10443
10444 case TT_2d_texture_array:
10445 case TT_cube_map_array:
10446 me = TexturePool::load_2d_texture_array(filename, has_read_mipmaps, options);
10447 break;
10448
10449 case TT_cube_map:
10450 me = TexturePool::load_cube_map(filename, has_read_mipmaps, options);
10451 break;
10452 }
10453 }
10454
10455 if (me != nullptr) {
10456 me->set_name(name);
10457 CDWriter cdata_me(me->_cycler, true);
10458 me->do_fillin_from(cdata_me, dummy);
10459
10460 // Since in this case me was loaded from the TexturePool, there's no
10461 // need to explicitly manage the reference count. TexturePool will hold
10462 // it safely.
10463 }
10464 }
10465
10466 return me;
10467}
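// Because the filename-only path above defers to the TexturePool, repeated
// loads of the same image unify to a single Texture object; a standalone
// sketch with a hypothetical path.
#include "texturePool.h"

bool pool_unifies_loads_example() {
  PT(Texture) a = TexturePool::load_texture("maps/grid.rgb");
  PT(Texture) b = TexturePool::load_texture("maps/grid.rgb");
  return a != nullptr && a == b;
}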
10468
10469/**
10470 * Reads in the part of the Texture that was written with
10471 * do_write_datagram_body().
10472 */
10473void Texture::
10474do_fillin_body(CData *cdata, DatagramIterator &scan, BamReader *manager) {
10475 cdata->_default_sampler.read_datagram(scan, manager);
10476
10477 if (manager->get_file_minor_ver() >= 1) {
10478 cdata->_compression = (CompressionMode)scan.get_uint8();
10479 }
10480 if (manager->get_file_minor_ver() >= 16) {
10481 cdata->_quality_level = (QualityLevel)scan.get_uint8();
10482 }
10483
10484 cdata->_format = (Format)scan.get_uint8();
10485 cdata->_num_components = scan.get_uint8();
10486
10487 if (cdata->_texture_type == TT_buffer_texture) {
10488 cdata->_usage_hint = (GeomEnums::UsageHint)scan.get_uint8();
10489 }
10490
10491 cdata->inc_properties_modified();
10492
10493 cdata->_auto_texture_scale = ATS_unspecified;
10494 if (manager->get_file_minor_ver() >= 28) {
10495 cdata->_auto_texture_scale = (AutoTextureScale)scan.get_uint8();
10496 }
10497
10498 bool has_simple_ram_image = false;
10499 if (manager->get_file_minor_ver() >= 18) {
10500 cdata->_orig_file_x_size = scan.get_uint32();
10501 cdata->_orig_file_y_size = scan.get_uint32();
10502
10503 has_simple_ram_image = scan.get_bool();
10504 }
10505
10506 if (has_simple_ram_image) {
10507 cdata->_simple_x_size = scan.get_uint32();
10508 cdata->_simple_y_size = scan.get_uint32();
10509 cdata->_simple_image_date_generated = scan.get_int32();
10510
10511 size_t u_size = scan.get_uint32();
10512
10513 // Protect against large allocation.
10514 if (u_size > scan.get_remaining_size()) {
10515 gobj_cat.error()
10516 << "simple RAM image extends past end of datagram, is texture corrupt?\n";
10517 return;
10518 }
10519
10520 PTA_uchar image = PTA_uchar::empty_array(u_size, get_class_type());
10521 scan.extract_bytes(image.p(), u_size);
10522
10523 cdata->_simple_ram_image._image = image;
10524 cdata->_simple_ram_image._page_size = u_size;
10525 cdata->inc_simple_image_modified();
10526 }
10527
10528 if (manager->get_file_minor_ver() >= 45) {
10529 cdata->_has_clear_color = scan.get_bool();
10530 if (cdata->_has_clear_color) {
10531 cdata->_clear_color.read_datagram(scan);
10532 }
10533 }
10534}
10535
10536/**
10537 * Reads in the part of the Texture that was written with
10538 * do_write_datagram_rawdata().
10539 */
10540void Texture::
10541do_fillin_rawdata(CData *cdata, DatagramIterator &scan, BamReader *manager) {
10542 cdata->_x_size = scan.get_uint32();
10543 cdata->_y_size = scan.get_uint32();
10544 cdata->_z_size = scan.get_uint32();
10545
10546 if (manager->get_file_minor_ver() >= 30) {
10547 cdata->_pad_x_size = scan.get_uint32();
10548 cdata->_pad_y_size = scan.get_uint32();
10549 cdata->_pad_z_size = scan.get_uint32();
10550 } else {
10551 do_set_pad_size(cdata, 0, 0, 0);
10552 }
10553
10554 cdata->_num_views = 1;
10555 if (manager->get_file_minor_ver() >= 26) {
10556 cdata->_num_views = scan.get_uint32();
10557 }
10558 cdata->_component_type = (ComponentType)scan.get_uint8();
10559 cdata->_component_width = scan.get_uint8();
10560 cdata->_ram_image_compression = CM_off;
10561 if (manager->get_file_minor_ver() >= 1) {
10562 cdata->_ram_image_compression = (CompressionMode)scan.get_uint8();
10563 }
10564
10565 int num_ram_images = 1;
10566 if (manager->get_file_minor_ver() >= 3) {
10567 num_ram_images = scan.get_uint8();
10568 }
10569
10570 cdata->_ram_images.clear();
10571 cdata->_ram_images.reserve(num_ram_images);
10572 for (int n = 0; n < num_ram_images; ++n) {
10573 cdata->_ram_images.push_back(RamImage());
10574 cdata->_ram_images[n]._page_size = get_expected_ram_page_size();
10575 if (manager->get_file_minor_ver() >= 1) {
10576 cdata->_ram_images[n]._page_size = scan.get_uint32();
10577 }
10578
10579 // fill the cdata->_image buffer with image data
10580 size_t u_size = scan.get_uint32();
10581
10582 // Protect against large allocation.
10583 if (u_size > scan.get_remaining_size()) {
10584 gobj_cat.error()
10585 << "RAM image " << n << " extends past end of datagram, is texture corrupt?\n";
10586 return;
10587 }
10588
10589 PTA_uchar image = PTA_uchar::empty_array(u_size, get_class_type());
10590 scan.extract_bytes(image.p(), u_size);
10591
10592 cdata->_ram_images[n]._image = image;
10593 }
10594 cdata->_loaded_from_image = true;
10595 cdata->inc_image_modified();
10596}
10597
10598/**
10599 * Called in make_from_bam(), this method properly copies the attributes from
10600 * the bam stream (as stored in dummy) into this texture, updating the
10601 * modified flags appropriately.
10602 */
10603void Texture::
10604do_fillin_from(CData *cdata, const Texture *dummy) {
10605 // Use the setters instead of setting these directly, so we can correctly
10606 // avoid incrementing cdata->_properties_modified if none of these actually
10607 // change. (Otherwise, we'd have to reload the texture to the GSG every
10608 // time we loaded a new bam file that references the texture, since each bam
10609 // file reference passes through this function.)
10610
10611 CDReader cdata_dummy(dummy->_cycler);
10612
10613 do_set_wrap_u(cdata, cdata_dummy->_default_sampler.get_wrap_u());
10614 do_set_wrap_v(cdata, cdata_dummy->_default_sampler.get_wrap_v());
10615 do_set_wrap_w(cdata, cdata_dummy->_default_sampler.get_wrap_w());
10616 do_set_border_color(cdata, cdata_dummy->_default_sampler.get_border_color());
10617
10618 if (cdata_dummy->_default_sampler.get_minfilter() != SamplerState::FT_default) {
10619 do_set_minfilter(cdata, cdata_dummy->_default_sampler.get_minfilter());
10620 }
10621 if (cdata_dummy->_default_sampler.get_magfilter() != SamplerState::FT_default) {
10622 do_set_magfilter(cdata, cdata_dummy->_default_sampler.get_magfilter());
10623 }
10624 if (cdata_dummy->_default_sampler.get_anisotropic_degree() != 0) {
10625 do_set_anisotropic_degree(cdata, cdata_dummy->_default_sampler.get_anisotropic_degree());
10626 }
10627 if (cdata_dummy->_compression != CM_default) {
10628 do_set_compression(cdata, cdata_dummy->_compression);
10629 }
10630 if (cdata_dummy->_quality_level != QL_default) {
10631 do_set_quality_level(cdata, cdata_dummy->_quality_level);
10632 }
10633
10634 Format format = cdata_dummy->_format;
10635 int num_components = cdata_dummy->_num_components;
10636
10637 if (num_components == cdata->_num_components) {
10638 // Only reset the format if the number of components hasn't changed, since
10639 // if the number of components has changed our texture no longer matches
10640 // what it was when the bam was written.
10641 do_set_format(cdata, format);
10642 }
10643
10644 if (!cdata_dummy->_simple_ram_image._image.empty()) {
10645 // Only replace the simple ram image if it was generated more recently
10646 // than the one we already have.
10647 if (cdata->_simple_ram_image._image.empty() ||
10648 cdata_dummy->_simple_image_date_generated > cdata->_simple_image_date_generated) {
10649 do_set_simple_ram_image(cdata,
10650 cdata_dummy->_simple_ram_image._image,
10651 cdata_dummy->_simple_x_size,
10652 cdata_dummy->_simple_y_size);
10653 cdata->_simple_image_date_generated = cdata_dummy->_simple_image_date_generated;
10654 }
10655 }
10656}
10657
10658/**
10659 *
10660 */
10661Texture::CData::
10662CData() {
10663 _primary_file_num_channels = 0;
10664 _alpha_file_channel = 0;
10665 _keep_ram_image = true;
10666 _compression = CM_default;
10667 _auto_texture_scale = ATS_unspecified;
10668 _ram_image_compression = CM_off;
10669 _render_to_texture = false;
10670 _match_framebuffer_format = false;
10671 _post_load_store_cache = false;
10672 _quality_level = QL_default;
10673
10674 _texture_type = TT_2d_texture;
10675 _x_size = 0;
10676 _y_size = 1;
10677 _z_size = 1;
10678 _num_views = 1;
10679 _num_components = 3;
10680 _component_width = 1;
10681 _format = F_rgb;
10682 _component_type = T_unsigned_byte;
10683
10684 // Only used for buffer textures.
10685 _usage_hint = GeomEnums::UH_unspecified;
10686
10687 _pad_x_size = 0;
10688 _pad_y_size = 0;
10689 _pad_z_size = 0;
10690
10691 _orig_file_x_size = 0;
10692 _orig_file_y_size = 0;
10693
10694 _loaded_from_image = false;
10695 _loaded_from_txo = false;
10696 _has_read_pages = false;
10697 _has_read_mipmaps = false;
10698 _num_mipmap_levels_read = 0;
10699
10700 _simple_x_size = 0;
10701 _simple_y_size = 0;
10702 _simple_ram_image._page_size = 0;
10703
10704 _has_clear_color = false;
10705}
10706
10707/**
10708 *
10709 */
10710Texture::CData::
10711CData(const Texture::CData &copy) {
10712 _num_mipmap_levels_read = 0;
10713 _render_to_texture = copy._render_to_texture;
10714 _post_load_store_cache = copy._post_load_store_cache;
10715
10716 do_assign(&copy);
10717
10718 _properties_modified = copy._properties_modified;
10719 _image_modified = copy._image_modified;
10720 _simple_image_modified = copy._simple_image_modified;
10721}
10722
10723/**
10724 *
10725 */
10726CycleData *Texture::CData::
10727make_copy() const {
10728 return new CData(*this);
10729}
10730
10731/**
10732 *
10733 */
10734void Texture::CData::
10735do_assign(const Texture::CData *copy) {
10736 _filename = copy->_filename;
10737 _alpha_filename = copy->_alpha_filename;
10738 if (!copy->_fullpath.empty()) {
10739 // Since the fullpath is often empty on a file loaded directly from a txo,
10740 // we only assign the fullpath if it is not empty.
10741 _fullpath = copy->_fullpath;
10742 _alpha_fullpath = copy->_alpha_fullpath;
10743 }
10744 _primary_file_num_channels = copy->_primary_file_num_channels;
10745 _alpha_file_channel = copy->_alpha_file_channel;
10746 _x_size = copy->_x_size;
10747 _y_size = copy->_y_size;
10748 _z_size = copy->_z_size;
10749 _num_views = copy->_num_views;
10750 _pad_x_size = copy->_pad_x_size;
10751 _pad_y_size = copy->_pad_y_size;
10752 _pad_z_size = copy->_pad_z_size;
10753 _orig_file_x_size = copy->_orig_file_x_size;
10754 _orig_file_y_size = copy->_orig_file_y_size;
10755 _num_components = copy->_num_components;
10756 _component_width = copy->_component_width;
10757 _texture_type = copy->_texture_type;
10758 _format = copy->_format;
10759 _component_type = copy->_component_type;
10760 _loaded_from_image = copy->_loaded_from_image;
10761 _loaded_from_txo = copy->_loaded_from_txo;
10762 _has_read_pages = copy->_has_read_pages;
10763 _has_read_mipmaps = copy->_has_read_mipmaps;
10764 _num_mipmap_levels_read = copy->_num_mipmap_levels_read;
10765 _default_sampler = copy->_default_sampler;
10766 _keep_ram_image = copy->_keep_ram_image;
10767 _compression = copy->_compression;
10768 _match_framebuffer_format = copy->_match_framebuffer_format;
10769 _quality_level = copy->_quality_level;
10770 _auto_texture_scale = copy->_auto_texture_scale;
10771 _ram_image_compression = copy->_ram_image_compression;
10772 _ram_images = copy->_ram_images;
10773 _simple_x_size = copy->_simple_x_size;
10774 _simple_y_size = copy->_simple_y_size;
10775 _simple_ram_image = copy->_simple_ram_image;
10776}
10777
10778/**
10779 * Writes the contents of this object to the datagram for shipping out to a
10780 * Bam file.
10781 */
10782void Texture::CData::
10783write_datagram(BamWriter *manager, Datagram &dg) const {
10784}
10785
10786/**
10787 * Receives an array of pointers, one for each time manager->read_pointer()
10788 * was called in fillin(). Returns the number of pointers processed.
10789 */
10790int Texture::CData::
10791complete_pointers(TypedWritable **p_list, BamReader *manager) {
10792 return 0;
10793}
10794
10795/**
10796 * This internal function is called by make_from_bam to read in all of the
10797 * relevant data from the BamFile for the new Geom.
10798 */
10799void Texture::CData::
10800fillin(DatagramIterator &scan, BamReader *manager) {
10801}
10802
10803/**
10804 *
10805 */
10806ostream &
10807operator << (ostream &out, Texture::TextureType tt) {
10808 return out << Texture::format_texture_type(tt);
10809}
10810
10811/**
10812 *
10813 */
10814ostream &
10815operator << (ostream &out, Texture::ComponentType ct) {
10816 return out << Texture::format_component_type(ct);
10817}
10818
10819/**
10820 *
10821 */
10822ostream &
10823operator << (ostream &out, Texture::Format f) {
10824 return out << Texture::format_format(f);
10825}
10826
10827/**
10828 *
10829 */
10830ostream &
10831operator << (ostream &out, Texture::CompressionMode cm) {
10832 return out << Texture::format_compression_mode(cm);
10833}
10834
10835/**
10836 *
10837 */
10838ostream &
10839operator << (ostream &out, Texture::QualityLevel tql) {
10840 return out << Texture::format_quality_level(tql);
10841}
10842
10843/**
10844 *
10845 */
10846istream &
10847operator >> (istream &in, Texture::QualityLevel &tql) {
10848 string word;
10849 in >> word;
10850
10851 tql = Texture::string_quality_level(word);
10852 return in;
10853}
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
void parse_params(const FactoryParams &params, DatagramIterator &scan, BamReader *&manager)
Takes in a FactoryParams, passed from a WritableFactory into any TypedWritable's make function,...
Definition bamReader.I:275
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
This class represents a thread-safe handle to a promised future result of an asynchronous operation,...
Definition asyncFuture.h:61
An instance of this class is written to the front of a Bam or Txo file to make the file a cached inst...
void add_dependent_file(const Filename &pathname)
Adds the indicated file to the list of files that will be loaded to generate the data in this record.
get_data
Returns a pointer to the data stored in the record, or NULL if there is no data.
set_data
Stores a new data object on the record.
This class maintains a cache of Bam and/or Txo objects generated from model files and texture images ...
Definition bamCache.h:42
get_cache_textures
Returns whether texture files (e.g.
Definition bamCache.h:90
bool store(BamCacheRecord *record)
Flushes a cache entry to disk.
Definition bamCache.cxx:194
static BamCache * get_global_ptr()
Returns a pointer to the global BamCache object, which is used automatically by the ModelPool and Tex...
Definition bamCache.I:223
get_cache_compressed_textures
Returns whether compressed texture files will be stored in the cache, as compressed txo files.
Definition bamCache.h:92
This is the fundamental interface for extracting binary objects from a Bam file, as generated by a Ba...
Definition bamReader.h:110
void register_finalize(TypedWritable *whom)
Should be called by an object reading itself from the Bam file to indicate that this particular objec...
bool resolve()
This may be called at any time during processing of the Bam file to resolve all the known pointers so...
bool init()
Initializes the BamReader prior to reading any objects from its source.
Definition bamReader.cxx:85
get_filename
If a BAM is a file, then the BamReader should contain the name of the file.
Definition bamReader.h:155
TypedWritable * read_object()
Reads a single object from the Bam file.
get_loader_options
Returns the LoaderOptions passed to the loader when the model was requested, if any.
Definition bamReader.h:156
int get_file_minor_ver() const
Returns the minor version number of the Bam file currently being read.
Definition bamReader.I:83
static WritableFactory * get_factory()
Returns the global WritableFactory for generating TypedWritable objects.
Definition bamReader.I:177
This is the fundamental interface for writing binary objects to a Bam file, to be extracted later by ...
Definition bamWriter.h:63
get_file_texture_mode
Returns the BamTextureMode preference indicated by the Bam file currently being written.
Definition bamWriter.h:99
get_filename
If a BAM is a file, then the BamWriter should contain the name of the file.
Definition bamWriter.h:95
int get_file_minor_ver() const
Returns the minor version number of the Bam file currently being written.
Definition bamWriter.I:59
get_active
Returns the active flag associated with this object.
get_resident
Returns the resident flag associated with this object.
get_data_size_bytes
Returns the number of bytes previously reported for the data object.
void notify_all()
Informs all of the other threads who are currently blocked on wait() that the relevant condition has ...
void wait()
Waits on the condition.
This class specializes ConfigVariable as an enumerated type.
int get_word(size_t n) const
Returns the variable's nth value.
std::string get_unique_value(size_t n) const
Returns the nth unique value of the variable.
size_t get_num_unique_values() const
Returns the number of unique values in the variable.
PointerToArray< Element > cast_non_const() const
Casts away the constness of the CPTA(Element), and returns an equivalent PTA(Element).
This collects together the pieces of data that are accumulated for each node while walking the scene ...
This object performs a depth-first traversal of the scene graph, with optional view-frustum culling,...
This template class calls PipelineCycler::read() in the constructor and PipelineCycler::release_read(...
This template class calls PipelineCycler::read_unlocked(), and then provides a transparent read-only ...
This template class calls PipelineCycler::write() in the constructor and PipelineCycler::release_writ...
A single page of data maintained by a PipelineCycler.
Definition cycleData.h:50
This class stores a list of directories that can be searched, in order, to locate a particular file.
Definition dSearchPath.h:28
This class can be used to read a binary file that consists of an arbitrary header followed by a numbe...
bool read_header(std::string &header, size_t num_bytes)
Reads a sequence of bytes from the beginning of the datagram file.
bool open(const FileReference *file)
Opens the indicated filename for reading.
A class to retrieve the individual data elements previously stored in a Datagram.
uint8_t get_uint8()
Extracts an unsigned 8-bit integer.
vector_uchar extract_bytes(size_t size)
Extracts the indicated number of bytes from the datagram and returns them as a byte vector.
uint32_t get_uint32()
Extracts an unsigned 32-bit integer.
bool get_bool()
Extracts a boolean value.
std::string get_string()
Extracts a variable-length string.
int32_t get_int32()
Extracts a signed 32-bit integer.
size_t get_remaining_size() const
Returns the number of bytes remaining in the datagram.
This class can be used to write a binary file that consists of an arbitrary header followed by a numb...
bool open(const FileReference *file)
Opens the indicated filename for writing.
bool write_header(const vector_uchar &header)
Writes a sequence of bytes to the beginning of the datagram file.
An ordered list of data elements, formatted in memory for transmission over a socket or writing to a ...
Definition datagram.h:38
void add_uint32(uint32_t value)
Adds an unsigned 32-bit integer to the datagram.
Definition datagram.I:94
void add_int16(int16_t value)
Adds a signed 16-bit integer to the datagram.
Definition datagram.I:58
void add_int32(int32_t value)
Adds a signed 32-bit integer to the datagram.
Definition datagram.I:67
void add_uint8(uint8_t value)
Adds an unsigned 8-bit integer to the datagram.
Definition datagram.I:50
void add_bool(bool value)
Adds a boolean value to the datagram.
Definition datagram.I:34
void append_data(const void *data, size_t size)
Appends some more raw data to the end of the datagram.
Definition datagram.cxx:129
void add_string(const std::string &str)
Adds a variable-length string to the datagram.
Definition datagram.I:219
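To make the paired add_*/get_* calls above concrete, a small round-trip sketch (the field values are arbitrary examples, not taken from texture.cxx):
#include <string>
#include "datagram.h"
#include "datagramIterator.h"

// Sketch: pack a few fields into a Datagram and read them back in the same order.
std::string datagram_round_trip() {
  Datagram dg;
  dg.add_uint8(2);                     // e.g. a small version tag
  dg.add_uint32(0x20534444);           // e.g. the DDS magic number
  dg.add_bool(true);
  dg.add_string("texture.png");

  DatagramIterator scan(dg);
  uint8_t version  = scan.get_uint8();
  uint32_t magic   = scan.get_uint32();
  bool flag        = scan.get_bool();
  std::string name = scan.get_string();   // fields come back in the order written
  return (version == 2 && magic == 0x20534444 && flag) ? name : std::string();
}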
An instance of this class is passed to the Factory when requesting it to do its business and construc...
void register_factory(TypeHandle handle, CreateFunc *func, void *user_data=nullptr)
Registers a new kind of thing the Factory will be able to create.
Definition factory.I:73
The name of a file, such as a texture file or an Egg file.
Definition filename.h:44
std::string get_basename() const
Returns the basename part of the filename.
Definition filename.I:367
Filename get_filename_index(int index) const
If the pattern flag is set for this Filename and the filename string actually includes a sequence of ...
Definition filename.cxx:836
bool has_hash() const
Returns true if the filename is indicated to be a filename pattern (that is, set_pattern(true) was ca...
Definition filename.I:531
void set_basename_wo_extension(const std::string &s)
Replaces the basename part of the filename, without the file extension.
Definition filename.cxx:783
int find_on_searchpath(const DSearchPath &searchpath)
Performs the reverse of the resolve_filename() operation: assuming that the current filename is fully...
bool make_relative_to(Filename directory, bool allow_backups=true)
Adjusts this filename, which must be a fully-specified pathname beginning with a slash,...
std::string get_basename_wo_extension() const
Returns the basename part of the filename, without the file extension.
Definition filename.I:386
void make_absolute()
Converts the filename to a fully-qualified pathname from the root (if it is a relative pathname),...
Definition filename.cxx:968
static Filename pattern_filename(const std::string &filename)
Constructs a filename that represents a sequence of numbered files.
Definition filename.I:160
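A brief sketch of the pattern-filename helpers above, in the spirit of how multi-page textures are named (the pattern string is a made-up example):
#include "filename.h"

// Sketch: expand a numbered sequence of page filenames from a hash pattern.
void list_pages(int num_pages) {
  Filename pattern = Filename::pattern_filename("maps/slices_#.png");
  if (pattern.has_hash()) {
    for (int i = 0; i < num_pages; ++i) {
      Filename page = pattern.get_filename_index(i);  // maps/slices_0.png, maps/slices_1.png, ...
      (void)page;  // load or inspect each page here
    }
  }
}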
This class can be used to test for string matches against standard Unix- shell filename globbing conv...
Definition globPattern.h:32
bool matches(const std::string &candidate) const
Returns true if the candidate string matches the pattern, false otherwise.
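A tiny usage sketch for GlobPattern::matches(); the pattern and helper name are illustrative only:
#include <string>
#include "globPattern.h"

// Sketch: test filenames against a shell-style pattern.
bool looks_like_txo(const std::string &name) {
  GlobPattern glob("*.txo");
  return glob.matches(name);
}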
This is a base class for the GraphicsStateGuardian class, which is itself a base class for the variou...
static GraphicsStateGuardianBase * get_default_gsg()
Returns a pointer to the "default" GSG.
Encodes a string name in a hash table, mapping it to a pointer.
get_name
Returns the complete name represented by the InternalName and all of its parents.
Specifies parameters that may be passed to the loader.
set_auto_texture_scale
Set this flag to ATS_none, ATS_up, ATS_down, or ATS_pad to control how a texture is scaled from disk ...
get_auto_texture_scale
See set_auto_texture_scale().
get_texture_num_views
See set_texture_num_views().
void unlock()
Alias for release() to match C++11 semantics.
Definition mutexDirect.I:39
void lock()
Alias for acquire() to match C++11 semantics.
Definition mutexDirect.I:19
A lightweight C++ object whose constructor calls acquire() and whose destructor calls release() on a ...
Definition mutexHolder.h:25
A base class for all things which can have a name.
Definition namable.h:26
bool has_name() const
Returns true if the Namable has a nonempty name set, false if the name is empty.
Definition namable.I:44
static PNMFileTypeRegistry * get_global_ptr()
Returns a pointer to the global PNMFileTypeRegistry object.
PNMFileType * get_type_from_extension(const std::string &filename) const
Tries to determine what the PNMFileType is likely to be for a particular image file based on its exte...
This is the base class of a family of classes that represent particular image file types that PNMImag...
Definition pnmFileType.h:32
get_maxval
Returns the maximum channel value allowable for any pixel in this image; for instance,...
int get_x_size() const
Returns the number of pixels in the X direction.
PNMReader * make_reader(const Filename &filename, PNMFileType *type=nullptr, bool report_unknown_type=true) const
Returns a newly-allocated PNMReader of the suitable type for reading from the indicated image filenam...
static bool is_grayscale(ColorType color_type)
This static variant of is_grayscale() returns true if the indicated image type represents a grayscale...
get_num_channels
Returns the number of channels in the image.
static bool has_alpha(ColorType color_type)
This static variant of has_alpha() returns true if the indicated image type includes an alpha channel...
int get_y_size() const
Returns the number of pixels in the Y direction.
get_type
If the file type is known (e.g.
The name of this class derives from the fact that we originally implemented it as a layer on top of t...
Definition pnmImage.h:58
void clear()
Frees all memory allocated for the image, and clears all its parameters (size, color,...
Definition pnmImage.cxx:48
void set_read_size(int x_size, int y_size)
Specifies the size to which we'd like to scale the image upon reading it.
Definition pnmImage.I:288
xelval get_channel_val(int x, int y, int channel) const
Returns the nth component color at the indicated pixel.
Definition pnmImage.cxx:837
void set_blue(int x, int y, float b)
Sets the blue component color only at the indicated pixel.
Definition pnmImage.I:836
void alpha_fill(float alpha=0.0)
Sets the entire alpha channel to the given level.
Definition pnmImage.I:272
xelval get_green_val(int x, int y) const
Returns the green component color at the indicated pixel.
Definition pnmImage.I:462
void set_green(int x, int y, float g)
Sets the green component color only at the indicated pixel.
Definition pnmImage.I:827
float get_alpha(int x, int y) const
Returns the alpha component color at the indicated pixel.
Definition pnmImage.I:809
float get_gray(int x, int y) const
Returns the gray component color at the indicated pixel.
Definition pnmImage.I:799
void quick_filter_from(const PNMImage &copy, int xborder=0, int yborder=0)
Resizes from the given image, with a fixed radius of 0.5.
void fill(float red, float green, float blue)
Sets the entire image (except the alpha channel) to the given color.
Definition pnmImage.I:246
void set_num_channels(int num_channels)
Changes the number of channels associated with the image.
Definition pnmImage.I:353
xelval get_alpha_val(int x, int y) const
Returns the alpha component color at the indicated pixel.
Definition pnmImage.I:494
void set_red(int x, int y, float r)
Sets the red component color only at the indicated pixel.
Definition pnmImage.I:818
void copy_header_from(const PNMImageHeader &header)
Copies just the header information into this image.
Definition pnmImage.cxx:200
void take_from(PNMImage &orig)
Move the contents of the other image into this one, and empty the other image.
Definition pnmImage.cxx:224
bool is_valid() const
Returns true if the image has been read in or correctly initialized with a height and width.
Definition pnmImage.I:342
xelval get_blue_val(int x, int y) const
Returns the blue component color at the indicated pixel.
Definition pnmImage.I:472
bool read(const Filename &filename, PNMFileType *type=nullptr, bool report_unknown_type=true)
Reads the indicated image filename.
Definition pnmImage.cxx:278
xel * get_array()
Directly access the underlying PNMImage array.
Definition pnmImage.I:1098
xelval get_red_val(int x, int y) const
Returns the red component color at the indicated pixel.
Definition pnmImage.I:452
int get_read_y_size() const
Returns the requested y_size of the image if set_read_size() has been called, or the image y_size oth...
Definition pnmImage.I:324
xelval get_gray_val(int x, int y) const
Returns the gray component color at the indicated pixel.
Definition pnmImage.I:484
void set_alpha(int x, int y, float a)
Sets the alpha component color only at the indicated pixel.
Definition pnmImage.I:859
ColorSpace get_color_space() const
Returns the color space in which the image is encoded.
Definition pnmImage.I:332
void add_alpha()
Adds an alpha channel to the image, if it does not already have one.
Definition pnmImage.I:363
xelval * get_alpha_array()
Directly access the underlying PNMImage array of alpha values.
Definition pnmImage.I:1115
bool write(const Filename &filename, PNMFileType *type=nullptr) const
Writes the image to the indicated filename.
Definition pnmImage.cxx:385
int get_read_x_size() const
Returns the requested x_size of the image if set_read_size() has been called, or the image x_size oth...
Definition pnmImage.I:315
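The PNMImage accessors above combine along the following lines; this is a minimal sketch (filenames and the helper name are placeholders), not code from texture.cxx:
#include "pnmImage.h"
#include "filename.h"

// Sketch: read an image, force an opaque alpha channel, and write it back out.
bool add_opaque_alpha(const Filename &in_file, const Filename &out_file) {
  PNMImage img;
  img.set_read_size(256, 256);        // optional: rescale while reading
  if (!img.read(in_file) || !img.is_valid()) {
    return false;
  }
  if (!img.has_alpha()) {
    img.add_alpha();
    img.alpha_fill(1.0);              // fully opaque
  }
  return img.write(out_file);
}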
This is an abstract base class that defines the interface for reading image files of various types.
Definition pnmReader.h:27
virtual bool is_floating_point()
Returns true if this PNMFileType represents a floating-point image type, false if it is a normal,...
Definition pnmReader.cxx:71
A lightweight class that represents a single element that may be timed and/or counted via stats.
A lightweight class that can be used to automatically start and stop a PStatCollector around a sectio...
Definition pStatTimer.h:30
Defines a pfm file, a 2-d table of floating-point numbers, either 3-component or 1-component,...
Definition pfmFile.h:31
bool read(const Filename &fullpath)
Reads the PFM data from the indicated file, returning true on success, false on failure.
Definition pfmFile.cxx:121
bool write(const Filename &fullpath)
Writes the PFM data to the indicated file, returning true on success, false on failure.
Definition pfmFile.cxx:204
bool store(PNMImage &pnmimage) const
Copies the data to the indicated PNMImage, converting to RGB values.
Definition pfmFile.cxx:360
void set_channel(int x, int y, int c, PN_float32 value)
Replaces the cth channel of the point value at the indicated point.
Definition pfmFile.I:63
bool load(const PNMImage &pnmimage)
Fills the PfmFile with the data from the indicated PNMImage, converted to floating-point values.
Definition pfmFile.cxx:287
PN_float32 get_channel(int x, int y, int c) const
Returns the cth channel of the point value at the indicated point.
Definition pfmFile.I:52
void clear()
Eliminates all data in the file.
Definition pfmFile.cxx:77
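A short sketch tying together the PfmFile calls above (the file names and the channel tweak are illustrative only):
#include "pfmFile.h"
#include "pnmImage.h"
#include "filename.h"

// Sketch: load a pfm, scale its first channel, and export an 8-bit preview image.
bool scale_first_channel(const Filename &pfm_path, const Filename &png_path) {
  PfmFile pfm;
  if (!pfm.read(pfm_path)) {
    return false;
  }
  for (int y = 0; y < pfm.get_y_size(); ++y) {
    for (int x = 0; x < pfm.get_x_size(); ++x) {
      pfm.set_channel(x, y, 0, pfm.get_channel(x, y, 0) * 0.5f);
    }
  }
  PNMImage preview;
  pfm.store(preview);                 // convert the float data to RGB values
  return preview.write(png_path);
}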
A table of objects that are saved within the graphics context for reference by handle later.
bool is_texture_queued(const Texture *tex) const
Returns true if the texture has been queued on this GSG, false otherwise.
TextureContext * prepare_texture_now(Texture *tex, int view, GraphicsStateGuardianBase *gsg)
Immediately creates a new TextureContext for the indicated texture and returns it.
bool dequeue_texture(Texture *tex)
Removes a texture from the queued list of textures to be prepared.
void release_texture(TextureContext *tc)
Indicates that a texture context, created by a previous call to prepare_texture(),...
void ref() const
Explicitly increments the reference count.
void local_object()
This function should be called, once, immediately after creating a new instance of some ReferenceCoun...
get_ref_count
Returns the current reference count.
virtual bool unref() const
Explicitly decrements the reference count.
Represents a set of settings that indicate how a texture is sampled.
get_minfilter
Returns the filter mode of the texture for minification.
get_wrap_v
Returns the wrap mode of the texture in the V direction.
get_anisotropic_degree
Returns the degree of anisotropic filtering that should be applied to the texture.
get_magfilter
Returns the filter mode of the texture for magnification.
get_wrap_w
Returns the wrap mode of the texture in the W direction.
get_wrap_u
Returns the wrap mode of the texture in the U direction.
get_border_color
Returns the solid color of the texture's border.
A class to read sequential binary data directly from an istream.
This is a special class object that holds all the information returned by a particular GSG to indicat...
bool was_image_modified() const
Returns true if the texture image has been modified since the last time mark_loaded() was called.
An instance of this object is returned by Texture::peek().
static Texture * load_texture(const Filename &filename, int primary_file_num_channels=0, bool read_mipmaps=false, const LoaderOptions &options=LoaderOptions())
Loads the given filename up into a texture, if it has not already been loaded, and returns the new te...
Definition texturePool.I:70
static Texture * get_texture(const Filename &filename, int primary_file_num_channels=0, bool read_mipmaps=false)
Returns the texture that has already been previously loaded, or NULL otherwise.
Definition texturePool.I:41
static Texture * load_2d_texture_array(const Filename &filename_pattern, bool read_mipmaps=false, const LoaderOptions &options=LoaderOptions())
Loads a 2-D texture array that is specified with a series of n pages, all numbered in sequence,...
static void add_texture(Texture *texture)
Adds the indicated already-loaded texture to the pool.
static Texture * load_cube_map(const Filename &filename_pattern, bool read_mipmaps=false, const LoaderOptions &options=LoaderOptions())
Loads a cube map texture that is specified with a series of 6 pages, numbered 0 through 5.
static Texture * load_3d_texture(const Filename &filename_pattern, bool read_mipmaps=false, const LoaderOptions &options=LoaderOptions())
Loads a 3-D texture that is specified with a series of n pages, all numbered in sequence,...
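For context, the TexturePool entry points above are typically used like this (the paths are placeholders; error handling is omitted):
#include "texturePool.h"
#include "texture.h"
#include "filename.h"

// Sketch: fetch textures through the global pool so repeated requests share one object.
Texture *load_scene_textures() {
  // load_cube_map expects a pattern with a hash mark standing in for the six faces.
  Texture *sky = TexturePool::load_cube_map(Filename("maps/sky_#.png"));
  (void)sky;
  // A second load_texture() call with the same filename returns the same Texture object.
  Texture *diffuse = TexturePool::load_texture(Filename("maps/brick.png"));
  return diffuse;
}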
Represents a texture object, which is typically a single 2-d image but may also represent a 1-d or 3-...
Definition texture.h:72
CPTA_uchar get_ram_image_as(const std::string &requested_format)
Returns the uncompressed system-RAM image data associated with the texture.
Definition texture.cxx:7397
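A hedged sketch of get_ram_image_as() together with set_ram_image_as() further down; the "RGBA" format string follows the documented convention, and the texture pointer is assumed valid:
#include "texture.h"

// Sketch: pull the RAM image out in a known channel order, then put it back.
void touch_ram_image(Texture *tex) {
  CPTA_uchar rgba = tex->get_ram_image_as("RGBA");  // forces an uncompressed copy
  if (!rgba.is_null()) {
    // ... inspect or transform a private copy of the pixel data here ...
    tex->set_ram_image_as(rgba, "RGBA");
  }
}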
static TextureType string_texture_type(const std::string &str)
Returns the TextureType corresponding to the indicated string word.
Definition texture.cxx:2103
virtual void ensure_loader_type(const Filename &filename)
May be called prior to calling read_txo() or any bam-related Texture-creating callback,...
Definition texture.cxx:2836
bool write_txo(std::ostream &out, const std::string &filename="") const
Writes the texture to a Panda texture object.
Definition texture.cxx:928
TextureContext * prepare_now(int view, PreparedGraphicsObjects *prepared_objects, GraphicsStateGuardianBase *gsg)
Creates a context for the texture on the particular GSG, if it does not already exist.
Definition texture.cxx:1981
static std::string format_component_type(ComponentType ct)
Returns the indicated ComponentType converted to a string word.
Definition texture.cxx:2129
Texture(const std::string &name=std::string())
Constructs an empty texture.
Definition texture.cxx:375
bool get_resident(PreparedGraphicsObjects *prepared_objects) const
Returns true if this Texture is reported to be resident within graphics memory for the indicated GSG.
Definition texture.cxx:1546
Texture * load_related(const InternalName *suffix) const
Loads a texture whose filename is derived by concatenating a suffix to the filename of this texture.
Definition texture.cxx:973
static CompressionMode string_compression_mode(const std::string &str)
Returns the CompressionMode value associated with the given string representation.
Definition texture.cxx:2462
PTA_uchar new_simple_ram_image(int x_size, int y_size)
Creates an empty array for the simple ram image of the indicated size, and returns a modifiable point...
Definition texture.cxx:1303
static bool is_specific(CompressionMode compression)
Returns true if the indicated compression mode is one of the specific compression types,...
Definition texture.cxx:2616
bool has_ram_image() const
Returns true if the Texture has its image contents available in main RAM, false if it exists only in ...
Definition texture.I:1243
static std::string format_quality_level(QualityLevel tql)
Returns the indicated QualityLevel converted to a string word.
Definition texture.cxx:2505
size_t estimate_texture_memory() const
Estimates the amount of texture memory that will be consumed by loading this texture.
Definition texture.cxx:675
bool read(const Filename &fullpath, const LoaderOptions &options=LoaderOptions())
Reads the named filename into the texture.
Definition texture.cxx:551
void consider_rescale(PNMImage &pnmimage)
Asks the PNMImage to change its scale when it reads the image, according to the whims of the Config....
Definition texture.cxx:2039
get_texture_type
Returns the overall interpretation of the texture.
Definition texture.h:366
bool write(const Filename &fullpath)
Writes the texture to the named filename.
Definition texture.I:299
static bool has_binary_alpha(Format format)
Returns true if the indicated format includes a binary alpha only, false otherwise.
Definition texture.cxx:2663
void * get_ram_mipmap_pointer(int n) const
Similar to get_ram_mipmap_image(), however, in this case the void pointer for the given ram image is...
Definition texture.cxx:1228
static std::string format_compression_mode(CompressionMode cm)
Returns the indicated CompressionMode converted to a string word.
Definition texture.cxx:2420
get_aux_data
Returns a record previously recorded via set_aux_data().
Definition texture.h:555
static bool is_srgb(Format format)
Returns true if the indicated format is in the sRGB color space, false otherwise.
Definition texture.cxx:2678
void set_orig_file_size(int x, int y, int z=1)
Specifies the size of the texture as it exists in its original disk file, before any Panda scaling.
Definition texture.cxx:1961
bool get_active(PreparedGraphicsObjects *prepared_objects) const
Returns true if this Texture was rendered in the most recent frame within the indicated GSG.
Definition texture.cxx:1519
get_keep_ram_image
Returns the flag that indicates whether this Texture is eligible to have its main RAM copy of the tex...
Definition texture.h:473
bool read_dds(std::istream &in, const std::string &filename="", bool header_only=false)
Reads the texture from a DDS file object.
Definition texture.cxx:943
void generate_normalization_cube_map(int size)
Generates a special cube map image in the texture that can be used to apply bump mapping effects: for...
Definition texture.cxx:424
bool has_compression() const
Returns true if the texture indicates it wants to be compressed, either with CM_on or higher,...
Definition texture.I:1103
static QualityLevel string_quality_level(const std::string &str)
Returns the QualityLevel value associated with the given string representation.
Definition texture.cxx:2525
void generate_alpha_scale_map()
Generates a special 256x1 1-d texture that can be used to apply an arbitrary alpha scale to objects b...
Definition texture.cxx:526
bool read_txo(std::istream &in, const std::string &filename="")
Reads the texture from a Panda texture object.
Definition texture.cxx:845
static ComponentType string_component_type(const std::string &str)
Returns the ComponentType corresponding to the indicated string word.
Definition texture.cxx:2158
static void register_with_read_factory()
Factory method to generate a Texture object.
static bool adjust_size(int &x_size, int &y_size, const std::string &name, bool for_padding, AutoTextureScale auto_texture_scale=ATS_unspecified)
Computes the proper size of the texture, based on the original size, the filename,...
Definition texture.cxx:2726
virtual void finalize(BamReader *manager)
Called by the BamReader to perform any final actions needed for setting up the object after all objec...
static int up_to_power_2(int value)
Returns the smallest power of 2 greater than or equal to value.
Definition texture.cxx:2008
static AutoTextureScale get_textures_power_2()
Returns ATS_none, ATS_up, or ATS_down; this flag controls the scaling of textures in general.
Definition texture.I:1864
get_auto_texture_scale
Returns the power-of-2 texture-scaling mode that will be applied to this particular texture when it i...
Definition texture.h:535
void set_ram_mipmap_pointer_from_int(long long pointer, int n, int page_size)
Accepts a raw pointer cast as an int, which is then passed to set_ram_mipmap_pointer(); see the docum...
Definition texture.cxx:1269
virtual void write_datagram(BamWriter *manager, Datagram &me)
Function to write the important information in the particular object to a Datagram.
static int down_to_power_2(int value)
Returns the largest power of 2 less than or equal to value.
Definition texture.cxx:2020
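The two power-of-2 helpers above behave as in this small sketch (values chosen purely for illustration):
#include "texture.h"

// Sketch: snap arbitrary dimensions to powers of two.
void power_of_2_example() {
  int up   = Texture::up_to_power_2(300);    // 512
  int down = Texture::down_to_power_2(300);  // 256
  int same = Texture::up_to_power_2(256);    // 256: already a power of 2
  (void)up; (void)down; (void)same;
}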
bool release(PreparedGraphicsObjects *prepared_objects)
Frees the texture context only on the indicated object, if it exists there.
Definition texture.cxx:1573
virtual bool has_cull_callback() const
Should be overridden by derived classes to return true if cull_callback() has been defined.
Definition texture.cxx:2574
bool uses_mipmaps() const
Returns true if the minfilter settings on this texture indicate the use of mipmapping,...
Definition texture.I:1128
static std::string format_texture_type(TextureType tt)
Returns the indicated TextureType converted to a string word.
Definition texture.cxx:2077
has_simple_ram_image
Returns true if the Texture has a "simple" image available in main RAM.
Definition texture.h:520
static bool is_integer(Format format)
Returns true if the indicated format is an integer format, false otherwise.
Definition texture.cxx:2695
PTA_uchar modify_simple_ram_image()
Returns a modifiable pointer to the internal "simple" texture image.
Definition texture.cxx:1292
void clear_ram_mipmap_image(int n)
Discards the current system-RAM image for the nth mipmap level.
Definition texture.cxx:1277
bool was_image_modified(PreparedGraphicsObjects *prepared_objects) const
Returns true if the texture needs to be re-loaded onto the indicated GSG, either because its image da...
Definition texture.cxx:1460
bool read_ktx(std::istream &in, const std::string &filename="", bool header_only=false)
Reads the texture from a KTX file object.
Definition texture.cxx:960
size_t get_data_size_bytes(PreparedGraphicsObjects *prepared_objects) const
Returns the number of bytes which the texture is reported to consume within graphics memory,...
Definition texture.cxx:1492
get_expected_ram_page_size
Returns the number of bytes that should be used per each Z page of the 3-d texture.
Definition texture.h:450
virtual bool cull_callback(CullTraverser *trav, const CullTraverserData &data) const
If has_cull_callback() returns true, this function will be called during the cull traversal to perfor...
Definition texture.cxx:2588
bool is_prepared(PreparedGraphicsObjects *prepared_objects) const
Returns true if the texture has already been prepared or enqueued for preparation on the indicated GS...
Definition texture.cxx:1444
void set_ram_image_as(CPTA_uchar image, const std::string &provided_format)
Replaces the current system-RAM image with the new data, converting it first if necessary from the in...
Definition texture.cxx:1025
void set_ram_mipmap_pointer(int n, void *image, size_t page_size=0)
Sets an explicit void pointer as the texture's mipmap image for the indicated level.
Definition texture.cxx:1247
set_aux_data
Records an arbitrary object in the Texture, associated with a specified key.
Definition texture.h:555
void texture_uploaded()
This method is called by the GraphicsEngine at the beginning of the frame *after* a texture has been ...
Definition texture.cxx:2551
void set_size_padded(int x=1, int y=1, int z=1)
Changes the size of the texture, padding if necessary, and setting the pad region as well.
Definition texture.cxx:1932
static bool has_alpha(Format format)
Returns true if the indicated format includes alpha, false otherwise.
Definition texture.cxx:2632
get_num_loadable_ram_mipmap_images
Returns the number of contiguous mipmap levels that exist in RAM, up until the first gap in the seque...
Definition texture.h:505
void generate_simple_ram_image()
Computes the "simple" ram image by loading the main RAM image, if it is not already available,...
Definition texture.cxx:1324
static Format string_format(const std::string &str)
Returns the Format corresponding to the indicated string word.
Definition texture.cxx:2302
clear_aux_data
Removes a record previously recorded via set_aux_data().
Definition texture.h:555
int release_all()
Frees the context allocated on all objects for which the texture has been declared.
Definition texture.cxx:1599
CPTA_uchar get_ram_mipmap_image(int n) const
Returns the system-RAM image data associated with the nth mipmap level, if present.
Definition texture.cxx:1214
static std::string format_format(Format f)
Returns the indicated Format converted to a string word.
Definition texture.cxx:2188
is_cacheable
Returns true if there is enough information in this Texture object to write it to the bam cache succe...
Definition texture.h:474
static bool is_unsigned(ComponentType ctype)
Returns true if the indicated component type is unsigned, false otherwise.
Definition texture.cxx:2604
A thread; that is, a lightweight process.
Definition thread.h:46
static void consider_yield()
Possibly suspends the current thread for the rest of the current epoch, if it has run for enough this...
Definition thread.I:212
get_current_thread
Returns a pointer to the currently-executing Thread object.
Definition thread.h:109
TypeHandle is the identifier used to differentiate C++ class types.
Definition typeHandle.h:81
bool is_exact_type(TypeHandle handle) const
Returns true if the current object is the indicated type exactly.
Definition typedObject.I:38
bool is_of_type(TypeHandle handle) const
Returns true if the current object is or derives from the indicated type.
Definition typedObject.I:28
A base class for things which need to inherit from both TypedObject and from ReferenceCount.
Base class for objects that can be written to and read from Bam files.
A hierarchy of directories and files that appears to be one continuous file system,...
static void close_write_file(std::ostream *stream)
Closes a file opened by a previous call to open_write_file().
Filename get_cwd() const
Returns the current directory name.
bool exists(const Filename &filename) const
Convenience function; returns true if the named file exists in the virtual file system hierarchy.
bool resolve_filename(Filename &filename, const DSearchPath &searchpath, const std::string &default_extension=std::string()) const
Searches the given search path for the filename.
std::ostream * open_write_file(const Filename &filename, bool auto_wrap, bool truncate)
Convenience function; returns a newly allocated ostream if the file exists and can be written,...
static void close_read_file(std::istream *stream)
Closes a file opened by a previous call to open_read_file().
PointerTo< VirtualFile > get_file(const Filename &filename, bool status_only=false) const
Looks up the file by the indicated name in the file system.
static VirtualFileSystem * get_global_ptr()
Returns the default global VirtualFileSystem.
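A small sketch of the VirtualFileSystem calls above (the search directory and helper name are made-up examples):
#include "virtualFileSystem.h"
#include "dSearchPath.h"
#include "filename.h"

// Sketch: resolve a texture filename against a search path via the global VFS.
bool find_texture_file(Filename &fullpath) {
  VirtualFileSystem *vfs = VirtualFileSystem::get_global_ptr();
  DSearchPath search;
  search.append_directory(Filename("maps"));
  if (!vfs->resolve_filename(fullpath, search)) {
    return false;                     // not found on the search path
  }
  return vfs->exists(fullpath);
}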
The abstract base class for a file or directory within the VirtualFileSystem.
Definition virtualFile.h:35
This is our own Panda specialization on the default STL map.
Definition pmap.h:49
This is our own Panda specialization on the default STL vector.
Definition pvector.h:42
BEGIN_PUBLISH EXPCL_PANDA_PNMIMAGE float decode_sRGB_float(unsigned char val)
Decodes the sRGB-encoded unsigned char value to a linearized float in the range 0-1.
EXPCL_PANDA_PNMIMAGE unsigned char encode_sRGB_uchar(unsigned char val)
Encodes the linearized unsigned char value to an sRGB-encoded unsigned char value.
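To illustrate the two conversion helpers above, a minimal round trip (the byte values are arbitrary; the approximate results are noted in comments):
#include "convert_srgb.h"

// Sketch: linearize an sRGB byte, then re-encode a linear byte back to sRGB.
void srgb_round_trip() {
  float linear = decode_sRGB_float((unsigned char)128);          // about 0.22
  unsigned char encoded = encode_sRGB_uchar((unsigned char)55);  // back near 128
  (void)linear; (void)encoded;
}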
std::ostream & indent(std::ostream &out, int indent_level)
A handy function for doing text formatting.
Definition indent.cxx:20
int get_lowest_on_bit(unsigned short x)
Returns the index of the lowest 1 bit in the word.
Definition pbitops.I:175
int get_next_higher_bit(unsigned short x)
Returns the smallest power of 2 greater than x.
Definition pbitops.I:328
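And the bit helpers above, as a minimal sketch (the example values follow directly from the descriptions given):
#include "pbitops.h"

// Sketch: the bit utilities behind the power-of-2 texture math.
void bit_helpers_example() {
  int low_bit   = get_lowest_on_bit((unsigned short)0x0050);  // 4: bit 4 is the lowest set bit
  int next_pow2 = get_next_higher_bit((unsigned short)300);   // 512
  (void)low_bit; (void)next_pow2;
}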
string upcase(const string &s)
Returns the input string with all lowercase letters converted to uppercase.
void release_read(const CycleData *pointer) const
Releases a pointer previously obtained via a call to read().
CycleDataType * write_upstream(bool force_to_0, Thread *current_thread)
See PipelineCyclerBase::write_upstream().
CycleDataType * elevate_read_upstream(const CycleDataType *pointer, bool force_to_0, Thread *current_thread)
See PipelineCyclerBase::elevate_read_upstream().
const CycleDataType * read(Thread *current_thread) const
See PipelineCyclerBase::read().