Panda3D
texture.cxx
Go to the documentation of this file.
1 /**
2  * PANDA 3D SOFTWARE
3  * Copyright (c) Carnegie Mellon University. All rights reserved.
4  *
5  * All use of this software is subject to the terms of the revised BSD
6  * license. You should have received a copy of this license along
7  * with this source code in a file named "LICENSE."
8  *
9  * @file texture.cxx
10  * @author mike
11  * @date 1997-01-09
12  * @author fperazzi, PandaSE
13  * @date 2010-04-29
14  */
15 
16 #include "pandabase.h"
17 #include "texture.h"
18 #include "config_gobj.h"
19 #include "config_putil.h"
20 #include "texturePool.h"
21 #include "textureContext.h"
22 #include "bamCache.h"
23 #include "bamCacheRecord.h"
24 #include "datagram.h"
25 #include "datagramIterator.h"
26 #include "bamReader.h"
27 #include "bamWriter.h"
28 #include "string_utils.h"
30 #include "pnmImage.h"
31 #include "pnmReader.h"
32 #include "pfmFile.h"
33 #include "virtualFileSystem.h"
34 #include "datagramInputFile.h"
35 #include "datagramOutputFile.h"
36 #include "bam.h"
37 #include "zStream.h"
38 #include "indent.h"
39 #include "cmath.h"
40 #include "pStatTimer.h"
41 #include "pbitops.h"
42 #include "streamReader.h"
43 #include "texturePeeker.h"
44 #include "convert_srgb.h"
45 
46 #ifdef HAVE_SQUISH
47 #include <squish.h>
48 #endif // HAVE_SQUISH
49 
50 #include <stddef.h>
51 
52 using std::endl;
53 using std::istream;
54 using std::max;
55 using std::min;
56 using std::ostream;
57 using std::string;
58 using std::swap;
59 
61 ("texture-quality-level", Texture::QL_normal,
62  PRC_DESC("This specifies a global quality level for all textures. You "
63  "may specify either fastest, normal, or best. This actually "
64  "affects the meaning of Texture::set_quality_level(QL_default), "
65  "so it may be overridden on a per-texture basis. This generally "
66  "only has an effect when using the tinydisplay software renderer; "
67  "it has little or no effect on normal, hardware-accelerated "
68  "renderers. See Texture::set_quality_level()."));
69 
// PStats collector timing all texture reads ("*:Texture:Read").
PStatCollector Texture::_texture_read_pcollector("*:Texture:Read");
// Run-time type handles for Texture and its cycled data block.
TypeHandle Texture::_type_handle;
TypeHandle Texture::CData::_type_handle;
// Global power-of-two scaling policy; ATS_unspecified means "not yet chosen".
AutoTextureScale Texture::_textures_power_2 = ATS_unspecified;
74 
75 // Stuff to read and write DDS files.
76 
77 // little-endian, of course
78 #define DDS_MAGIC 0x20534444
79 
80 
81 // DDS_header.dwFlags
82 #define DDSD_CAPS 0x00000001
83 #define DDSD_HEIGHT 0x00000002
84 #define DDSD_WIDTH 0x00000004
85 #define DDSD_PITCH 0x00000008
86 #define DDSD_PIXELFORMAT 0x00001000
87 #define DDSD_MIPMAPCOUNT 0x00020000
88 #define DDSD_LINEARSIZE 0x00080000
89 #define DDSD_DEPTH 0x00800000
90 
91 // DDS_header.sPixelFormat.dwFlags
92 #define DDPF_ALPHAPIXELS 0x00000001
93 #define DDPF_FOURCC 0x00000004
94 #define DDPF_INDEXED 0x00000020
95 #define DDPF_RGB 0x00000040
96 
97 // DDS_header.sCaps.dwCaps1
98 #define DDSCAPS_COMPLEX 0x00000008
99 #define DDSCAPS_TEXTURE 0x00001000
100 #define DDSCAPS_MIPMAP 0x00400000
101 
102 // DDS_header.sCaps.dwCaps2
103 #define DDSCAPS2_CUBEMAP 0x00000200
104 #define DDSCAPS2_CUBEMAP_POSITIVEX 0x00000400
105 #define DDSCAPS2_CUBEMAP_NEGATIVEX 0x00000800
106 #define DDSCAPS2_CUBEMAP_POSITIVEY 0x00001000
107 #define DDSCAPS2_CUBEMAP_NEGATIVEY 0x00002000
108 #define DDSCAPS2_CUBEMAP_POSITIVEZ 0x00004000
109 #define DDSCAPS2_CUBEMAP_NEGATIVEZ 0x00008000
110 #define DDSCAPS2_VOLUME 0x00200000
111 
// Pixel-format block of a DDS file header; fields follow the on-disk
// DDS_PIXELFORMAT layout (little-endian, per the comment above).
struct DDSPixelFormat {
  unsigned int pf_size;       // size of this structure on disk, per DDS spec
  unsigned int pf_flags;      // DDPF_* bits saying which fields are valid
  unsigned int four_cc;       // FourCC code, used when DDPF_FOURCC is set
  unsigned int rgb_bitcount;  // bits per pixel for uncompressed formats
  unsigned int r_mask;        // bit masks locating each channel in a pixel
  unsigned int g_mask;
  unsigned int b_mask;
  unsigned int a_mask;
};
122 
// Capability words of a DDS header.
struct DDSCaps2 {
  unsigned int caps1;  // DDSCAPS_* bits (texture / mipmap / complex)
  unsigned int caps2;  // DDSCAPS2_* bits (cube-map faces, volume)
  unsigned int ddsx;   // reserved extension word -- TODO confirm usage
};
128 
// In-memory copy of a DDS file header, including the leading magic number.
struct DDSHeader {
  unsigned int dds_magic;   // expected to equal DDS_MAGIC ("DDS " in LE)
  unsigned int dds_size;    // header size on disk
  unsigned int dds_flags;   // DDSD_* bits saying which fields below are set
  unsigned int height;
  unsigned int width;
  unsigned int pitch;       // pitch or linear size, per DDSD_PITCH/LINEARSIZE
  unsigned int depth;       // depth of a volume texture (DDSD_DEPTH)
  unsigned int num_levels;  // mipmap count (DDSD_MIPMAPCOUNT)

  DDSPixelFormat pf;
  DDSCaps2 caps;
};
142 
143 // Stuff to read KTX files.
// Per-component data types appearing in a KTX header (the glType field);
// the numeric values mirror the corresponding OpenGL enumerants.
enum KTXType {
  KTX_BYTE = 0x1400,
  KTX_UNSIGNED_BYTE = 0x1401,
  KTX_SHORT = 0x1402,
  KTX_UNSIGNED_SHORT = 0x1403,
  KTX_INT = 0x1404,
  KTX_UNSIGNED_INT = 0x1405,
  KTX_FLOAT = 0x1406,
  KTX_HALF_FLOAT = 0x140B,
  // Packed per-pixel layouts.
  KTX_UNSIGNED_BYTE_3_3_2 = 0x8032,
  KTX_UNSIGNED_SHORT_4_4_4_4 = 0x8033,
  KTX_UNSIGNED_SHORT_5_5_5_1 = 0x8034,
  KTX_UNSIGNED_INT_8_8_8_8 = 0x8035,
  KTX_UNSIGNED_INT_10_10_10_2 = 0x8036,
  KTX_UNSIGNED_BYTE_2_3_3_REV = 0x8362,
  KTX_UNSIGNED_SHORT_5_6_5 = 0x8363,
  KTX_UNSIGNED_SHORT_5_6_5_REV = 0x8364,
  KTX_UNSIGNED_SHORT_4_4_4_4_REV = 0x8365,
  KTX_UNSIGNED_SHORT_1_5_5_5_REV = 0x8366,
  KTX_UNSIGNED_INT_8_8_8_8_REV = 0x8367,
  KTX_UNSIGNED_INT_2_10_10_10_REV = 0x8368,
  KTX_UNSIGNED_INT_24_8 = 0x84FA,
  KTX_UNSIGNED_INT_10F_11F_11F_REV = 0x8C3B,
  KTX_UNSIGNED_INT_5_9_9_9_REV = 0x8C3E,
  KTX_FLOAT_32_UNSIGNED_INT_24_8_REV = 0x8DAD,
};
170 
// Uncompressed formats appearing in a KTX header (glFormat /
// glInternalFormat fields); values mirror the OpenGL enumerants.
enum KTXFormat {
  KTX_ALPHA = 0x1906,
  KTX_ALPHA12 = 0x803D,
  KTX_ALPHA16 = 0x803E,
  KTX_ALPHA16_SNORM = 0x9018,
  KTX_ALPHA4 = 0x803B,
  KTX_ALPHA8 = 0x803C,
  KTX_ALPHA8_SNORM = 0x9014,
  KTX_ALPHA_SNORM = 0x9010,
  KTX_BGR = 0x80E0,
  KTX_BGR_INTEGER = 0x8D9A,
  KTX_BGRA = 0x80E1,
  KTX_BGRA_INTEGER = 0x8D9B,
  KTX_BLUE = 0x1905,
  KTX_BLUE_INTEGER = 0x8D96,
  KTX_COLOR_INDEX = 0x1900,
  KTX_DEPTH24_STENCIL8 = 0x88F0,
  KTX_DEPTH32F_STENCIL8 = 0x8CAD,
  KTX_DEPTH_COMPONENT = 0x1902,
  KTX_DEPTH_COMPONENT16 = 0x81A5,
  KTX_DEPTH_COMPONENT24 = 0x81A6,
  KTX_DEPTH_COMPONENT32 = 0x81A7,
  KTX_DEPTH_COMPONENT32F = 0x8CAC,
  KTX_DEPTH_STENCIL = 0x84F9,
  KTX_GREEN = 0x1904,
  KTX_GREEN_INTEGER = 0x8D95,
  KTX_INTENSITY = 0x8049,
  KTX_INTENSITY12 = 0x804C,
  KTX_INTENSITY16 = 0x804D,
  KTX_INTENSITY16_SNORM = 0x901B,
  KTX_INTENSITY4 = 0x804A,
  KTX_INTENSITY8 = 0x804B,
  KTX_INTENSITY8_SNORM = 0x9017,
  KTX_INTENSITY_SNORM = 0x9013,
  KTX_LUMINANCE = 0x1909,
  KTX_LUMINANCE12 = 0x8041,
  KTX_LUMINANCE12_ALPHA12 = 0x8047,
  KTX_LUMINANCE12_ALPHA4 = 0x8046,
  KTX_LUMINANCE16 = 0x8042,
  KTX_LUMINANCE16_ALPHA16 = 0x8048,
  KTX_LUMINANCE16_ALPHA16_SNORM = 0x901A,
  KTX_LUMINANCE16_SNORM = 0x9019,
  KTX_LUMINANCE4 = 0x803F,
  KTX_LUMINANCE4_ALPHA4 = 0x8043,
  KTX_LUMINANCE6_ALPHA2 = 0x8044,
  KTX_LUMINANCE8 = 0x8040,
  KTX_LUMINANCE8_ALPHA8 = 0x8045,
  KTX_LUMINANCE8_ALPHA8_SNORM = 0x9016,
  KTX_LUMINANCE8_SNORM = 0x9015,
  KTX_LUMINANCE_ALPHA = 0x190A,
  KTX_LUMINANCE_ALPHA_SNORM = 0x9012,
  KTX_LUMINANCE_SNORM = 0x9011,
  KTX_R11F_G11F_B10F = 0x8C3A,
  KTX_R16 = 0x822A,
  KTX_R16_SNORM = 0x8F98,
  KTX_R16F = 0x822D,
  KTX_R16I = 0x8233,
  KTX_R16UI = 0x8234,
  KTX_R32F = 0x822E,
  KTX_R32I = 0x8235,
  KTX_R32UI = 0x8236,
  KTX_R3_G3_B2 = 0x2A10,
  KTX_R8 = 0x8229,
  KTX_R8_SNORM = 0x8F94,
  KTX_R8I = 0x8231,
  KTX_R8UI = 0x8232,
  KTX_RED = 0x1903,
  KTX_RED_INTEGER = 0x8D94,
  KTX_RED_SNORM = 0x8F90,
  KTX_RG = 0x8227,
  KTX_RG16 = 0x822C,
  KTX_RG16_SNORM = 0x8F99,
  KTX_RG16F = 0x822F,
  KTX_RG16I = 0x8239,
  KTX_RG16UI = 0x823A,
  KTX_RG32F = 0x8230,
  KTX_RG32I = 0x823B,
  KTX_RG32UI = 0x823C,
  KTX_RG8 = 0x822B,
  KTX_RG8_SNORM = 0x8F95,
  KTX_RG8I = 0x8237,
  KTX_RG8UI = 0x8238,
  KTX_RG_INTEGER = 0x8228,
  KTX_RG_SNORM = 0x8F91,
  KTX_RGB = 0x1907,
  KTX_RGB10 = 0x8052,
  KTX_RGB10_A2 = 0x8059,
  KTX_RGB12 = 0x8053,
  KTX_RGB16 = 0x8054,
  KTX_RGB16_SNORM = 0x8F9A,
  KTX_RGB16F = 0x881B,
  KTX_RGB16I = 0x8D89,
  KTX_RGB16UI = 0x8D77,
  KTX_RGB2 = 0x804E,
  KTX_RGB32F = 0x8815,
  KTX_RGB32I = 0x8D83,
  KTX_RGB32UI = 0x8D71,
  KTX_RGB4 = 0x804F,
  KTX_RGB5 = 0x8050,
  KTX_RGB5_A1 = 0x8057,
  KTX_RGB8 = 0x8051,
  KTX_RGB8_SNORM = 0x8F96,
  KTX_RGB8I = 0x8D8F,
  KTX_RGB8UI = 0x8D7D,
  KTX_RGB9_E5 = 0x8C3D,
  KTX_RGB_INTEGER = 0x8D98,
  KTX_RGB_SNORM = 0x8F92,
  KTX_RGBA = 0x1908,
  KTX_RGBA12 = 0x805A,
  KTX_RGBA16 = 0x805B,
  KTX_RGBA16_SNORM = 0x8F9B,
  KTX_RGBA16F = 0x881A,
  KTX_RGBA16I = 0x8D88,
  KTX_RGBA16UI = 0x8D76,
  KTX_RGBA2 = 0x8055,
  KTX_RGBA32F = 0x8814,
  KTX_RGBA32I = 0x8D82,
  KTX_RGBA32UI = 0x8D70,
  KTX_RGBA4 = 0x8056,
  KTX_RGBA8 = 0x8058,
  KTX_RGBA8_SNORM = 0x8F97,
  KTX_RGBA8I = 0x8D8E,
  KTX_RGBA8UI = 0x8D7C,
  KTX_RGBA_INTEGER = 0x8D99,
  KTX_RGBA_SNORM = 0x8F93,
  KTX_SLUMINANCE = 0x8C46,
  KTX_SLUMINANCE8 = 0x8C47,
  KTX_SLUMINANCE8_ALPHA8 = 0x8C45,
  KTX_SLUMINANCE_ALPHA = 0x8C44,
  KTX_SRGB = 0x8C40,
  KTX_SRGB8 = 0x8C41,
  KTX_SRGB8_ALPHA8 = 0x8C43,
  KTX_SRGB_ALPHA = 0x8C42,
  KTX_STENCIL_INDEX = 0x1901,
  KTX_STENCIL_INDEX1 = 0x8D46,
  KTX_STENCIL_INDEX16 = 0x8D49,
  KTX_STENCIL_INDEX4 = 0x8D47,
  KTX_STENCIL_INDEX8 = 0x8D48,
};
310 
// Compressed internal formats appearing in a KTX header (glInternalFormat
// when glType is zero); values mirror the OpenGL enumerants.
enum KTXCompressedFormat {
  KTX_COMPRESSED_LUMINANCE_ALPHA_LATC2 = 0x8C72,
  KTX_COMPRESSED_LUMINANCE_LATC1 = 0x8C70,
  KTX_COMPRESSED_R11_EAC = 0x9270,
  KTX_COMPRESSED_RED = 0x8225,
  KTX_COMPRESSED_RED_RGTC1 = 0x8DBB,
  KTX_COMPRESSED_RG = 0x8226,
  KTX_COMPRESSED_RG11_EAC = 0x9272,
  KTX_COMPRESSED_RG_RGTC2 = 0x8DBD,
  KTX_COMPRESSED_RGB = 0x84ED,
  KTX_COMPRESSED_RGB8_ETC2 = 0x9274,
  KTX_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2 = 0x9276,
  KTX_COMPRESSED_RGB_BPTC_SIGNED_FLOAT = 0x8E8E,
  KTX_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT = 0x8E8F,
  KTX_COMPRESSED_RGB_FXT1_3DFX = 0x86B0,
  KTX_COMPRESSED_RGB_PVRTC_2BPPV1_IMG = 0x8C01,
  KTX_COMPRESSED_RGB_PVRTC_4BPPV1_IMG = 0x8C00,
  KTX_COMPRESSED_RGB_S3TC_DXT1 = 0x83F0,
  KTX_COMPRESSED_RGBA = 0x84EE,
  KTX_COMPRESSED_RGBA8_ETC2_EAC = 0x9278,
  KTX_COMPRESSED_RGBA_BPTC_UNORM = 0x8E8C,
  KTX_COMPRESSED_RGBA_FXT1_3DFX = 0x86B1,
  KTX_COMPRESSED_RGBA_PVRTC_2BPPV1_IMG = 0x8C03,
  KTX_COMPRESSED_RGBA_PVRTC_2BPPV2_IMG = 0x9137,
  KTX_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG = 0x8C02,
  KTX_COMPRESSED_RGBA_PVRTC_4BPPV2_IMG = 0x9138,
  KTX_COMPRESSED_RGBA_S3TC_DXT1 = 0x83F1,
  KTX_COMPRESSED_RGBA_S3TC_DXT3 = 0x83F2,
  KTX_COMPRESSED_RGBA_S3TC_DXT5 = 0x83F3,
  KTX_COMPRESSED_SIGNED_LUMINANCE_ALPHA_LATC2 = 0x8C73,
  KTX_COMPRESSED_SIGNED_LUMINANCE_LATC1 = 0x8C71,
  KTX_COMPRESSED_SIGNED_R11_EAC = 0x9271,
  KTX_COMPRESSED_SIGNED_RED_RGTC1 = 0x8DBC,
  KTX_COMPRESSED_SIGNED_RG11_EAC = 0x9273,
  KTX_COMPRESSED_SIGNED_RG_RGTC2 = 0x8DBE,
  KTX_COMPRESSED_SLUMINANCE = 0x8C4A,
  KTX_COMPRESSED_SLUMINANCE_ALPHA = 0x8C4B,
  KTX_COMPRESSED_SRGB = 0x8C48,
  KTX_COMPRESSED_SRGB8_ALPHA8_ETC2_EAC = 0x9279,
  KTX_COMPRESSED_SRGB8_ETC2 = 0x9275,
  KTX_COMPRESSED_SRGB8_PUNCHTHROUGH_ALPHA1_ETC2 = 0x9277,
  KTX_COMPRESSED_SRGB_ALPHA = 0x8C49,
  KTX_COMPRESSED_SRGB_ALPHA_BPTC_UNORM = 0x8E8D,
  KTX_COMPRESSED_SRGB_ALPHA_PVRTC_2BPPV1 = 0x8A56,
  KTX_COMPRESSED_SRGB_ALPHA_PVRTC_2BPPV2 = 0x93F0,
  KTX_COMPRESSED_SRGB_ALPHA_PVRTC_4BPPV1 = 0x8A57,
  KTX_COMPRESSED_SRGB_ALPHA_PVRTC_4BPPV2 = 0x93F1,
  KTX_COMPRESSED_SRGB_ALPHA_S3TC_DXT1 = 0x8C4D,
  KTX_COMPRESSED_SRGB_ALPHA_S3TC_DXT3 = 0x8C4E,
  KTX_COMPRESSED_SRGB_ALPHA_S3TC_DXT5 = 0x8C4F,
  KTX_COMPRESSED_SRGB_PVRTC_2BPPV1 = 0x8A54,
  KTX_COMPRESSED_SRGB_PVRTC_4BPPV1 = 0x8A55,
  KTX_COMPRESSED_SRGB_S3TC_DXT1 = 0x8C4C,
  KTX_ETC1_RGB8 = 0x8D64,
  KTX_ETC1_SRGB8 = 0x88EE,
};
367 
368 /**
369  * Constructs an empty texture. The default is to set up the texture as an
370  * empty 2-d texture; follow up with one of the variants of setup_texture() if
371  * this is not what you want.
372  */
374 Texture(const string &name) :
375  Namable(name),
376  _lock(name),
377  _cvar(_lock)
378 {
379  _reloading = false;
380 
381  CDWriter cdata(_cycler, true);
382  do_set_format(cdata, F_rgb);
383  do_set_component_type(cdata, T_unsigned_byte);
384 }
385 
386 /**
387  * Use Texture::make_copy() to make a duplicate copy of an existing Texture.
388  */
Texture::
Texture(const Texture &copy) :
  Namable(copy),
  // Pipeline-cycled state is copied from the source texture.
  _cycler(copy._cycler),
  // The lock and condition variable are per-instance and are not copied;
  // a fresh lock is created under the copied name.
  _lock(copy.get_name()),
  _cvar(_lock)
{
  // The new instance starts out not reloading, regardless of the source.
  _reloading = false;
}
398 
399 /**
400  * Use Texture::make_copy() to make a duplicate copy of an existing Texture.
401  */
void Texture::
operator = (const Texture &copy) {
  Namable::operator = (copy);
  // Only the name and the pipeline-cycled state are assigned; the lock,
  // condition variable, and _reloading flag keep their current values.
  _cycler = copy._cycler;
}
407 
408 /**
409  *
410  */
Texture::
~Texture() {
  // Release any graphics-context resources before the object goes away.
  release_all();
  // Destroying a Texture while an asynchronous reload is still pending is
  // a programming error.
  nassertv(!_reloading);
}
416 
417 /**
418  * Generates a special cube map image in the texture that can be used to apply
419  * bump mapping effects: for each texel in the cube map that is indexed by the
420  * 3-d texture coordinates (x, y, z), the resulting value is the normalized
421  * vector (x, y, z) (compressed from -1..1 into 0..1).
422  */
425  CDWriter cdata(_cycler, true);
426  do_setup_texture(cdata, TT_cube_map, size, size, 6, T_unsigned_byte, F_rgb);
427  PTA_uchar image = do_make_ram_image(cdata);
428  cdata->_keep_ram_image = true;
429 
430  cdata->inc_image_modified();
431  cdata->inc_properties_modified();
432 
433  PN_stdfloat half_size = (PN_stdfloat)size * 0.5f;
434  PN_stdfloat center = half_size - 0.5f;
435 
436  LMatrix4 scale
437  (127.5f, 0.0f, 0.0f, 0.0f,
438  0.0f, 127.5f, 0.0f, 0.0f,
439  0.0f, 0.0f, 127.5f, 0.0f,
440  127.5f, 127.5f, 127.5f, 1.0f);
441 
442  unsigned char *p = image;
443  int xi, yi;
444 
445  // Page 0: positive X.
446  for (yi = 0; yi < size; ++yi) {
447  for (xi = 0; xi < size; ++xi) {
448  LVector3 vec(half_size, center - yi, center - xi);
449  vec.normalize();
450  vec = scale.xform_point(vec);
451 
452  *p++ = (unsigned char)vec[2];
453  *p++ = (unsigned char)vec[1];
454  *p++ = (unsigned char)vec[0];
455  }
456  }
457 
458  // Page 1: negative X.
459  for (yi = 0; yi < size; ++yi) {
460  for (xi = 0; xi < size; ++xi) {
461  LVector3 vec(-half_size, center - yi, xi - center);
462  vec.normalize();
463  vec = scale.xform_point(vec);
464  *p++ = (unsigned char)vec[2];
465  *p++ = (unsigned char)vec[1];
466  *p++ = (unsigned char)vec[0];
467  }
468  }
469 
470  // Page 2: positive Y.
471  for (yi = 0; yi < size; ++yi) {
472  for (xi = 0; xi < size; ++xi) {
473  LVector3 vec(xi - center, half_size, yi - center);
474  vec.normalize();
475  vec = scale.xform_point(vec);
476  *p++ = (unsigned char)vec[2];
477  *p++ = (unsigned char)vec[1];
478  *p++ = (unsigned char)vec[0];
479  }
480  }
481 
482  // Page 3: negative Y.
483  for (yi = 0; yi < size; ++yi) {
484  for (xi = 0; xi < size; ++xi) {
485  LVector3 vec(xi - center, -half_size, center - yi);
486  vec.normalize();
487  vec = scale.xform_point(vec);
488  *p++ = (unsigned char)vec[2];
489  *p++ = (unsigned char)vec[1];
490  *p++ = (unsigned char)vec[0];
491  }
492  }
493 
494  // Page 4: positive Z.
495  for (yi = 0; yi < size; ++yi) {
496  for (xi = 0; xi < size; ++xi) {
497  LVector3 vec(xi - center, center - yi, half_size);
498  vec.normalize();
499  vec = scale.xform_point(vec);
500  *p++ = (unsigned char)vec[2];
501  *p++ = (unsigned char)vec[1];
502  *p++ = (unsigned char)vec[0];
503  }
504  }
505 
506  // Page 5: negative Z.
507  for (yi = 0; yi < size; ++yi) {
508  for (xi = 0; xi < size; ++xi) {
509  LVector3 vec(center - xi, center - yi, -half_size);
510  vec.normalize();
511  vec = scale.xform_point(vec);
512  *p++ = (unsigned char)vec[2];
513  *p++ = (unsigned char)vec[1];
514  *p++ = (unsigned char)vec[0];
515  }
516  }
517 }
518 
519 /**
520  * Generates a special 256x1 1-d texture that can be used to apply an
521  * arbitrary alpha scale to objects by judicious use of texture matrix. The
522  * texture is a gradient, with an alpha of 0 on the left (U = 0), and 255 on
523  * the right (U = 1).
524  */
527  CDWriter cdata(_cycler, true);
528  do_setup_texture(cdata, TT_1d_texture, 256, 1, 1, T_unsigned_byte, F_alpha);
529  cdata->_default_sampler.set_wrap_u(SamplerState::WM_clamp);
530  cdata->_default_sampler.set_minfilter(SamplerState::FT_nearest);
531  cdata->_default_sampler.set_magfilter(SamplerState::FT_nearest);
532 
533  cdata->_compression = CM_off;
534 
535  cdata->inc_image_modified();
536  cdata->inc_properties_modified();
537 
538  PTA_uchar image = do_make_ram_image(cdata);
539  cdata->_keep_ram_image = true;
540 
541  unsigned char *p = image;
542  for (int xi = 0; xi < 256; ++xi) {
543  *p++ = xi;
544  }
545 }
546 
547 /**
548  * Reads the named filename into the texture.
549  */
551 read(const Filename &fullpath, const LoaderOptions &options) {
552  CDWriter cdata(_cycler, true);
553  do_clear(cdata);
554  cdata->inc_properties_modified();
555  cdata->inc_image_modified();
556  return do_read(cdata, fullpath, Filename(), 0, 0, 0, 0, false, false,
557  options, nullptr);
558 }
559 
560 /**
561  * Combine a 3-component image with a grayscale image to get a 4-component
562  * image.
563  *
564  * See the description of the full-parameter read() method for the meaning of
565  * the primary_file_num_channels and alpha_file_channel parameters.
566  */
568 read(const Filename &fullpath, const Filename &alpha_fullpath,
569  int primary_file_num_channels, int alpha_file_channel,
570  const LoaderOptions &options) {
571  CDWriter cdata(_cycler, true);
572  do_clear(cdata);
573  cdata->inc_properties_modified();
574  cdata->inc_image_modified();
575  return do_read(cdata, fullpath, alpha_fullpath, primary_file_num_channels,
576  alpha_file_channel, 0, 0, false, false,
577  options, nullptr);
578 }
579 
580 /**
581  * Reads a single file into a single page or mipmap level, or automatically
582  * reads a series of files into a series of pages and/or mipmap levels.
583  *
584  * See the description of the full-parameter read() method for the meaning of
585  * the various parameters.
586  */
588 read(const Filename &fullpath, int z, int n,
589  bool read_pages, bool read_mipmaps,
590  const LoaderOptions &options) {
591  CDWriter cdata(_cycler, true);
592  cdata->inc_properties_modified();
593  cdata->inc_image_modified();
594  return do_read(cdata, fullpath, Filename(), 0, 0, z, n, read_pages, read_mipmaps,
595  options, nullptr);
596 }
597 
598 /**
599  * Reads the texture from the indicated filename. If
600  * primary_file_num_channels is not 0, it specifies the number of components
601  * to downgrade the image to if it is greater than this number.
602  *
603  * If the filename has the extension .txo, this implicitly reads a texture
604  * object instead of a filename (which replaces all of the texture
605  * properties). In this case, all the rest of the parameters are ignored, and
606  * the filename should not contain any hash marks; just the one named file
607  * will be read, since a single .txo file can contain all pages and mipmaps
608  * necessary to define a texture.
609  *
610  * If alpha_fullpath is not empty, it specifies the name of a file from which
611  * to retrieve the alpha. In this case, alpha_file_channel represents the
612  * numeric channel of this image file to use as the resulting texture's alpha
613  * channel; usually, this is 0 to indicate the grayscale combination of r, g,
614  * b; or it may be a one-based channel number, e.g. 1 for the red channel, 2
615  * for the green channel, and so on.
616  *
617  * If read pages is false, then z indicates the page number into which this
618  * image will be assigned. Normally this is 0 for the first (or only) page of
619  * the texture. 3-D textures have one page for each level of depth, and cube
620  * map textures always have six pages.
621  *
622  * If read_pages is true, multiple images will be read at once, one for each
623  * page of a cube map or a 3-D texture. In this case, the filename should
624  * contain a sequence of one or more hash marks ("#") which will be filled in
625  * with the z value of each page, zero-based. In this case, the z parameter
626  * indicates the maximum z value that will be loaded, or 0 to load all
627  * filenames that exist.
628  *
629  * If read_mipmaps is false, then n indicates the mipmap level to which this
630  * image will be assigned. Normally this is 0 for the base texture image, but
631  * it is possible to load custom mipmap levels into the later images. After
632  * the base texture image is loaded (thus defining the size of the texture),
633  * you can call get_expected_num_mipmap_levels() to determine the maximum
634  * sensible value for n.
635  *
636  * If read_mipmaps is true, multiple images will be read as above, but this
637  * time the images represent the different mipmap levels of the texture image.
638  * In this case, the n parameter indicates the maximum n value that will be
639  * loaded, or 0 to load all filenames that exist (up to the expected number of
640  * mipmap levels).
641  *
642  * If both read_pages and read_mipmaps is true, then both sequences will be
643  * read; the filename should contain two sequences of hash marks, separated by
644  * some character such as a hyphen, underscore, or dot. The first hash mark
645  * sequence will be filled in with the mipmap level, while the second hash
646  * mark sequence will be the page index.
647  *
648  * This method implicitly sets keep_ram_image to false.
649  */
651 read(const Filename &fullpath, const Filename &alpha_fullpath,
652  int primary_file_num_channels, int alpha_file_channel,
653  int z, int n, bool read_pages, bool read_mipmaps,
654  BamCacheRecord *record,
655  const LoaderOptions &options) {
656  CDWriter cdata(_cycler, true);
657  cdata->inc_properties_modified();
658  cdata->inc_image_modified();
659  return do_read(cdata, fullpath, alpha_fullpath, primary_file_num_channels,
660  alpha_file_channel, z, n, read_pages, read_mipmaps,
661  options, record);
662 }
663 
664 /**
665  * Estimates the amount of texture memory that will be consumed by loading
666  * this texture. This returns a value that is not specific to any particular
667  * graphics card or driver; it tries to make a reasonable assumption about how
668  * a driver will load the texture. It does not account for texture
669  * compression or anything fancy. This is mainly useful for debugging and
670  * reporting purposes.
671  *
672  * Returns a value in bytes.
673  */
674 size_t Texture::
675 estimate_texture_memory() const {
676  CDReader cdata(_cycler);
677  size_t pixels = cdata->_x_size * cdata->_y_size * cdata->_z_size;
678 
679  size_t bpp = 0;
680  switch (cdata->_format) {
681  case Texture::F_rgb332:
682  bpp = 1;
683  break;
684 
685  case Texture::F_alpha:
686  case Texture::F_red:
687  case Texture::F_green:
688  case Texture::F_blue:
689  case Texture::F_luminance:
690  case Texture::F_sluminance:
691  case Texture::F_r8i:
692  bpp = 1;
693  break;
694 
695  case Texture::F_luminance_alpha:
696  case Texture::F_luminance_alphamask:
697  case Texture::F_sluminance_alpha:
698  case Texture::F_rgba4:
699  case Texture::F_rgb5:
700  case Texture::F_rgba5:
701  case Texture::F_rg:
702  bpp = 2;
703  break;
704 
705  case Texture::F_rgba:
706  case Texture::F_rgbm:
707  case Texture::F_rgb:
708  case Texture::F_srgb:
709  // Most of the above formats have only 3 bytes, but they are most likely
710  // to get padded by the driver
711  bpp = 4;
712  break;
713 
714  case Texture::F_color_index:
715  case Texture::F_rgb8:
716  case Texture::F_rgba8:
717  case Texture::F_srgb_alpha:
718  case Texture::F_rgb8i:
719  case Texture::F_rgba8i:
720  bpp = 4;
721  break;
722 
723  case Texture::F_depth_stencil:
724  bpp = 4;
725  break;
726 
727  case Texture::F_depth_component:
728  case Texture::F_depth_component16:
729  bpp = 2;
730  break;
731 
732  case Texture::F_depth_component24: // Gets padded
733  case Texture::F_depth_component32:
734  bpp = 4;
735  break;
736 
737  case Texture::F_rgba12:
738  case Texture::F_rgb12:
739  bpp = 8;
740  break;
741 
742  case Texture::F_rgba32:
743  case Texture::F_rgba32i:
744  bpp = 16;
745  break;
746 
747  case Texture::F_r16:
748  case Texture::F_r16i:
749  case Texture::F_rg8i:
750  bpp = 2;
751  break;
752  case Texture::F_rg16:
753  case Texture::F_rg16i:
754  bpp = 4;
755  break;
756  case Texture::F_rgb16:
757  case Texture::F_rgb16i:
758  case Texture::F_rgba16:
759  case Texture::F_rgba16i:
760  bpp = 8;
761  break;
762 
763  case Texture::F_r32i:
764  case Texture::F_r32:
765  bpp = 4;
766  break;
767 
768  case Texture::F_rg32:
769  case Texture::F_rg32i:
770  bpp = 8;
771  break;
772 
773  case Texture::F_rgb32:
774  case Texture::F_rgb32i:
775  bpp = 16;
776  break;
777 
778  case Texture::F_r11_g11_b10:
779  case Texture::F_rgb9_e5:
780  case Texture::F_rgb10_a2:
781  bpp = 4;
782  break;
783  }
784 
785  if (bpp == 0) {
786  bpp = 4;
787  gobj_cat.warning() << "Unhandled format in estimate_texture_memory(): "
788  << cdata->_format << "\n";
789  }
790 
791  size_t bytes = pixels * bpp;
792  if (uses_mipmaps()) {
793  bytes = (bytes * 4) / 3;
794  }
795 
796  return bytes;
797 }
798 
799 /**
800  * Records an arbitrary object in the Texture, associated with a specified
801  * key. The object may later be retrieved by calling get_aux_data() with the
802  * same key.
803  *
804  * These data objects are not recorded to a bam or txo file.
805  */
void Texture::
set_aux_data(const string &key, TypedReferenceCount *aux_data) {
  // _aux_data is guarded by the per-instance lock, not the pipeline cycler.
  MutexHolder holder(_lock);
  _aux_data[key] = aux_data;  // silently replaces any previous record
}
811 
812 /**
813  * Removes a record previously recorded via set_aux_data().
814  */
void Texture::
clear_aux_data(const string &key) {
  // Removing a nonexistent key is a harmless no-op.
  MutexHolder holder(_lock);
  _aux_data.erase(key);
}
820 
821 /**
822  * Returns a record previously recorded via set_aux_data(). Returns NULL if
823  * there was no record associated with the indicated key.
824  */
826 get_aux_data(const string &key) const {
827  MutexHolder holder(_lock);
828  AuxData::const_iterator di;
829  di = _aux_data.find(key);
830  if (di != _aux_data.end()) {
831  return (*di).second;
832  }
833  return nullptr;
834 }
835 
836 /**
837  * Reads the texture from a Panda texture object. This defines the complete
838  * Texture specification, including the image data as well as all texture
839  * properties. This only works if the txo file contains a static Texture
840  * image, as opposed to a subclass of Texture such as a movie texture.
841  *
842  * Pass a real filename if it is available, or empty string if it is not.
843  */
845 read_txo(istream &in, const string &filename) {
846  CDWriter cdata(_cycler, true);
847  cdata->inc_properties_modified();
848  cdata->inc_image_modified();
849  return do_read_txo(cdata, in, filename);
850 }
851 
852 /**
853  * Constructs a new Texture object from the txo file. This is similar to
854  * Texture::read_txo(), but it constructs and returns a new object, which
855  * allows it to return a subclass of Texture (for instance, a movie texture).
856  *
857  * Pass a real filename if it is available, or empty string if it is not.
858  */
859 PT(Texture) Texture::
860 make_from_txo(istream &in, const string &filename) {
861  DatagramInputFile din;
862 
863  if (!din.open(in, filename)) {
864  gobj_cat.error()
865  << "Could not read texture object: " << filename << "\n";
866  return nullptr;
867  }
868 
869  string head;
870  if (!din.read_header(head, _bam_header.size())) {
871  gobj_cat.error()
872  << filename << " is not a texture object file.\n";
873  return nullptr;
874  }
875 
876  if (head != _bam_header) {
877  gobj_cat.error()
878  << filename << " is not a texture object file.\n";
879  return nullptr;
880  }
881 
882  BamReader reader(&din);
883  if (!reader.init()) {
884  return nullptr;
885  }
886 
887  TypedWritable *object = reader.read_object();
888 
889  if (object != nullptr &&
890  object->is_exact_type(BamCacheRecord::get_class_type())) {
891  // Here's a special case: if the first object in the file is a
892  // BamCacheRecord, it's really a cache data file and not a true txo file;
893  // but skip over the cache data record and let the user treat it like an
894  // ordinary txo file.
895  object = reader.read_object();
896  }
897 
898  if (object == nullptr) {
899  gobj_cat.error()
900  << "Texture object " << filename << " is empty.\n";
901  return nullptr;
902 
903  } else if (!object->is_of_type(Texture::get_class_type())) {
904  gobj_cat.error()
905  << "Texture object " << filename << " contains a "
906  << object->get_type() << ", not a Texture.\n";
907  return nullptr;
908  }
909 
910  PT(Texture) other = DCAST(Texture, object);
911  if (!reader.resolve()) {
912  gobj_cat.error()
913  << "Unable to fully resolve texture object file.\n";
914  return nullptr;
915  }
916 
917  return other;
918 }
919 
920 /**
921  * Writes the texture to a Panda texture object. This defines the complete
922  * Texture specification, including the image data as well as all texture
923  * properties.
924  *
925  * The filename is just for reference.
926  */
bool Texture::
write_txo(ostream &out, const string &filename) const {
  // The filename is just for reference (error messages); all output goes
  // to the stream.
  CDReader cdata(_cycler);
  return do_write_txo(cdata, out, filename);
}
932 
933 /**
934  * Reads the texture from a DDS file object. This is a Microsoft-defined file
935  * format; it is similar in principle to a txo object, in that it is designed
936  * to contain the texture image in a form as similar as possible to its
937  * runtime image, and it can contain mipmaps, pre-compressed textures, and so
938  * on.
939  *
940  * As with read_txo, the filename is just for reference.
941  */
943 read_dds(istream &in, const string &filename, bool header_only) {
944  CDWriter cdata(_cycler, true);
945  cdata->inc_properties_modified();
946  cdata->inc_image_modified();
947  return do_read_dds(cdata, in, filename, header_only);
948 }
949 
950 /**
951  * Reads the texture from a KTX file object. This is a Khronos-defined file
952  * format; it is similar in principle to a dds object, in that it is designed
953  * to contain the texture image in a form as similar as possible to its
954  * runtime image, and it can contain mipmaps, pre-compressed textures, and so
955  * on.
956  *
957  * As with read_dds, the filename is just for reference.
958  */
960 read_ktx(istream &in, const string &filename, bool header_only) {
961  CDWriter cdata(_cycler, true);
962  cdata->inc_properties_modified();
963  cdata->inc_image_modified();
964  return do_read_ktx(cdata, in, filename, header_only);
965 }
966 
967 /**
968  * Loads a texture whose filename is derived by concatenating a suffix to the
969  * filename of this texture. May return NULL, for example, if this texture
970  * doesn't have a filename.
971  */
973 load_related(const InternalName *suffix) const {
974  MutexHolder holder(_lock);
975  CDReader cdata(_cycler);
976 
977  RelatedTextures::const_iterator ti;
978  ti = _related_textures.find(suffix);
979  if (ti != _related_textures.end()) {
980  return (*ti).second;
981  }
982  if (cdata->_fullpath.empty()) {
983  return nullptr;
984  }
985  Filename main = cdata->_fullpath;
986  main.set_basename_wo_extension(main.get_basename_wo_extension() +
987  suffix->get_name());
988  PT(Texture) res;
989  if (!cdata->_alpha_fullpath.empty()) {
990  Filename alph = cdata->_alpha_fullpath;
992  suffix->get_name());
994  if (vfs->exists(alph)) {
995  // The alpha variant of the filename, with the suffix, exists. Use it
996  // to load the texture.
997  res = TexturePool::load_texture(main, alph,
998  cdata->_primary_file_num_channels,
999  cdata->_alpha_file_channel, false);
1000  } else {
1001  // If the alpha variant of the filename doesn't exist, just go ahead and
1002  // load the related texture without alpha.
1003  res = TexturePool::load_texture(main);
1004  }
1005 
1006  } else {
1007  // No alpha filename--just load the single file. It doesn't necessarily
1008  // have the same number of channels as this one.
1009  res = TexturePool::load_texture(main);
1010  }
1011 
1012  // I'm casting away the const-ness of 'this' because this field is only a
1013  // cache.
1014  ((Texture *)this)->_related_textures.insert(RelatedTextures::value_type(suffix, res));
1015  return res;
1016 }
1017 
1018 /**
1019  * Replaces the current system-RAM image with the new data, converting it
1020  * first if necessary from the indicated component-order format. See
1021  * get_ram_image_as() for specifications about the format. This method cannot
1022  * support compressed image data or sub-pages; use set_ram_image() for that.
1023  */
1025 set_ram_image_as(CPTA_uchar image, const string &supplied_format) {
1026  CDWriter cdata(_cycler, true);
1027 
1028  string format = upcase(supplied_format);
1029 
1030  // Make sure we can grab something that's uncompressed.
1031  size_t imgsize = (size_t)cdata->_x_size * (size_t)cdata->_y_size *
1032  (size_t)cdata->_z_size * (size_t)cdata->_num_views;
1033  nassertv(image.size() == (size_t)(cdata->_component_width * format.size() * imgsize));
1034 
1035  // Check if the format is already what we have internally.
1036  if ((cdata->_num_components == 1 && format.size() == 1) ||
1037  (cdata->_num_components == 2 && format.size() == 2 && format.at(1) == 'A' && format.at(0) != 'A') ||
1038  (cdata->_num_components == 3 && format == "BGR") ||
1039  (cdata->_num_components == 4 && format == "BGRA")) {
1040  // The format string is already our format, so we just need to copy it.
1041  do_set_ram_image(cdata, image);
1042  return;
1043  }
1044 
1045  // Create a new empty array that can hold our image.
1046  PTA_uchar newdata = PTA_uchar::empty_array(imgsize * cdata->_num_components * cdata->_component_width, get_class_type());
1047 
1048  // These ifs are for optimization of commonly used image types.
1049  if (cdata->_component_width == 1) {
1050  if (format == "RGBA" && cdata->_num_components == 4) {
1051  imgsize *= 4;
1052  for (int p = 0; p < imgsize; p += 4) {
1053  newdata[p + 2] = image[p ];
1054  newdata[p + 1] = image[p + 1];
1055  newdata[p ] = image[p + 2];
1056  newdata[p + 3] = image[p + 3];
1057  }
1058  do_set_ram_image(cdata, newdata);
1059  return;
1060  }
1061  if (format == "RGB" && cdata->_num_components == 3) {
1062  imgsize *= 3;
1063  for (int p = 0; p < imgsize; p += 3) {
1064  newdata[p + 2] = image[p ];
1065  newdata[p + 1] = image[p + 1];
1066  newdata[p ] = image[p + 2];
1067  }
1068  do_set_ram_image(cdata, newdata);
1069  return;
1070  }
1071  if (format == "A" && cdata->_num_components != 3) {
1072  // We can generally rely on alpha to be the last component.
1073  int component = cdata->_num_components - 1;
1074  for (int p = 0; p < imgsize; ++p) {
1075  newdata[component] = image[p];
1076  }
1077  do_set_ram_image(cdata, newdata);
1078  return;
1079  }
1080  for (int p = 0; p < imgsize; ++p) {
1081  for (uchar s = 0; s < format.size(); ++s) {
1082  signed char component = -1;
1083  if (format.at(s) == 'B' || (cdata->_num_components <= 2 && format.at(s) != 'A')) {
1084  component = 0;
1085  } else if (format.at(s) == 'G') {
1086  component = 1;
1087  } else if (format.at(s) == 'R') {
1088  component = 2;
1089  } else if (format.at(s) == 'A') {
1090  if (cdata->_num_components != 3) {
1091  component = cdata->_num_components - 1;
1092  } else {
1093  // Ignore.
1094  }
1095  } else if (format.at(s) == '0') {
1096  // Ignore.
1097  } else if (format.at(s) == '1') {
1098  // Ignore.
1099  } else {
1100  gobj_cat.error() << "Unexpected component character '"
1101  << format.at(s) << "', expected one of RGBA!\n";
1102  return;
1103  }
1104  if (component >= 0) {
1105  newdata[p * cdata->_num_components + component] = image[p * format.size() + s];
1106  }
1107  }
1108  }
1109  do_set_ram_image(cdata, newdata);
1110  return;
1111  }
1112  for (int p = 0; p < imgsize; ++p) {
1113  for (uchar s = 0; s < format.size(); ++s) {
1114  signed char component = -1;
1115  if (format.at(s) == 'B' || (cdata->_num_components <= 2 && format.at(s) != 'A')) {
1116  component = 0;
1117  } else if (format.at(s) == 'G') {
1118  component = 1;
1119  } else if (format.at(s) == 'R') {
1120  component = 2;
1121  } else if (format.at(s) == 'A') {
1122  if (cdata->_num_components != 3) {
1123  component = cdata->_num_components - 1;
1124  } else {
1125  // Ignore.
1126  }
1127  } else if (format.at(s) == '0') {
1128  // Ignore.
1129  } else if (format.at(s) == '1') {
1130  // Ignore.
1131  } else {
1132  gobj_cat.error() << "Unexpected component character '"
1133  << format.at(s) << "', expected one of RGBA!\n";
1134  return;
1135  }
1136  if (component >= 0) {
1137  memcpy((void*)(newdata + (p * cdata->_num_components + component) * cdata->_component_width),
1138  (void*)(image + (p * format.size() + s) * cdata->_component_width),
1139  cdata->_component_width);
1140  }
1141  }
1142  }
1143  do_set_ram_image(cdata, newdata);
1144  return;
1145 }
1146 
/**
 * Returns the flag that indicates whether this Texture is eligible to have
 * its main RAM copy of the texture memory dumped when the texture is prepared
 * for rendering.  See set_keep_ram_image().
 */
bool Texture::
get_keep_ram_image() const {
  CDReader cdata(_cycler);
  return cdata->_keep_ram_image;
}
1157 
/**
 * Returns true if there is enough information in this Texture object to write
 * it to the bam cache successfully, false otherwise.  For most textures, this
 * is the same as has_ram_image().
 */
bool Texture::
is_cacheable() const {
  CDReader cdata(_cycler);
  return do_has_bam_rawdata(cdata);
}
1168 
1169 /**
1170  * Returns the number of contiguous mipmap levels that exist in RAM, up until
1171  * the first gap in the sequence. It is guaranteed that at least mipmap
1172  * levels [0, get_num_ram_mipmap_images()) exist.
1173  *
1174  * The number returned will never exceed the number of required mipmap images
1175  * based on the size of the texture and its filter mode.
1176  *
1177  * This method is different from get_num_ram_mipmap_images() in that it
1178  * returns only the number of mipmap levels that can actually be usefully
1179  * loaded, regardless of the actual number that may be stored.
1180  */
1181 int Texture::
1183  CDReader cdata(_cycler);
1184  if (cdata->_ram_images.empty() || cdata->_ram_images[0]._image.empty()) {
1185  // If we don't even have a base image, the answer is none.
1186  return 0;
1187  }
1188  if (!uses_mipmaps()) {
1189  // If we have a base image and don't require mipmapping, the answer is 1.
1190  return 1;
1191  }
1192 
1193  // Check that we have enough mipmap levels to meet the size requirements.
1194  int size = max(cdata->_x_size, max(cdata->_y_size, cdata->_z_size));
1195  int n = 0;
1196  int x = 1;
1197  while (x < size) {
1198  x = (x << 1);
1199  ++n;
1200  if (n >= (int)cdata->_ram_images.size() || cdata->_ram_images[n]._image.empty()) {
1201  return n;
1202  }
1203  }
1204 
1205  ++n;
1206  return n;
1207 }
1208 
1209 /**
1210  * Returns the system-RAM image data associated with the nth mipmap level, if
1211  * present. Returns NULL if the nth mipmap level is not present.
1212  */
1214 get_ram_mipmap_image(int n) const {
1215  CDReader cdata(_cycler);
1216  if (n < (int)cdata->_ram_images.size() && !cdata->_ram_images[n]._image.empty()) {
1217  return cdata->_ram_images[n]._image;
1218  }
1219  return CPTA_uchar(get_class_type());
1220 }
1221 
1222 /**
1223  * Similiar to get_ram_mipmap_image(), however, in this case the void pointer
1224  * for the given ram image is returned. This will be NULL unless it has been
1225  * explicitly set.
1226  */
1228 get_ram_mipmap_pointer(int n) const {
1229  CDReader cdata(_cycler);
1230  if (n < (int)cdata->_ram_images.size()) {
1231  return cdata->_ram_images[n]._pointer_image;
1232  }
1233  return nullptr;
1234 }
1235 
1236 /**
1237  * Sets an explicit void pointer as the texture's mipmap image for the
1238  * indicated level. This is a special call to direct a texture to reference
1239  * some external image location, for instance from a webcam input.
1240  *
1241  * The texture will henceforth reference this pointer directly, instead of its
1242  * own internal storage; the user is responsible for ensuring the data at this
1243  * address remains allocated and valid, and in the correct format, during the
1244  * lifetime of the texture.
1245  */
1247 set_ram_mipmap_pointer(int n, void *image, size_t page_size) {
1248  CDWriter cdata(_cycler, true);
1249  nassertv(cdata->_ram_image_compression != CM_off || do_get_expected_ram_mipmap_image_size(cdata, n));
1250 
1251  while (n >= (int)cdata->_ram_images.size()) {
1252  cdata->_ram_images.push_back(RamImage());
1253  }
1254 
1255  cdata->_ram_images[n]._page_size = page_size;
1256  // _ram_images[n]._image.clear(); wtf is going on?!
1257  cdata->_ram_images[n]._pointer_image = image;
1258  cdata->inc_image_modified();
1259 }
1260 
1261 /**
1262  * Accepts a raw pointer cast as an int, which is then passed to
1263  * set_ram_mipmap_pointer(); see the documentation for that method.
1264  *
1265  * This variant is particularly useful to set an external pointer from a
1266  * language like Python, which doesn't support void pointers directly.
1267  */
1269 set_ram_mipmap_pointer_from_int(long long pointer, int n, int page_size) {
1270  set_ram_mipmap_pointer(n, (void*)pointer, (size_t)page_size);
1271 }
1272 
1273 /**
1274  * Discards the current system-RAM image for the nth mipmap level.
1275  */
1277 clear_ram_mipmap_image(int n) {
1278  CDWriter cdata(_cycler, true);
1279  if (n >= (int)cdata->_ram_images.size()) {
1280  return;
1281  }
1282  cdata->_ram_images[n]._page_size = 0;
1283  cdata->_ram_images[n]._image.clear();
1284  cdata->_ram_images[n]._pointer_image = nullptr;
1285 }
1286 
1287 /**
1288  * Returns a modifiable pointer to the internal "simple" texture image. See
1289  * set_simple_ram_image().
1290  */
1291 PTA_uchar Texture::
1293  CDWriter cdata(_cycler, true);
1294  cdata->_simple_image_date_generated = (int32_t)time(nullptr);
1295  return cdata->_simple_ram_image._image;
1296 }
1297 
1298 /**
1299  * Creates an empty array for the simple ram image of the indicated size, and
1300  * returns a modifiable pointer to the new array. See set_simple_ram_image().
1301  */
1302 PTA_uchar Texture::
1303 new_simple_ram_image(int x_size, int y_size) {
1304  CDWriter cdata(_cycler, true);
1305  nassertr(cdata->_texture_type == TT_2d_texture, PTA_uchar());
1306  size_t expected_page_size = (size_t)(x_size * y_size * 4);
1307 
1308  cdata->_simple_x_size = x_size;
1309  cdata->_simple_y_size = y_size;
1310  cdata->_simple_ram_image._image = PTA_uchar::empty_array(expected_page_size);
1311  cdata->_simple_ram_image._page_size = expected_page_size;
1312  cdata->_simple_image_date_generated = (int32_t)time(nullptr);
1313  cdata->inc_simple_image_modified();
1314 
1315  return cdata->_simple_ram_image._image;
1316 }
1317 
1318 /**
1319  * Computes the "simple" ram image by loading the main RAM image, if it is not
1320  * already available, and reducing it to 16x16 or smaller. This may be an
1321  * expensive operation.
1322  */
1325  CDWriter cdata(_cycler, true);
1326 
1327  if (cdata->_texture_type != TT_2d_texture ||
1328  cdata->_ram_image_compression != CM_off) {
1329  return;
1330  }
1331 
1332  PNMImage pnmimage;
1333  if (!do_store_one(cdata, pnmimage, 0, 0)) {
1334  return;
1335  }
1336 
1337  // Start at the suggested size from the config file.
1338  int x_size = simple_image_size.get_word(0);
1339  int y_size = simple_image_size.get_word(1);
1340 
1341  // Limit it to no larger than the source image, and also make it a power of
1342  // two.
1343  x_size = down_to_power_2(min(x_size, cdata->_x_size));
1344  y_size = down_to_power_2(min(y_size, cdata->_y_size));
1345 
1346  // Generate a reduced image of that size.
1347  PNMImage scaled(x_size, y_size, pnmimage.get_num_channels());
1348  scaled.quick_filter_from(pnmimage);
1349 
1350  // Make sure the reduced image has 4 components, by convention.
1351  if (!scaled.has_alpha()) {
1352  scaled.add_alpha();
1353  scaled.alpha_fill(1.0);
1354  }
1355  scaled.set_num_channels(4);
1356 
1357  // Now see if we can go even smaller.
1358  bool did_anything;
1359  do {
1360  did_anything = false;
1361 
1362  // Try to reduce X.
1363  if (x_size > 1) {
1364  int new_x_size = (x_size >> 1);
1365  PNMImage smaller(new_x_size, y_size, 4);
1366  smaller.quick_filter_from(scaled);
1367  PNMImage bigger(x_size, y_size, 4);
1368  bigger.quick_filter_from(smaller);
1369 
1370  if (compare_images(scaled, bigger)) {
1371  scaled.take_from(smaller);
1372  x_size = new_x_size;
1373  did_anything = true;
1374  }
1375  }
1376 
1377  // Try to reduce Y.
1378  if (y_size > 1) {
1379  int new_y_size = (y_size >> 1);
1380  PNMImage smaller(x_size, new_y_size, 4);
1381  smaller.quick_filter_from(scaled);
1382  PNMImage bigger(x_size, y_size, 4);
1383  bigger.quick_filter_from(smaller);
1384 
1385  if (compare_images(scaled, bigger)) {
1386  scaled.take_from(smaller);
1387  y_size = new_y_size;
1388  did_anything = true;
1389  }
1390  }
1391  } while (did_anything);
1392 
1393  size_t expected_page_size = (size_t)(x_size * y_size * 4);
1394  PTA_uchar image = PTA_uchar::empty_array(expected_page_size, get_class_type());
1395  convert_from_pnmimage(image, expected_page_size, x_size, 0, 0, 0, scaled, 4, 1);
1396 
1397  do_set_simple_ram_image(cdata, image, x_size, y_size);
1398  cdata->_simple_image_date_generated = (int32_t)time(nullptr);
1399 }
1400 
1401 /**
1402  * Returns a TexturePeeker object that can be used to examine the individual
1403  * texels stored within this Texture by (u, v) coordinate.
1404  *
1405  * If the texture has a ram image resident, that image is used. If it does
1406  * not have a full ram image but does have a simple_ram_image resident, that
1407  * image is used instead. If neither image is resident the full image is
1408  * reloaded.
1409  *
1410  * Returns NULL if the texture cannot find an image to load, or the texture
1411  * format is incompatible.
1412  */
1413 PT(TexturePeeker) Texture::
1414 peek() {
1415  CDWriter cdata(_cycler, unlocked_ensure_ram_image(true));
1416 
1417  PT(TexturePeeker) peeker = new TexturePeeker(this, cdata);
1418  if (peeker->is_valid()) {
1419  return peeker;
1420  }
1421 
1422  return nullptr;
1423 }
1424 
/**
 * Indicates that the texture should be enqueued to be prepared in the
 * indicated prepared_objects at the beginning of the next frame.  This will
 * ensure the texture is already loaded into texture memory if it is expected
 * to be rendered soon.
 *
 * Use this function instead of prepare_now() to preload textures from a user
 * interface standpoint.
 */
PT(AsyncFuture) Texture::
prepare(PreparedGraphicsObjects *prepared_objects) {
  return prepared_objects->enqueue_texture_future(this);
}
1438 
1439 /**
1440  * Returns true if the texture has already been prepared or enqueued for
1441  * preparation on the indicated GSG, false otherwise.
1442  */
1444 is_prepared(PreparedGraphicsObjects *prepared_objects) const {
1445  MutexHolder holder(_lock);
1446  PreparedViews::const_iterator pvi;
1447  pvi = _prepared_views.find(prepared_objects);
1448  if (pvi != _prepared_views.end()) {
1449  return true;
1450  }
1451  return prepared_objects->is_texture_queued(this);
1452 }
1453 
1454 /**
1455  * Returns true if the texture needs to be re-loaded onto the indicated GSG,
1456  * either because its image data is out-of-date, or because it's not fully
1457  * prepared now.
1458  */
1460 was_image_modified(PreparedGraphicsObjects *prepared_objects) const {
1461  MutexHolder holder(_lock);
1462  CDReader cdata(_cycler);
1463 
1464  PreparedViews::const_iterator pvi;
1465  pvi = _prepared_views.find(prepared_objects);
1466  if (pvi != _prepared_views.end()) {
1467  const Contexts &contexts = (*pvi).second;
1468  for (int view = 0; view < cdata->_num_views; ++view) {
1469  Contexts::const_iterator ci;
1470  ci = contexts.find(view);
1471  if (ci == contexts.end()) {
1472  return true;
1473  }
1474  TextureContext *tc = (*ci).second;
1475  if (tc->was_image_modified()) {
1476  return true;
1477  }
1478  }
1479  return false;
1480  }
1481  return true;
1482 }
1483 
1484 /**
1485  * Returns the number of bytes which the texture is reported to consume within
1486  * graphics memory, for the indicated GSG. This may return a nonzero value
1487  * even if the texture is not currently resident; you should also check
1488  * get_resident() if you want to know how much space the texture is actually
1489  * consuming right now.
1490  */
1492 get_data_size_bytes(PreparedGraphicsObjects *prepared_objects) const {
1493  MutexHolder holder(_lock);
1494  CDReader cdata(_cycler);
1495 
1496  PreparedViews::const_iterator pvi;
1497  size_t total_size = 0;
1498  pvi = _prepared_views.find(prepared_objects);
1499  if (pvi != _prepared_views.end()) {
1500  const Contexts &contexts = (*pvi).second;
1501  for (int view = 0; view < cdata->_num_views; ++view) {
1502  Contexts::const_iterator ci;
1503  ci = contexts.find(view);
1504  if (ci != contexts.end()) {
1505  TextureContext *tc = (*ci).second;
1506  total_size += tc->get_data_size_bytes();
1507  }
1508  }
1509  }
1510 
1511  return total_size;
1512 }
1513 
1514 /**
1515  * Returns true if this Texture was rendered in the most recent frame within
1516  * the indicated GSG.
1517  */
1519 get_active(PreparedGraphicsObjects *prepared_objects) const {
1520  MutexHolder holder(_lock);
1521  CDReader cdata(_cycler);
1522 
1523  PreparedViews::const_iterator pvi;
1524  pvi = _prepared_views.find(prepared_objects);
1525  if (pvi != _prepared_views.end()) {
1526  const Contexts &contexts = (*pvi).second;
1527  for (int view = 0; view < cdata->_num_views; ++view) {
1528  Contexts::const_iterator ci;
1529  ci = contexts.find(view);
1530  if (ci != contexts.end()) {
1531  TextureContext *tc = (*ci).second;
1532  if (tc->get_active()) {
1533  return true;
1534  }
1535  }
1536  }
1537  }
1538  return false;
1539 }
1540 
1541 /**
1542  * Returns true if this Texture is reported to be resident within graphics
1543  * memory for the indicated GSG.
1544  */
1546 get_resident(PreparedGraphicsObjects *prepared_objects) const {
1547  MutexHolder holder(_lock);
1548  CDReader cdata(_cycler);
1549 
1550  PreparedViews::const_iterator pvi;
1551  pvi = _prepared_views.find(prepared_objects);
1552  if (pvi != _prepared_views.end()) {
1553  const Contexts &contexts = (*pvi).second;
1554  for (int view = 0; view < cdata->_num_views; ++view) {
1555  Contexts::const_iterator ci;
1556  ci = contexts.find(view);
1557  if (ci != contexts.end()) {
1558  TextureContext *tc = (*ci).second;
1559  if (tc->get_resident()) {
1560  return true;
1561  }
1562  }
1563  }
1564  }
1565  return false;
1566 }
1567 
1568 /**
1569  * Frees the texture context only on the indicated object, if it exists there.
1570  * Returns true if it was released, false if it had not been prepared.
1571  */
1573 release(PreparedGraphicsObjects *prepared_objects) {
1574  MutexHolder holder(_lock);
1575  PreparedViews::iterator pvi;
1576  pvi = _prepared_views.find(prepared_objects);
1577  if (pvi != _prepared_views.end()) {
1578  Contexts temp;
1579  temp.swap((*pvi).second);
1580  Contexts::iterator ci;
1581  for (ci = temp.begin(); ci != temp.end(); ++ci) {
1582  TextureContext *tc = (*ci).second;
1583  if (tc != nullptr) {
1584  prepared_objects->release_texture(tc);
1585  }
1586  }
1587  _prepared_views.erase(pvi);
1588  }
1589 
1590  // Maybe it wasn't prepared yet, but it's about to be.
1591  return prepared_objects->dequeue_texture(this);
1592 }
1593 
1594 /**
1595  * Frees the context allocated on all objects for which the texture has been
1596  * declared. Returns the number of contexts which have been freed.
1597  */
1599 release_all() {
1600  MutexHolder holder(_lock);
1601 
1602  // We have to traverse a copy of the _prepared_views list, because the
1603  // PreparedGraphicsObjects object will call clear_prepared() in response to
1604  // each release_texture(), and we don't want to be modifying the
1605  // _prepared_views list while we're traversing it.
1606  PreparedViews temp;
1607  temp.swap(_prepared_views);
1608  int num_freed = (int)temp.size();
1609 
1610  PreparedViews::iterator pvi;
1611  for (pvi = temp.begin(); pvi != temp.end(); ++pvi) {
1612  PreparedGraphicsObjects *prepared_objects = (*pvi).first;
1613  Contexts temp;
1614  temp.swap((*pvi).second);
1615  Contexts::iterator ci;
1616  for (ci = temp.begin(); ci != temp.end(); ++ci) {
1617  TextureContext *tc = (*ci).second;
1618  if (tc != nullptr) {
1619  prepared_objects->release_texture(tc);
1620  }
1621  }
1622  }
1623 
1624  return num_freed;
1625 }
1626 
1627 /**
1628  * Not to be confused with write(Filename), this method simply describes the
1629  * texture properties.
1630  */
1632 write(ostream &out, int indent_level) const {
1633  CDReader cdata(_cycler);
1634  indent(out, indent_level)
1635  << cdata->_texture_type << " " << get_name();
1636  if (!cdata->_filename.empty()) {
1637  out << " (from " << cdata->_filename << ")";
1638  }
1639  out << "\n";
1640 
1641  indent(out, indent_level + 2);
1642 
1643  switch (cdata->_texture_type) {
1644  case TT_1d_texture:
1645  out << "1-d, " << cdata->_x_size;
1646  break;
1647 
1648  case TT_2d_texture:
1649  out << "2-d, " << cdata->_x_size << " x " << cdata->_y_size;
1650  break;
1651 
1652  case TT_3d_texture:
1653  out << "3-d, " << cdata->_x_size << " x " << cdata->_y_size << " x " << cdata->_z_size;
1654  break;
1655 
1656  case TT_2d_texture_array:
1657  out << "2-d array, " << cdata->_x_size << " x " << cdata->_y_size << " x " << cdata->_z_size;
1658  break;
1659 
1660  case TT_cube_map:
1661  out << "cube map, " << cdata->_x_size << " x " << cdata->_y_size;
1662  break;
1663 
1664  case TT_cube_map_array:
1665  out << "cube map array, " << cdata->_x_size << " x " << cdata->_y_size << " x " << cdata->_z_size;
1666  break;
1667 
1668  case TT_buffer_texture:
1669  out << "buffer, " << cdata->_x_size;
1670  break;
1671 
1672  case TT_1d_texture_array:
1673  out << "1-d array, " << cdata->_x_size << " x " << cdata->_y_size;
1674  break;
1675  }
1676 
1677  if (cdata->_num_views > 1) {
1678  out << " (x " << cdata->_num_views << " views)";
1679  }
1680 
1681  out << " pixels, each " << cdata->_num_components;
1682 
1683  switch (cdata->_component_type) {
1684  case T_unsigned_byte:
1685  case T_byte:
1686  out << " bytes";
1687  break;
1688 
1689  case T_unsigned_short:
1690  case T_short:
1691  out << " shorts";
1692  break;
1693 
1694  case T_half_float:
1695  out << " half";
1696  case T_float:
1697  out << " floats";
1698  break;
1699 
1700  case T_unsigned_int_24_8:
1701  case T_int:
1702  case T_unsigned_int:
1703  out << " ints";
1704  break;
1705 
1706  default:
1707  break;
1708  }
1709 
1710  out << ", ";
1711  switch (cdata->_format) {
1712  case F_color_index:
1713  out << "color_index";
1714  break;
1715  case F_depth_stencil:
1716  out << "depth_stencil";
1717  break;
1718  case F_depth_component:
1719  out << "depth_component";
1720  break;
1721  case F_depth_component16:
1722  out << "depth_component16";
1723  break;
1724  case F_depth_component24:
1725  out << "depth_component24";
1726  break;
1727  case F_depth_component32:
1728  out << "depth_component32";
1729  break;
1730 
1731  case F_rgba:
1732  out << "rgba";
1733  break;
1734  case F_rgbm:
1735  out << "rgbm";
1736  break;
1737  case F_rgba32:
1738  out << "rgba32";
1739  break;
1740  case F_rgba16:
1741  out << "rgba16";
1742  break;
1743  case F_rgba12:
1744  out << "rgba12";
1745  break;
1746  case F_rgba8:
1747  out << "rgba8";
1748  break;
1749  case F_rgba4:
1750  out << "rgba4";
1751  break;
1752 
1753  case F_rgb:
1754  out << "rgb";
1755  break;
1756  case F_rgb12:
1757  out << "rgb12";
1758  break;
1759  case F_rgb8:
1760  out << "rgb8";
1761  break;
1762  case F_rgb5:
1763  out << "rgb5";
1764  break;
1765  case F_rgba5:
1766  out << "rgba5";
1767  break;
1768  case F_rgb332:
1769  out << "rgb332";
1770  break;
1771 
1772  case F_red:
1773  out << "red";
1774  break;
1775  case F_green:
1776  out << "green";
1777  break;
1778  case F_blue:
1779  out << "blue";
1780  break;
1781  case F_alpha:
1782  out << "alpha";
1783  break;
1784  case F_luminance:
1785  out << "luminance";
1786  break;
1787  case F_luminance_alpha:
1788  out << "luminance_alpha";
1789  break;
1790  case F_luminance_alphamask:
1791  out << "luminance_alphamask";
1792  break;
1793 
1794  case F_r16:
1795  out << "r16";
1796  break;
1797  case F_rg16:
1798  out << "rg16";
1799  break;
1800  case F_rgb16:
1801  out << "rgb16";
1802  break;
1803 
1804  case F_srgb:
1805  out << "srgb";
1806  break;
1807  case F_srgb_alpha:
1808  out << "srgb_alpha";
1809  break;
1810  case F_sluminance:
1811  out << "sluminance";
1812  break;
1813  case F_sluminance_alpha:
1814  out << "sluminance_alpha";
1815  break;
1816 
1817  case F_r32i:
1818  out << "r32i";
1819  break;
1820 
1821  case F_r32:
1822  out << "r32";
1823  break;
1824  case F_rg32:
1825  out << "rg32";
1826  break;
1827  case F_rgb32:
1828  out << "rgb32";
1829  break;
1830 
1831  case F_r8i:
1832  out << "r8i";
1833  break;
1834  case F_rg8i:
1835  out << "rg8i";
1836  break;
1837  case F_rgb8i:
1838  out << "rgb8i";
1839  break;
1840  case F_rgba8i:
1841  out << "rgba8i";
1842  break;
1843  case F_r11_g11_b10:
1844  out << "r11_g11_b10";
1845  break;
1846  case F_rgb9_e5:
1847  out << "rgb9_e5";
1848  break;
1849  case F_rgb10_a2:
1850  out << "rgb10_a2";
1851  break;
1852 
1853  case F_rg:
1854  out << "rg";
1855  break;
1856 
1857  case F_r16i:
1858  out << "r16i";
1859  break;
1860  case F_rg16i:
1861  out << "rg16i";
1862  break;
1863  case F_rgb16i:
1864  out << "rgb16i";
1865  break;
1866  case F_rgba16i:
1867  out << "rgba16i";
1868  break;
1869 
1870  case F_rg32i:
1871  out << "rg32i";
1872  break;
1873  case F_rgb32i:
1874  out << "rgb32i";
1875  break;
1876  case F_rgba32i:
1877  out << "rgba32i";
1878  break;
1879  }
1880 
1881  if (cdata->_compression != CM_default) {
1882  out << ", compression " << cdata->_compression;
1883  }
1884  out << "\n";
1885 
1886  indent(out, indent_level + 2);
1887 
1888  cdata->_default_sampler.output(out);
1889 
1890  if (do_has_ram_image(cdata)) {
1891  indent(out, indent_level + 2)
1892  << do_get_ram_image_size(cdata) << " bytes in ram, compression "
1893  << cdata->_ram_image_compression << "\n";
1894 
1895  if (cdata->_ram_images.size() > 1) {
1896  int count = 0;
1897  size_t total_size = 0;
1898  for (size_t n = 1; n < cdata->_ram_images.size(); ++n) {
1899  if (!cdata->_ram_images[n]._image.empty()) {
1900  ++count;
1901  total_size += cdata->_ram_images[n]._image.size();
1902  } else {
1903  // Stop at the first gap.
1904  break;
1905  }
1906  }
1907  indent(out, indent_level + 2)
1908  << count
1909  << " mipmap levels also present in ram (" << total_size
1910  << " bytes).\n";
1911  }
1912 
1913  } else {
1914  indent(out, indent_level + 2)
1915  << "no ram image\n";
1916  }
1917 
1918  if (!cdata->_simple_ram_image._image.empty()) {
1919  indent(out, indent_level + 2)
1920  << "simple image: " << cdata->_simple_x_size << " x "
1921  << cdata->_simple_y_size << ", "
1922  << cdata->_simple_ram_image._image.size() << " bytes\n";
1923  }
1924 }
1925 
1926 
1927 /**
1928  * Changes the size of the texture, padding if necessary, and setting the pad
1929  * region as well.
1930  */
1932 set_size_padded(int x, int y, int z) {
1933  CDWriter cdata(_cycler, true);
1934  if (do_get_auto_texture_scale(cdata) != ATS_none) {
1935  do_set_x_size(cdata, up_to_power_2(x));
1936  do_set_y_size(cdata, up_to_power_2(y));
1937 
1938  if (cdata->_texture_type == TT_3d_texture) {
1939  // Only pad 3D textures. It does not make sense to do so for cube maps
1940  // or 2D texture arrays.
1941  do_set_z_size(cdata, up_to_power_2(z));
1942  } else {
1943  do_set_z_size(cdata, z);
1944  }
1945  } else {
1946  do_set_x_size(cdata, x);
1947  do_set_y_size(cdata, y);
1948  do_set_z_size(cdata, z);
1949  }
1950  do_set_pad_size(cdata,
1951  cdata->_x_size - x,
1952  cdata->_y_size - y,
1953  cdata->_z_size - z);
1954 }
1955 
1956 /**
1957  * Specifies the size of the texture as it exists in its original disk file,
1958  * before any Panda scaling.
1959  */
1961 set_orig_file_size(int x, int y, int z) {
1962  CDWriter cdata(_cycler, true);
1963  cdata->_orig_file_x_size = x;
1964  cdata->_orig_file_y_size = y;
1965 
1966  nassertv(z == cdata->_z_size);
1967 }
1968 
1969 /**
1970  * Creates a context for the texture on the particular GSG, if it does not
1971  * already exist. Returns the new (or old) TextureContext. This assumes that
1972  * the GraphicsStateGuardian is the currently active rendering context and
1973  * that it is ready to accept new textures. If this is not necessarily the
1974  * case, you should use prepare() instead.
1975  *
1976  * Normally, this is not called directly except by the GraphicsStateGuardian;
1977  * a texture does not need to be explicitly prepared by the user before it may
1978  * be rendered.
1979  */
1981 prepare_now(int view,
1982  PreparedGraphicsObjects *prepared_objects,
1984  MutexHolder holder(_lock);
1985  CDReader cdata(_cycler);
1986 
1987  // Don't exceed the actual number of views.
1988  view = max(min(view, cdata->_num_views - 1), 0);
1989 
1990  // Get the list of PreparedGraphicsObjects for this view.
1991  Contexts &contexts = _prepared_views[prepared_objects];
1992  Contexts::const_iterator pvi;
1993  pvi = contexts.find(view);
1994  if (pvi != contexts.end()) {
1995  return (*pvi).second;
1996  }
1997 
1998  TextureContext *tc = prepared_objects->prepare_texture_now(this, view, gsg);
1999  contexts[view] = tc;
2000 
2001  return tc;
2002 }
2003 
2004 /**
2005  * Returns the smallest power of 2 greater than or equal to value.
2006  */
2008 up_to_power_2(int value) {
2009  if (value <= 1) {
2010  return 1;
2011  }
2012  int bit = get_next_higher_bit(((unsigned int)value) - 1);
2013  return (1 << bit);
2014 }
2015 
2016 /**
2017  * Returns the largest power of 2 less than or equal to value.
2018  */
2020 down_to_power_2(int value) {
2021  if (value <= 1) {
2022  return 1;
2023  }
2024  int bit = get_next_higher_bit(((unsigned int)value) >> 1);
2025  return (1 << bit);
2026 }
2027 
2028 /**
2029  * Asks the PNMImage to change its scale when it reads the image, according to
2030  * the whims of the Config.prc file.
2031  *
2032  * For most efficient results, this method should be called after
2033  * pnmimage.read_header() has been called, but before pnmimage.read(). This
2034  * method may also be called after pnmimage.read(), i.e. when the pnmimage is
2035  * already loaded; in this case it will rescale the image on the spot. Also
2036  * see rescale_texture().
2037  */
2039 consider_rescale(PNMImage &pnmimage) {
2040  consider_rescale(pnmimage, get_name(), get_auto_texture_scale());
2041 }
2042 
2043 /**
2044  * Asks the PNMImage to change its scale when it reads the image, according to
2045  * the whims of the Config.prc file.
2046  *
2047  * For most efficient results, this method should be called after
2048  * pnmimage.read_header() has been called, but before pnmimage.read(). This
2049  * method may also be called after pnmimage.read(), i.e. when the pnmimage is
2050  * already loaded; in this case it will rescale the image on the spot. Also
2051  * see rescale_texture().
2052  */
2054 consider_rescale(PNMImage &pnmimage, const string &name, AutoTextureScale auto_texture_scale) {
2055  int new_x_size = pnmimage.get_x_size();
2056  int new_y_size = pnmimage.get_y_size();
2057  if (adjust_size(new_x_size, new_y_size, name, false, auto_texture_scale)) {
2058  if (pnmimage.is_valid()) {
2059  // The image is already loaded. Rescale on the spot.
2060  PNMImage new_image(new_x_size, new_y_size, pnmimage.get_num_channels(),
2061  pnmimage.get_maxval(), pnmimage.get_type(),
2062  pnmimage.get_color_space());
2063  new_image.quick_filter_from(pnmimage);
2064  pnmimage.take_from(new_image);
2065  } else {
2066  // Rescale while reading. Some image types (e.g. jpeg) can take
2067  // advantage of this.
2068  pnmimage.set_read_size(new_x_size, new_y_size);
2069  }
2070  }
2071 }
2072 
/**
 * Returns the indicated TextureType converted to a string word.
 *
 * The words returned here are the same ones accepted by
 * string_texture_type(); keep the two functions in sync when adding types.
 */
string Texture::
format_texture_type(TextureType tt) {
  switch (tt) {
  case TT_1d_texture:
    return "1d_texture";
  case TT_2d_texture:
    return "2d_texture";
  case TT_3d_texture:
    return "3d_texture";
  case TT_2d_texture_array:
    return "2d_texture_array";
  case TT_cube_map:
    return "cube_map";
  case TT_cube_map_array:
    return "cube_map_array";
  case TT_buffer_texture:
    return "buffer_texture";
  case TT_1d_texture_array:
    return "1d_texture_array";
  }
  // Unknown enum value (e.g. read from a newer file format).
  return "**invalid**";
}
2098 
2099 /**
2100  * Returns the TextureType corresponding to the indicated string word.
2101  */
2102 Texture::TextureType Texture::
2103 string_texture_type(const string &str) {
2104  if (cmp_nocase(str, "1d_texture") == 0) {
2105  return TT_1d_texture;
2106  } else if (cmp_nocase(str, "2d_texture") == 0) {
2107  return TT_2d_texture;
2108  } else if (cmp_nocase(str, "3d_texture") == 0) {
2109  return TT_3d_texture;
2110  } else if (cmp_nocase(str, "2d_texture_array") == 0) {
2111  return TT_2d_texture_array;
2112  } else if (cmp_nocase(str, "cube_map") == 0) {
2113  return TT_cube_map;
2114  } else if (cmp_nocase(str, "cube_map_array") == 0) {
2115  return TT_cube_map_array;
2116  } else if (cmp_nocase(str, "buffer_texture") == 0) {
2117  return TT_buffer_texture;
2118  }
2119 
2120  gobj_cat->error()
2121  << "Invalid Texture::TextureType value: " << str << "\n";
2122  return TT_2d_texture;
2123 }
2124 
2125 /**
2126  * Returns the indicated ComponentType converted to a string word.
2127  */
2129 format_component_type(ComponentType ct) {
2130  switch (ct) {
2131  case T_unsigned_byte:
2132  return "unsigned_byte";
2133  case T_unsigned_short:
2134  return "unsigned_short";
2135  case T_float:
2136  return "float";
2137  case T_unsigned_int_24_8:
2138  return "unsigned_int_24_8";
2139  case T_int:
2140  return "int";
2141  case T_byte:
2142  return "unsigned_byte";
2143  case T_short:
2144  return "short";
2145  case T_half_float:
2146  return "half_float";
2147  case T_unsigned_int:
2148  return "unsigned_int";
2149  }
2150 
2151  return "**invalid**";
2152 }
2153 
/**
 * Returns the ComponentType corresponding to the indicated string word, as
 * produced by format_component_type().  Comparison is case-insensitive.
 *
 * Logs an error and returns T_unsigned_byte if the word is not recognized.
 */
Texture::ComponentType Texture::
string_component_type(const string &str) {
  if (cmp_nocase(str, "unsigned_byte") == 0) {
    return T_unsigned_byte;
  } else if (cmp_nocase(str, "unsigned_short") == 0) {
    return T_unsigned_short;
  } else if (cmp_nocase(str, "float") == 0) {
    return T_float;
  } else if (cmp_nocase(str, "unsigned_int_24_8") == 0) {
    return T_unsigned_int_24_8;
  } else if (cmp_nocase(str, "int") == 0) {
    return T_int;
  } else if (cmp_nocase(str, "byte") == 0) {
    return T_byte;
  } else if (cmp_nocase(str, "short") == 0) {
    return T_short;
  } else if (cmp_nocase(str, "half_float") == 0) {
    return T_half_float;
  } else if (cmp_nocase(str, "unsigned_int") == 0) {
    return T_unsigned_int;
  }

  gobj_cat->error()
    << "Invalid Texture::ComponentType value: " << str << "\n";
  return T_unsigned_byte;
}
2183 
/**
 * Returns the indicated Format converted to a string word.
 *
 * The words returned here are the ones accepted by string_format(); keep the
 * two functions in sync when adding formats.
 */
string Texture::
format_format(Format format) {
  switch (format) {
  case F_depth_stencil:
    return "depth_stencil";
  case F_depth_component:
    return "depth_component";
  case F_depth_component16:
    return "depth_component16";
  case F_depth_component24:
    return "depth_component24";
  case F_depth_component32:
    return "depth_component32";
  case F_color_index:
    return "color_index";
  case F_red:
    return "red";
  case F_green:
    return "green";
  case F_blue:
    return "blue";
  case F_alpha:
    return "alpha";
  case F_rgb:
    return "rgb";
  case F_rgb5:
    return "rgb5";
  case F_rgb8:
    return "rgb8";
  case F_rgb12:
    return "rgb12";
  case F_rgb332:
    return "rgb332";
  case F_rgba:
    return "rgba";
  case F_rgbm:
    return "rgbm";
  case F_rgba4:
    return "rgba4";
  case F_rgba5:
    return "rgba5";
  case F_rgba8:
    return "rgba8";
  case F_rgba12:
    return "rgba12";
  case F_luminance:
    return "luminance";
  case F_luminance_alpha:
    return "luminance_alpha";
  case F_luminance_alphamask:
    return "luminance_alphamask";
  case F_rgba16:
    return "rgba16";
  case F_rgba32:
    return "rgba32";
  case F_r16:
    return "r16";
  case F_rg16:
    return "rg16";
  case F_rgb16:
    return "rgb16";
  case F_srgb:
    return "srgb";
  case F_srgb_alpha:
    return "srgb_alpha";
  case F_sluminance:
    return "sluminance";
  case F_sluminance_alpha:
    return "sluminance_alpha";
  case F_r32i:
    return "r32i";
  case F_r32:
    return "r32";
  case F_rg32:
    return "rg32";
  case F_rgb32:
    return "rgb32";
  case F_r8i:
    return "r8i";
  case F_rg8i:
    return "rg8i";
  case F_rgb8i:
    return "rgb8i";
  case F_rgba8i:
    return "rgba8i";
  case F_r11_g11_b10:
    return "r11g11b10";
  case F_rgb9_e5:
    return "rgb9_e5";
  case F_rgb10_a2:
    return "rgb10_a2";
  case F_rg:
    return "rg";
  case F_r16i:
    return "r16i";
  case F_rg16i:
    return "rg16i";
  case F_rgb16i:
    return "rgb16i";
  case F_rgba16i:
    return "rgba16i";
  case F_rg32i:
    return "rg32i";
  case F_rgb32i:
    return "rgb32i";
  case F_rgba32i:
    return "rgba32i";
  }
  // Unknown enum value (e.g. read from a newer file format).
  return "**invalid**";
}
2297 
/**
 * Returns the Format corresponding to the indicated string word, as produced
 * by format_format().  Comparison is case-insensitive; several formats also
 * accept a per-channel alias (e.g. "r8g8b8" for "rgb8", "d16" for
 * "depth_component16").  The cmp_nocase_uh variant additionally treats
 * underscores and hyphens as equivalent.
 *
 * Logs an error and returns F_rgba if the word is not recognized.
 */
Texture::Format Texture::
string_format(const string &str) {
  if (cmp_nocase(str, "depth_stencil") == 0) {
    return F_depth_stencil;
  } else if (cmp_nocase(str, "depth_component") == 0) {
    return F_depth_component;
  } else if (cmp_nocase(str, "depth_component16") == 0 || cmp_nocase(str, "d16") == 0) {
    return F_depth_component16;
  } else if (cmp_nocase(str, "depth_component24") == 0 || cmp_nocase(str, "d24") == 0) {
    return F_depth_component24;
  } else if (cmp_nocase(str, "depth_component32") == 0 || cmp_nocase(str, "d32") == 0) {
    return F_depth_component32;
  } else if (cmp_nocase(str, "color_index") == 0) {
    return F_color_index;
  } else if (cmp_nocase(str, "red") == 0) {
    return F_red;
  } else if (cmp_nocase(str, "green") == 0) {
    return F_green;
  } else if (cmp_nocase(str, "blue") == 0) {
    return F_blue;
  } else if (cmp_nocase(str, "alpha") == 0) {
    return F_alpha;
  } else if (cmp_nocase(str, "rgb") == 0) {
    return F_rgb;
  } else if (cmp_nocase(str, "rgb5") == 0) {
    return F_rgb5;
  } else if (cmp_nocase(str, "rgb8") == 0 || cmp_nocase(str, "r8g8b8") == 0) {
    return F_rgb8;
  } else if (cmp_nocase(str, "rgb12") == 0) {
    return F_rgb12;
  } else if (cmp_nocase(str, "rgb332") == 0 || cmp_nocase(str, "r3g3b2") == 0) {
    return F_rgb332;
  } else if (cmp_nocase(str, "rgba") == 0) {
    return F_rgba;
  } else if (cmp_nocase(str, "rgbm") == 0) {
    return F_rgbm;
  } else if (cmp_nocase(str, "rgba4") == 0) {
    return F_rgba4;
  } else if (cmp_nocase(str, "rgba5") == 0) {
    return F_rgba5;
  } else if (cmp_nocase(str, "rgba8") == 0 || cmp_nocase(str, "r8g8b8a8") == 0) {
    return F_rgba8;
  } else if (cmp_nocase(str, "rgba12") == 0) {
    return F_rgba12;
  } else if (cmp_nocase(str, "luminance") == 0) {
    return F_luminance;
  } else if (cmp_nocase(str, "luminance_alpha") == 0) {
    return F_luminance_alpha;
  } else if (cmp_nocase(str, "luminance_alphamask") == 0) {
    return F_luminance_alphamask;
  } else if (cmp_nocase(str, "rgba16") == 0 || cmp_nocase(str, "r16g16b16a16") == 0) {
    return F_rgba16;
  } else if (cmp_nocase(str, "rgba32") == 0 || cmp_nocase(str, "r32g32b32a32") == 0) {
    return F_rgba32;
  } else if (cmp_nocase(str, "r16") == 0 || cmp_nocase(str, "red16") == 0) {
    return F_r16;
  } else if (cmp_nocase(str, "r16i") == 0) {
    return F_r16i;
  } else if (cmp_nocase(str, "rg16") == 0 || cmp_nocase(str, "r16g16") == 0) {
    return F_rg16;
  } else if (cmp_nocase(str, "rgb16") == 0 || cmp_nocase(str, "r16g16b16") == 0) {
    return F_rgb16;
  } else if (cmp_nocase(str, "srgb") == 0) {
    return F_srgb;
  } else if (cmp_nocase(str, "srgb_alpha") == 0) {
    return F_srgb_alpha;
  } else if (cmp_nocase(str, "sluminance") == 0) {
    return F_sluminance;
  } else if (cmp_nocase(str, "sluminance_alpha") == 0) {
    return F_sluminance_alpha;
  } else if (cmp_nocase(str, "r32i") == 0) {
    return F_r32i;
  } else if (cmp_nocase(str, "r32") == 0 || cmp_nocase(str, "red32") == 0) {
    return F_r32;
  } else if (cmp_nocase(str, "rg32") == 0 || cmp_nocase(str, "r32g32") == 0) {
    return F_rg32;
  } else if (cmp_nocase(str, "rgb32") == 0 || cmp_nocase(str, "r32g32b32") == 0) {
    return F_rgb32;
  } else if (cmp_nocase_uh(str, "r8i") == 0) {
    return F_r8i;
  } else if (cmp_nocase_uh(str, "rg8i") == 0 || cmp_nocase_uh(str, "r8g8i") == 0) {
    return F_rg8i;
  } else if (cmp_nocase_uh(str, "rgb8i") == 0 || cmp_nocase_uh(str, "r8g8b8i") == 0) {
    return F_rgb8i;
  } else if (cmp_nocase_uh(str, "rgba8i") == 0 || cmp_nocase_uh(str, "r8g8b8a8i") == 0) {
    return F_rgba8i;
  } else if (cmp_nocase(str, "r11g11b10") == 0) {
    return F_r11_g11_b10;
  } else if (cmp_nocase(str, "rgb9_e5") == 0) {
    return F_rgb9_e5;
  } else if (cmp_nocase_uh(str, "rgb10_a2") == 0 || cmp_nocase(str, "r10g10b10a2") == 0) {
    return F_rgb10_a2;
  } else if (cmp_nocase_uh(str, "rg") == 0) {
    return F_rg;
  } else if (cmp_nocase_uh(str, "r16i") == 0) {
    // NOTE(review): "r16i" is already matched (with cmp_nocase) further up,
    // so this branch is unreachable.  Benign, but could be removed.
    return F_r16i;
  } else if (cmp_nocase_uh(str, "rg16i") == 0 || cmp_nocase_uh(str, "r16g16i") == 0) {
    return F_rg16i;
  } else if (cmp_nocase_uh(str, "rgb16i") == 0 || cmp_nocase_uh(str, "r16g16b16i") == 0) {
    return F_rgb16i;
  } else if (cmp_nocase_uh(str, "rgba16i") == 0 || cmp_nocase_uh(str, "r16g16b16a16i") == 0) {
    return F_rgba16i;
  } else if (cmp_nocase_uh(str, "rg32i") == 0 || cmp_nocase_uh(str, "r32g32i") == 0) {
    return F_rg32i;
  } else if (cmp_nocase_uh(str, "rgb32i") == 0 || cmp_nocase_uh(str, "r32g32b32i") == 0) {
    return F_rgb32i;
  } else if (cmp_nocase_uh(str, "rgba32i") == 0 || cmp_nocase_uh(str, "r32g32b32a32i") == 0) {
    return F_rgba32i;
  }

  gobj_cat->error()
    << "Invalid Texture::Format value: " << str << "\n";
  return F_rgba;
}
2415 
/**
 * Returns the indicated CompressionMode converted to a string word.
 *
 * The words returned here are the same ones accepted by
 * string_compression_mode(); keep the two functions in sync.
 */
string Texture::
format_compression_mode(CompressionMode cm) {
  switch (cm) {
  case CM_default:
    return "default";
  case CM_off:
    return "off";
  case CM_on:
    return "on";
  case CM_fxt1:
    return "fxt1";
  case CM_dxt1:
    return "dxt1";
  case CM_dxt2:
    return "dxt2";
  case CM_dxt3:
    return "dxt3";
  case CM_dxt4:
    return "dxt4";
  case CM_dxt5:
    return "dxt5";
  case CM_pvr1_2bpp:
    return "pvr1_2bpp";
  case CM_pvr1_4bpp:
    return "pvr1_4bpp";
  case CM_rgtc:
    return "rgtc";
  case CM_etc1:
    return "etc1";
  case CM_etc2:
    return "etc2";
  case CM_eac:
    return "eac";
  }

  // Unknown enum value.
  return "**invalid**";
}
2456 
/**
 * Returns the CompressionMode value associated with the given string
 * representation, as produced by format_compression_mode().  Comparison is
 * case-insensitive and treats underscores/hyphens as equivalent.
 *
 * Logs an error and returns CM_default if the word is not recognized.
 */
Texture::CompressionMode Texture::
string_compression_mode(const string &str) {
  if (cmp_nocase_uh(str, "default") == 0) {
    return CM_default;
  } else if (cmp_nocase_uh(str, "off") == 0) {
    return CM_off;
  } else if (cmp_nocase_uh(str, "on") == 0) {
    return CM_on;
  } else if (cmp_nocase_uh(str, "fxt1") == 0) {
    return CM_fxt1;
  } else if (cmp_nocase_uh(str, "dxt1") == 0) {
    return CM_dxt1;
  } else if (cmp_nocase_uh(str, "dxt2") == 0) {
    return CM_dxt2;
  } else if (cmp_nocase_uh(str, "dxt3") == 0) {
    return CM_dxt3;
  } else if (cmp_nocase_uh(str, "dxt4") == 0) {
    return CM_dxt4;
  } else if (cmp_nocase_uh(str, "dxt5") == 0) {
    return CM_dxt5;
  } else if (cmp_nocase_uh(str, "pvr1_2bpp") == 0) {
    return CM_pvr1_2bpp;
  } else if (cmp_nocase_uh(str, "pvr1_4bpp") == 0) {
    return CM_pvr1_4bpp;
  } else if (cmp_nocase_uh(str, "rgtc") == 0) {
    return CM_rgtc;
  } else if (cmp_nocase_uh(str, "etc1") == 0) {
    return CM_etc1;
  } else if (cmp_nocase_uh(str, "etc2") == 0) {
    return CM_etc2;
  } else if (cmp_nocase_uh(str, "eac") == 0) {
    return CM_eac;
  }

  gobj_cat->error()
    << "Invalid Texture::CompressionMode value: " << str << "\n";
  return CM_default;
}
2499 
2500 
2501 /**
2502  * Returns the indicated QualityLevel converted to a string word.
2503  */
2505 format_quality_level(QualityLevel ql) {
2506  switch (ql) {
2507  case QL_default:
2508  return "default";
2509  case QL_fastest:
2510  return "fastest";
2511  case QL_normal:
2512  return "normal";
2513  case QL_best:
2514  return "best";
2515  }
2516 
2517  return "**invalid**";
2518 }
2519 
2520 /**
2521  * Returns the QualityLevel value associated with the given string
2522  * representation.
2523  */
2524 Texture::QualityLevel Texture::
2525 string_quality_level(const string &str) {
2526  if (cmp_nocase(str, "default") == 0) {
2527  return QL_default;
2528  } else if (cmp_nocase(str, "fastest") == 0) {
2529  return QL_fastest;
2530  } else if (cmp_nocase(str, "normal") == 0) {
2531  return QL_normal;
2532  } else if (cmp_nocase(str, "best") == 0) {
2533  return QL_best;
2534  }
2535 
2536  gobj_cat->error()
2537  << "Invalid Texture::QualityLevel value: " << str << "\n";
2538  return QL_default;
2539 }
2540 
/**
 * This method is called by the GraphicsEngine at the beginning of the frame
 * *after* a texture has been successfully uploaded to graphics memory.  It is
 * intended as a callback so the texture can release its RAM image, if
 * _keep_ram_image is false.
 *
 * This is called indirectly when the GSG calls
 * GraphicsEngine::texture_uploaded().
 */
void Texture::
texture_uploaded() {
  CDLockedReader cdata(_cycler);

  // Only drop the pixels if neither the global keep-texture-ram config
  // variable nor this texture's own _keep_ram_image flag asks us to keep
  // them.
  if (!keep_texture_ram && !cdata->_keep_ram_image) {
    // Once we have prepared the texture, we can generally safely remove the
    // pixels from main RAM.  The GSG is now responsible for remembering what
    // it looks like.

    // Upgrade the read lock to a write lock on the same cycler data.
    // NOTE(review): the third argument `false` presumably suppresses
    // upstream pipeline propagation of this change -- confirm against the
    // CDWriter documentation.
    CDWriter cdataw(_cycler, cdata, false);
    if (gobj_cat.is_debug()) {
      gobj_cat.debug()
        << "Dumping RAM for texture " << get_name() << "\n";
    }
    do_clear_ram_image(cdataw);
  }
}
2567 
/**
 * Should be overridden by derived classes to return true if cull_callback()
 * has been defined.  Otherwise, returns false to indicate cull_callback()
 * does not need to be called for this node during the cull traversal.
 */
bool Texture::
has_cull_callback() const {
  // A plain Texture performs no per-frame cull work.
  return false;
}
2577 
/**
 * If has_cull_callback() returns true, this function will be called during
 * the cull traversal to perform any additional operations that should be
 * performed at cull time.
 *
 * This is called each time the Texture is discovered applied to a Geom in the
 * traversal.  It should return true if the Geom is visible, false if it
 * should be omitted.
 */
bool Texture::
cull_callback(CullTraverser *, const CullTraverserData &) const {
  // The base class never hides anything.
  return true;
}
2591 
/**
 * A factory function to make a new Texture, used to pass to the TexturePool.
 *
 * The result is returned as a reference-counted PT(Texture), so the caller
 * takes ownership of the new object.
 */
PT(Texture) Texture::
make_texture() {
  return new Texture;
}
2599 
2600 /**
2601  * Returns true if the indicated component type is unsigned, false otherwise.
2602  */
2604 is_unsigned(Texture::ComponentType ctype) {
2605  return (ctype == T_unsigned_byte ||
2606  ctype == T_unsigned_short ||
2607  ctype == T_unsigned_int_24_8 ||
2608  ctype == T_unsigned_int);
2609 }
2610 
2611 /**
2612  * Returns true if the indicated compression mode is one of the specific
2613  * compression types, false otherwise.
2614  */
2616 is_specific(Texture::CompressionMode compression) {
2617  switch (compression) {
2618  case CM_default:
2619  case CM_off:
2620  case CM_on:
2621  return false;
2622 
2623  default:
2624  return true;
2625  }
2626 }
2627 
/**
 * Returns true if the indicated format includes alpha, false otherwise.
 *
 * This covers both dedicated alpha channels and combined formats whose
 * fourth component is alpha (including the 2-bit alpha of F_rgb10_a2 and the
 * 1-bit mask alpha of F_rgbm).
 */
bool Texture::
has_alpha(Format format) {
  switch (format) {
  case F_alpha:
  case F_rgba:
  case F_rgbm:
  case F_rgba4:
  case F_rgba5:
  case F_rgba8:
  case F_rgba12:
  case F_rgba16:
  case F_rgba32:
  case F_luminance_alpha:
  case F_luminance_alphamask:
  case F_srgb_alpha:
  case F_sluminance_alpha:
  case F_rgba8i:
  case F_rgb10_a2:
  case F_rgba16i:
  case F_rgba32i:
    return true;

  default:
    return false;
  }
}
2657 
2658 /**
2659  * Returns true if the indicated format includes a binary alpha only, false
2660  * otherwise.
2661  */
2663 has_binary_alpha(Format format) {
2664  switch (format) {
2665  case F_rgbm:
2666  return true;
2667 
2668  default:
2669  return false;
2670  }
2671 }
2672 
2673 /**
2674  * Returns true if the indicated format is in the sRGB color space, false
2675  * otherwise.
2676  */
2678 is_srgb(Format format) {
2679  switch (format) {
2680  case F_srgb:
2681  case F_srgb_alpha:
2682  case F_sluminance:
2683  case F_sluminance_alpha:
2684  return true;
2685 
2686  default:
2687  return false;
2688  }
2689 }
2690 
/**
 * Returns true if the indicated format is an integer format, false otherwise.
 *
 * Integer formats (the *_i variants) store unnormalized integer values
 * rather than the usual normalized fixed-point or floating-point data.
 */
bool Texture::
is_integer(Format format) {
  switch (format) {
  case F_r32i:
  case F_r8i:
  case F_rg8i:
  case F_rgb8i:
  case F_rgba8i:
  case F_r16i:
  case F_rg16i:
  case F_rgb16i:
  case F_rgba16i:
  case F_rg32i:
  case F_rgb32i:
  case F_rgba32i:
    return true;

  default:
    return false;
  }
}
2715 
/**
 * Computes the proper size of the texture, based on the original size, the
 * filename, and the resizing whims of the config file.
 *
 * x_size and y_size should be loaded with the texture image's original size
 * on disk.  On return, they will be loaded with the texture's in-memory
 * target size.  The return value is true if the size has been adjusted, or
 * false if it is the same.
 */
bool Texture::
adjust_size(int &x_size, int &y_size, const string &name,
            bool for_padding, AutoTextureScale auto_texture_scale) {
  // A texture whose name matches any exclude-texture-scale glob pattern is
  // exempt from the global texture-scale factor and the max-dimension clamp
  // (but not from the power-of-2 / square adjustments below).
  bool exclude = false;
  int num_excludes = exclude_texture_scale.get_num_unique_values();
  for (int i = 0; i < num_excludes && !exclude; ++i) {
    GlobPattern pat(exclude_texture_scale.get_unique_value(i));
    if (pat.matches(name)) {
      exclude = true;
    }
  }

  int new_x_size = x_size;
  int new_y_size = y_size;

  if (!exclude) {
    // Apply the global texture-scale factor, rounding to nearest.
    new_x_size = (int)cfloor(new_x_size * texture_scale + 0.5);
    new_y_size = (int)cfloor(new_y_size * texture_scale + 0.5);

    // Don't auto-scale below 4 in either dimension.  This causes problems for
    // DirectX and texture compression.
    new_x_size = min(max(new_x_size, (int)texture_scale_limit), x_size);
    new_y_size = min(max(new_y_size, (int)texture_scale_limit), y_size);
  }

  // ATS_unspecified defers to the global textures-power-2 setting.
  AutoTextureScale ats = auto_texture_scale;
  if (ats == ATS_unspecified) {
    ats = get_textures_power_2();
  }
  if (!for_padding && ats == ATS_pad) {
    // If we're not calculating the padding size--that is, we're calculating
    // the initial scaling size instead--then ignore ATS_pad, and treat it the
    // same as ATS_none.
    ats = ATS_none;
  }

  switch (ats) {
  case ATS_down:
    new_x_size = down_to_power_2(new_x_size);
    new_y_size = down_to_power_2(new_y_size);
    break;

  case ATS_up:
  case ATS_pad:
    new_x_size = up_to_power_2(new_x_size);
    new_y_size = up_to_power_2(new_y_size);
    break;

  case ATS_none:
  case ATS_unspecified:
    break;
  }

  // Optionally force the texture square, shrinking or growing to match the
  // smaller or larger dimension as configured.
  ats = textures_square.get_value();
  if (!for_padding && ats == ATS_pad) {
    ats = ATS_none;
  }
  switch (ats) {
  case ATS_down:
    new_x_size = new_y_size = min(new_x_size, new_y_size);
    break;

  case ATS_up:
  case ATS_pad:
    new_x_size = new_y_size = max(new_x_size, new_y_size);
    break;

  case ATS_none:
  case ATS_unspecified:
    break;
  }

  if (!exclude) {
    // Clamp to the configured maximum; a negative config value means "ask
    // the default GSG for its hardware limit".
    int max_dimension = max_texture_dimension;

    if (max_dimension < 0) {
      // NOTE(review): the declaration of `gsg` (presumably obtained from
      // GraphicsStateGuardianBase::get_default_gsg()) appears to have been
      // dropped from this listing -- confirm against the upstream source.
      if (gsg != nullptr) {
        max_dimension = gsg->get_max_texture_dimension();
      }
    }

    if (max_dimension > 0) {
      new_x_size = min(new_x_size, (int)max_dimension);
      new_y_size = min(new_y_size, (int)max_dimension);
    }
  }

  if (x_size != new_x_size || y_size != new_y_size) {
    x_size = new_x_size;
    y_size = new_y_size;
    return true;
  }

  return false;
}
2821 
/**
 * May be called prior to calling read_txo() or any bam-related Texture-
 * creating callback, to ensure that the proper dynamic libraries for a
 * Texture of the current class type, and the indicated filename, have been
 * already loaded.
 *
 * This is a low-level function that should not normally need to be called
 * directly by the user.
 *
 * Note that for best results you must first create a Texture object of the
 * appropriate class type for your filename, for instance with
 * TexturePool::make_texture().
 */
void Texture::
ensure_loader_type(const Filename &filename) {
  // For a plain Texture type, this doesn't need to do anything.  Derived
  // classes may override it to pull in their backing libraries.
}
2839 
/**
 * Called by TextureContext to give the Texture a chance to mark itself dirty
 * before rendering, if necessary.
 */
void Texture::
reconsider_dirty() {
  // A plain Texture has no deferred state to flush.
}
2847 
2848 /**
2849  * Works like adjust_size, but also considers the texture class. Movie
2850  * textures, for instance, always pad outwards, regardless of textures-
2851  * power-2.
2852  */
2853 bool Texture::
2854 do_adjust_this_size(const CData *cdata, int &x_size, int &y_size, const string &name,
2855  bool for_padding) const {
2856  return adjust_size(x_size, y_size, name, for_padding, cdata->_auto_texture_scale);
2857 }
2858 
/**
 * The internal implementation of the various read() methods.
 *
 * When read_pages/read_mipmaps is true, z/n are reinterpreted as upper
 * limits (z_size/n_size) for a filename-pattern search rather than as a
 * single page/mipmap index; 0 means "keep going while files exist".
 */
bool Texture::
do_read(CData *cdata, const Filename &fullpath, const Filename &alpha_fullpath,
        int primary_file_num_channels, int alpha_file_channel,
        int z, int n, bool read_pages, bool read_mipmaps,
        const LoaderOptions &options, BamCacheRecord *record) {
  PStatTimer timer(_texture_read_pcollector);

  if (options.get_auto_texture_scale() != ATS_unspecified) {
    cdata->_auto_texture_scale = options.get_auto_texture_scale();
  }

  // With neither preload flag set we only need the image header, not the
  // pixels--unless we're recording cache dependencies, which requires a
  // full read.
  bool header_only = ((options.get_texture_flags() & (LoaderOptions::TF_preload | LoaderOptions::TF_preload_simple)) == 0);
  if (record != nullptr) {
    header_only = false;
  }

  if ((z == 0 || read_pages) && (n == 0 || read_mipmaps)) {
    // When we re-read the page 0 of the base image, we clear everything and
    // start over.
    do_clear_ram_image(cdata);
  }

  // Container formats (txo/dds/ktx) carry all their pages and mipmap levels
  // internally, so they bypass the filename iteration below.
  if (is_txo_filename(fullpath)) {
    if (record != nullptr) {
      record->add_dependent_file(fullpath);
    }
    return do_read_txo_file(cdata, fullpath);
  }

  if (is_dds_filename(fullpath)) {
    if (record != nullptr) {
      record->add_dependent_file(fullpath);
    }
    return do_read_dds_file(cdata, fullpath, header_only);
  }

  if (is_ktx_filename(fullpath)) {
    if (record != nullptr) {
      record->add_dependent_file(fullpath);
    }
    return do_read_ktx_file(cdata, fullpath, header_only);
  }

  // If read_pages or read_mipmaps is specified, then z and n actually
  // indicate z_size and n_size, respectively--the numerical limits on which
  // to search for filenames.
  int z_size = z;
  int n_size = n;

  // Certain texture types have an implicit z_size.  If z_size is omitted,
  // choose an appropriate default based on the texture type.
  if (z_size == 0) {
    switch (cdata->_texture_type) {
    case TT_1d_texture:
    case TT_2d_texture:
    case TT_buffer_texture:
      z_size = 1;
      break;

    case TT_cube_map:
      z_size = 6;
      break;

    default:
      break;
    }
  }

  int num_views = 0;
  if (options.get_texture_flags() & LoaderOptions::TF_multiview) {
    // We'll be loading a multiview texture.  Each view is stored as an
    // additional run of pages, so page reading is implied.
    read_pages = true;
    if (options.get_texture_num_views() != 0) {
      num_views = options.get_texture_num_views();
      do_set_num_views(cdata, num_views);
    }
  }

  // NOTE(review): the declaration of `vfs` (presumably
  // VirtualFileSystem::get_global_ptr()) appears to have been dropped from
  // this listing -- confirm against the upstream source.

  if (read_pages && read_mipmaps) {
    // Read a sequence of pages * mipmap levels.  The filename needs two
    // independent hash sequences: the outer one indexes pages (z), the
    // inner one mipmap levels (n).
    Filename fullpath_pattern = Filename::pattern_filename(fullpath);
    Filename alpha_fullpath_pattern = Filename::pattern_filename(alpha_fullpath);
    do_set_z_size(cdata, z_size);

    n = 0;
    while (true) {
      // For mipmap level 0, the total number of pages might be determined by
      // the number of files we find.  After mipmap level 0, though, the
      // number of pages is predetermined.
      if (n != 0) {
        z_size = do_get_expected_mipmap_z_size(cdata, n);
      }

      z = 0;

      Filename n_pattern = Filename::pattern_filename(fullpath_pattern.get_filename_index(z));
      Filename alpha_n_pattern = Filename::pattern_filename(alpha_fullpath_pattern.get_filename_index(z));

      if (!n_pattern.has_hash()) {
        gobj_cat.error()
          << "Filename requires two different hash sequences: " << fullpath
          << "\n";
        return false;
      }

      Filename file = n_pattern.get_filename_index(n);
      Filename alpha_file = alpha_n_pattern.get_filename_index(n);

      if ((n_size == 0 && (vfs->exists(file) || n == 0)) ||
          (n_size != 0 && n < n_size)) {
        // Continue through the loop.
      } else {
        // We've reached the end of the mipmap sequence.
        break;
      }

      // Iterate pages (and views) within this mipmap level.
      int num_pages = z_size * num_views;
      while ((num_pages == 0 && (vfs->exists(file) || z == 0)) ||
             (num_pages != 0 && z < num_pages)) {
        if (!do_read_one(cdata, file, alpha_file, z, n, primary_file_num_channels,
                         alpha_file_channel, options, header_only, record)) {
          return false;
        }
        ++z;

        n_pattern = Filename::pattern_filename(fullpath_pattern.get_filename_index(z));
        file = n_pattern.get_filename_index(n);
        alpha_file = alpha_n_pattern.get_filename_index(n);
      }

      if (n == 0 && n_size == 0) {
        // If n_size is not specified, it gets implicitly set after we read
        // the base texture image (which determines the size of the texture).
        n_size = do_get_expected_num_mipmap_levels(cdata);
      }
      ++n;
    }
    cdata->_fullpath = fullpath_pattern;
    cdata->_alpha_fullpath = alpha_fullpath_pattern;

  } else if (read_pages) {
    // Read a sequence of cube map or 3-D texture pages.
    Filename fullpath_pattern = Filename::pattern_filename(fullpath);
    Filename alpha_fullpath_pattern = Filename::pattern_filename(alpha_fullpath);
    if (!fullpath_pattern.has_hash()) {
      gobj_cat.error()
        << "Filename requires a hash mark: " << fullpath
        << "\n";
      return false;
    }

    do_set_z_size(cdata, z_size);
    z = 0;
    Filename file = fullpath_pattern.get_filename_index(z);
    Filename alpha_file = alpha_fullpath_pattern.get_filename_index(z);

    // num_pages == 0 means "read pages until the files run out".
    int num_pages = z_size * num_views;
    while ((num_pages == 0 && (vfs->exists(file) || z == 0)) ||
           (num_pages != 0 && z < num_pages)) {
      if (!do_read_one(cdata, file, alpha_file, z, 0, primary_file_num_channels,
                       alpha_file_channel, options, header_only, record)) {
        return false;
      }
      ++z;

      file = fullpath_pattern.get_filename_index(z);
      alpha_file = alpha_fullpath_pattern.get_filename_index(z);
    }
    cdata->_fullpath = fullpath_pattern;
    cdata->_alpha_fullpath = alpha_fullpath_pattern;

  } else if (read_mipmaps) {
    // Read a sequence of mipmap levels.
    Filename fullpath_pattern = Filename::pattern_filename(fullpath);
    Filename alpha_fullpath_pattern = Filename::pattern_filename(alpha_fullpath);
    if (!fullpath_pattern.has_hash()) {
      gobj_cat.error()
        << "Filename requires a hash mark: " << fullpath
        << "\n";
      return false;
    }

    n = 0;
    Filename file = fullpath_pattern.get_filename_index(n);
    Filename alpha_file = alpha_fullpath_pattern.get_filename_index(n);

    while ((n_size == 0 && (vfs->exists(file) || n == 0)) ||
           (n_size != 0 && n < n_size)) {
      if (!do_read_one(cdata, file, alpha_file, z, n,
                       primary_file_num_channels, alpha_file_channel,
                       options, header_only, record)) {
        return false;
      }
      ++n;

      if (n_size == 0 && n >= do_get_expected_num_mipmap_levels(cdata)) {
        // Don't try to read more than the requisite number of mipmap levels
        // (unless the user insisted on it for some reason).
        break;
      }

      file = fullpath_pattern.get_filename_index(n);
      alpha_file = alpha_fullpath_pattern.get_filename_index(n);
    }
    cdata->_fullpath = fullpath_pattern;
    cdata->_alpha_fullpath = alpha_fullpath_pattern;

  } else {
    // Just an ordinary read of one file.
    if (!do_read_one(cdata, fullpath, alpha_fullpath, z, n,
                     primary_file_num_channels, alpha_file_channel,
                     options, header_only, record)) {
      return false;
    }
  }

  cdata->_has_read_pages = read_pages;
  cdata->_has_read_mipmaps = read_mipmaps;
  cdata->_num_mipmap_levels_read = cdata->_ram_images.size();

  if (header_only) {
    // If we were only supposed to be checking the image header information,
    // don't let the Texture think that it's got the image now.
    do_clear_ram_image(cdata);
  } else {
    if ((options.get_texture_flags() & LoaderOptions::TF_preload) != 0) {
      // If we intend to keep the ram image around, consider compressing it
      // etc.
      bool generate_mipmaps = ((options.get_texture_flags() & LoaderOptions::TF_generate_mipmaps) != 0);
      bool allow_compression = ((options.get_texture_flags() & LoaderOptions::TF_allow_compression) != 0);
      do_consider_auto_process_ram_image(cdata, generate_mipmaps || uses_mipmaps(), allow_compression);
    }
  }

  return true;
}
3100 
/**
 * Called only from do_read(), this method reads a single image file, either
 * one page or one mipmap level.
 *
 * @param cdata       the locked pipeline data to fill in.
 * @param fullpath    resolved path of the primary (color) image file.
 * @param alpha_fullpath  optional resolved path of a separate grayscale file
 *                    to be merged in as the alpha channel; may be empty.
 * @param z           page index (or cube face / array slice) being loaded.
 * @param n           mipmap level being loaded.
 * @param primary_file_num_channels  requested channel count for the color
 *                    image (passed to consider_downgrade()).
 * @param alpha_file_channel  which channel of the alpha file supplies alpha
 *                    (0 = grayscale, 1-3 = R/G/B, 4 = its alpha channel).
 * @param options     loader options, including TF_integer / TF_float flags.
 * @param header_only if true, only the image header is examined; dummy pixel
 *                    data is substituted.
 * @param record      if non-null, the files read are added as dependencies.
 * @return true on success, false on failure.
 */
bool Texture::
do_read_one(CData *cdata, const Filename &fullpath, const Filename &alpha_fullpath,
            int z, int n, int primary_file_num_channels, int alpha_file_channel,
            const LoaderOptions &options, bool header_only, BamCacheRecord *record) {
  if (record != nullptr) {
    // A cache record only makes sense when the full image is actually loaded.
    nassertr(!header_only, false);
    record->add_dependent_file(fullpath);
  }

  PNMImage image;
  PfmFile pfm;
  PNMReader *image_reader = image.make_reader(fullpath, nullptr, false);
  if (image_reader == nullptr) {
    gobj_cat.error()
      << "Texture::read() - couldn't read: " << fullpath << endl;
    return false;
  }
  // Pull size/depth/type info from the reader without decoding pixels yet.
  image.copy_header_from(*image_reader);

  AutoTextureScale auto_texture_scale = do_get_auto_texture_scale(cdata);

  // If it's a floating-point image file, read it by default into a floating-
  // point texture.
  bool read_floating_point;
  int texture_load_type = (options.get_texture_flags() & (LoaderOptions::TF_integer | LoaderOptions::TF_float));
  switch (texture_load_type) {
  case LoaderOptions::TF_integer:
    read_floating_point = false;
    break;

  case LoaderOptions::TF_float:
    read_floating_point = true;
    break;

  default:
    // Neither TF_integer nor TF_float was specified; determine which way the
    // texture wants to be loaded.
    read_floating_point = (image_reader->is_floating_point());
    if (!alpha_fullpath.empty()) {
      // The separate-alpha-file path below only works on integer PNMImages.
      read_floating_point = false;
    }
  }

  if (header_only || textures_header_only) {
    // Header-only mode: record the on-disk size, then substitute dummy pixel
    // data of the (possibly adjusted) target size instead of decoding.
    int x_size = image.get_x_size();
    int y_size = image.get_y_size();
    if (z == 0 && n == 0) {
      cdata->_orig_file_x_size = x_size;
      cdata->_orig_file_y_size = y_size;
    }

    if (textures_header_only) {
      // In this mode, we never intend to load the actual texture image
      // anyway, so we don't even need to make the size right.
      x_size = 1;
      y_size = 1;

    } else {
      adjust_size(x_size, y_size, fullpath.get_basename(), false, auto_texture_scale);
    }

    if (read_floating_point) {
      pfm.clear(x_size, y_size, image.get_num_channels());
    } else {
      image = PNMImage(x_size, y_size, image.get_num_channels(),
                       image.get_maxval(), image.get_type(),
                       image.get_color_space());
      // Fill with an arbitrary placeholder color (light blue), opaque alpha.
      image.fill(0.2, 0.3, 1.0);
      if (image.has_alpha()) {
        image.alpha_fill(1.0);
      }
    }
    // We never handed the reader off to a read() call, so free it here.
    delete image_reader;

  } else {
    // Full read.  First decide the size to decode at.
    if (z == 0 && n == 0) {
      int x_size = image.get_x_size();
      int y_size = image.get_y_size();

      cdata->_orig_file_x_size = x_size;
      cdata->_orig_file_y_size = y_size;

      if (adjust_size(x_size, y_size, fullpath.get_basename(), false, auto_texture_scale)) {
        image.set_read_size(x_size, y_size);
      }
    } else {
      // Subsequent pages/levels must match the size implied by level 0.
      image.set_read_size(do_get_expected_mipmap_x_size(cdata, n),
                          do_get_expected_mipmap_y_size(cdata, n));
    }

    if (image.get_x_size() != image.get_read_x_size() ||
        image.get_y_size() != image.get_read_y_size()) {
      gobj_cat.info()
        << "Implicitly rescaling " << fullpath.get_basename() << " from "
        << image.get_x_size() << " by " << image.get_y_size() << " to "
        << image.get_read_x_size() << " by " << image.get_read_y_size()
        << "\n";
    }

    bool success;
    if (read_floating_point) {
      // NOTE(review): pfm.read()/image.read() appear to take ownership of
      // image_reader (no delete on this path) — confirm against PNMImage docs.
      success = pfm.read(image_reader);
    } else {
      success = image.read(image_reader);
    }

    if (!success) {
      gobj_cat.error()
        << "Texture::read() - couldn't read: " << fullpath << endl;
      return false;
    }
  }

  // Optionally read the separate grayscale file that supplies alpha.
  PNMImage alpha_image;
  if (!alpha_fullpath.empty()) {
    PNMReader *alpha_image_reader = alpha_image.make_reader(alpha_fullpath, nullptr, false);
    if (alpha_image_reader == nullptr) {
      gobj_cat.error()
        << "Texture::read() - couldn't read: " << alpha_fullpath << endl;
      return false;
    }
    alpha_image.copy_header_from(*alpha_image_reader);

    if (record != nullptr) {
      record->add_dependent_file(alpha_fullpath);
    }

    if (header_only || textures_header_only) {
      // Substitute a fully-opaque dummy alpha image of the main image's size.
      int x_size = image.get_x_size();
      int y_size = image.get_y_size();
      alpha_image = PNMImage(x_size, y_size, alpha_image.get_num_channels(),
                             alpha_image.get_maxval(), alpha_image.get_type(),
                             alpha_image.get_color_space());
      alpha_image.fill(1.0);
      if (alpha_image.has_alpha()) {
        alpha_image.alpha_fill(1.0);
      }
      delete alpha_image_reader;

    } else {
      // Read the alpha file at the main image's size, rescaling if needed.
      if (image.get_x_size() != alpha_image.get_x_size() ||
          image.get_y_size() != alpha_image.get_y_size()) {
        gobj_cat.info()
          << "Implicitly rescaling " << alpha_fullpath.get_basename()
          << " from " << alpha_image.get_x_size() << " by "
          << alpha_image.get_y_size() << " to " << image.get_x_size()
          << " by " << image.get_y_size() << "\n";
        alpha_image.set_read_size(image.get_x_size(), image.get_y_size());
      }

      if (!alpha_image.read(alpha_image_reader)) {
        gobj_cat.error()
          << "Texture::read() - couldn't read (alpha): " << alpha_fullpath << endl;
        return false;
      }
    }
  }

  if (z == 0 && n == 0) {
    // Loading the very first page/level also establishes the texture's name
    // and remembered filenames.
    if (!has_name()) {
      set_name(fullpath.get_basename_wo_extension());
    }
    if (cdata->_filename.empty()) {
      cdata->_filename = fullpath;
      cdata->_alpha_filename = alpha_fullpath;

      // The first time we set the filename via a read() operation, we clear
      // keep_ram_image.  The user can always set it again later if he needs
      // to.
      cdata->_keep_ram_image = false;
    }

    cdata->_fullpath = fullpath;
    cdata->_alpha_fullpath = alpha_fullpath;
  }

  if (!alpha_fullpath.empty()) {
    // The grayscale (alpha channel) image must be the same size as the main
    // image.  This should really have been already guaranteed by the above.
    if (image.get_x_size() != alpha_image.get_x_size() ||
        image.get_y_size() != alpha_image.get_y_size()) {
      gobj_cat.info()
        << "Automatically rescaling " << alpha_fullpath.get_basename()
        << " from " << alpha_image.get_x_size() << " by "
        << alpha_image.get_y_size() << " to " << image.get_x_size()
        << " by " << image.get_y_size() << "\n";

      PNMImage scaled(image.get_x_size(), image.get_y_size(),
                      alpha_image.get_num_channels(),
                      alpha_image.get_maxval(), alpha_image.get_type(),
                      alpha_image.get_color_space());
      scaled.quick_filter_from(alpha_image);
      alpha_image = scaled;
    }
  }

  if (n == 0) {
    // Possibly drop unused channels, then record the channel layout we ended
    // up with for mipmap level 0.
    consider_downgrade(image, primary_file_num_channels, get_name());
    cdata->_primary_file_num_channels = image.get_num_channels();
    cdata->_alpha_file_channel = 0;
  }

  if (!alpha_fullpath.empty()) {
    // Make the original image a 4-component image by taking the grayscale
    // value from the second image.
    image.add_alpha();

    if (alpha_file_channel == 4 ||
        (alpha_file_channel == 2 && alpha_image.get_num_channels() == 2)) {

      if (!alpha_image.has_alpha()) {
        gobj_cat.error()
          << alpha_fullpath.get_basename() << " has no channel " << alpha_file_channel << ".\n";
      } else {
        // Use the alpha channel.
        for (int x = 0; x < image.get_x_size(); x++) {
          for (int y = 0; y < image.get_y_size(); y++) {
            image.set_alpha(x, y, alpha_image.get_alpha(x, y));
          }
        }
      }
      cdata->_alpha_file_channel = alpha_image.get_num_channels();

    } else if (alpha_file_channel >= 1 && alpha_file_channel <= 3 &&
               alpha_image.get_num_channels() >= 3) {
      // Use the appropriate red, green, or blue channel.
      for (int x = 0; x < image.get_x_size(); x++) {
        for (int y = 0; y < image.get_y_size(); y++) {
          image.set_alpha(x, y, alpha_image.get_channel_val(x, y, alpha_file_channel - 1));
        }
      }
      cdata->_alpha_file_channel = alpha_file_channel;

    } else {
      // Use the grayscale channel.
      for (int x = 0; x < image.get_x_size(); x++) {
        for (int y = 0; y < image.get_y_size(); y++) {
          image.set_alpha(x, y, alpha_image.get_gray(x, y));
        }
      }
      cdata->_alpha_file_channel = 0;
    }
  }

  // Finally, hand the decoded pixels off to the appropriate loader.
  if (read_floating_point) {
    if (!do_load_one(cdata, pfm, fullpath.get_basename(), z, n, options)) {
      return false;
    }
  } else {
    // Now see if we want to pad the image within a larger power-of-2 image.
    int pad_x_size = 0;
    int pad_y_size = 0;
    if (do_get_auto_texture_scale(cdata) == ATS_pad) {
      int new_x_size = image.get_x_size();
      int new_y_size = image.get_y_size();
      if (do_adjust_this_size(cdata, new_x_size, new_y_size, fullpath.get_basename(), true)) {
        pad_x_size = new_x_size - image.get_x_size();
        pad_y_size = new_y_size - image.get_y_size();
        PNMImage new_image(new_x_size, new_y_size, image.get_num_channels(),
                           image.get_maxval(), image.get_type(),
                           image.get_color_space());
        // Place the original image at the bottom-left of the padded image.
        new_image.copy_sub_image(image, 0, new_y_size - image.get_y_size());
        image.take_from(new_image);
      }
    }

    if (!do_load_one(cdata, image, fullpath.get_basename(), z, n, options)) {
      return false;
    }

    do_set_pad_size(cdata, pad_x_size, pad_y_size, 0);
  }
  return true;
}
3382 
/**
 * Internal method to load a single page or mipmap level.
 *
 * Copies the pixels of the given (integer) PNMImage into the in-RAM image
 * for page z of mipmap level n, rescaling first if the image does not match
 * the expected size for that level.  Loading level 0 (when no mipmap levels
 * exist yet) also establishes the texture's size, channel count, and
 * component type.
 *
 * @param cdata    the locked pipeline data to fill in.
 * @param pnmimage the decoded image data.
 * @param name     a human-readable name, used only for log messages.
 * @param z        page index within the level.
 * @param n        mipmap level index.
 * @param options  loader options forwarded to the reconsider methods.
 * @return true on success, false if the image properties are incompatible.
 */
bool Texture::
do_load_one(CData *cdata, const PNMImage &pnmimage, const string &name, int z, int n,
            const LoaderOptions &options) {
  if (cdata->_ram_images.size() <= 1 && n == 0) {
    // A special case for mipmap level 0.  When we load mipmap level 0, unless
    // we already have mipmap levels, it determines the image properties like
    // size and number of components.
    if (!do_reconsider_z_size(cdata, z, options)) {
      return false;
    }
    nassertr(z >= 0 && z < cdata->_z_size * cdata->_num_views, false);

    if (z == 0) {
      // Choose a component type wide enough for the image's maxval:
      // 8 bits normally, 16 bits if maxval exceeds 255.
      ComponentType component_type = T_unsigned_byte;
      xelval maxval = pnmimage.get_maxval();
      if (maxval > 255) {
        component_type = T_unsigned_short;
      }

      if (!do_reconsider_image_properties(cdata, pnmimage.get_x_size(), pnmimage.get_y_size(),
                                          pnmimage.get_num_channels(), component_type,
                                          z, options)) {
        return false;
      }
    }

    do_modify_ram_image(cdata);
    cdata->_loaded_from_image = true;
  }

  do_modify_ram_mipmap_image(cdata, n);

  // Ensure the PNMImage is an appropriate size.
  int x_size = do_get_expected_mipmap_x_size(cdata, n);
  int y_size = do_get_expected_mipmap_y_size(cdata, n);
  if (pnmimage.get_x_size() != x_size ||
      pnmimage.get_y_size() != y_size) {
    gobj_cat.info()
      << "Automatically rescaling " << name;
    if (n != 0) {
      gobj_cat.info(false)
        << " mipmap level " << n;
    }
    gobj_cat.info(false)
      << " from " << pnmimage.get_x_size() << " by "
      << pnmimage.get_y_size() << " to " << x_size << " by "
      << y_size << "\n";

    // Filter down (or up) to the expected size before converting.
    PNMImage scaled(x_size, y_size, pnmimage.get_num_channels(),
                    pnmimage.get_maxval(), pnmimage.get_type(),
                    pnmimage.get_color_space());
    scaled.quick_filter_from(pnmimage);

    convert_from_pnmimage(cdata->_ram_images[n]._image,
                          do_get_expected_ram_mipmap_page_size(cdata, n),
                          x_size, 0, 0, z, scaled,
                          cdata->_num_components, cdata->_component_width);
  } else {
    // Now copy the pixel data from the PNMImage into our internal
    // cdata->_image component.
    convert_from_pnmimage(cdata->_ram_images[n]._image,
                          do_get_expected_ram_mipmap_page_size(cdata, n),
                          x_size, 0, 0, z, pnmimage,
                          cdata->_num_components, cdata->_component_width);
  }

  return true;
}
3456 
/**
 * Internal method to load a single page or mipmap level.
 *
 * Floating-point counterpart of the PNMImage overload above: copies the
 * given PfmFile into the in-RAM image for page z of mipmap level n, always
 * using T_float components.  Rescales first if the file does not match the
 * expected size for that level.
 *
 * @param cdata    the locked pipeline data to fill in.
 * @param pfm      the decoded floating-point image data.
 * @param name     a human-readable name, used only for log messages.
 * @param z        page index within the level.
 * @param n        mipmap level index.
 * @param options  loader options forwarded to the reconsider methods.
 * @return true on success, false if the image properties are incompatible.
 */
bool Texture::
do_load_one(CData *cdata, const PfmFile &pfm, const string &name, int z, int n,
            const LoaderOptions &options) {
  if (cdata->_ram_images.size() <= 1 && n == 0) {
    // A special case for mipmap level 0.  When we load mipmap level 0, unless
    // we already have mipmap levels, it determines the image properties like
    // size and number of components.
    if (!do_reconsider_z_size(cdata, z, options)) {
      return false;
    }
    nassertr(z >= 0 && z < cdata->_z_size * cdata->_num_views, false);

    if (z == 0) {
      // PfmFile data is always stored as 32-bit floats.
      ComponentType component_type = T_float;
      if (!do_reconsider_image_properties(cdata, pfm.get_x_size(), pfm.get_y_size(),
                                          pfm.get_num_channels(), component_type,
                                          z, options)) {
        return false;
      }
    }

    do_modify_ram_image(cdata);
    cdata->_loaded_from_image = true;
  }

  do_modify_ram_mipmap_image(cdata, n);

  // Ensure the PfmFile is an appropriate size.
  int x_size = do_get_expected_mipmap_x_size(cdata, n);
  int y_size = do_get_expected_mipmap_y_size(cdata, n);
  if (pfm.get_x_size() != x_size ||
      pfm.get_y_size() != y_size) {
    gobj_cat.info()
      << "Automatically rescaling " << name;
    if (n != 0) {
      gobj_cat.info(false)
        << " mipmap level " << n;
    }
    gobj_cat.info(false)
      << " from " << pfm.get_x_size() << " by "
      << pfm.get_y_size() << " to " << x_size << " by "
      << y_size << "\n";

    // Resize a copy to the expected level size before converting.
    PfmFile scaled(pfm);
    scaled.resize(x_size, y_size);

    convert_from_pfm(cdata->_ram_images[n]._image,
                     do_get_expected_ram_mipmap_page_size(cdata, n), z,
                     scaled, cdata->_num_components, cdata->_component_width);
  } else {
    // Now copy the pixel data from the PfmFile into our internal
    // cdata->_image component.
    convert_from_pfm(cdata->_ram_images[n]._image,
                     do_get_expected_ram_mipmap_page_size(cdata, n), z,
                     pfm, cdata->_num_components, cdata->_component_width);
  }

  return true;
}
3521 
3522 /**
3523  * Internal method to load an image into a section of a texture page or mipmap
3524  * level.
3525  */
3526 bool Texture::
3527 do_load_sub_image(CData *cdata, const PNMImage &image, int x, int y, int z, int n) {
3528  nassertr(n >= 0 && (size_t)n < cdata->_ram_images.size(), false);
3529 
3530  int tex_x_size = do_get_expected_mipmap_x_size(cdata, n);
3531  int tex_y_size = do_get_expected_mipmap_y_size(cdata, n);
3532  int tex_z_size = do_get_expected_mipmap_z_size(cdata, n);
3533 
3534  nassertr(x >= 0 && x < tex_x_size, false);
3535  nassertr(y >= 0 && y < tex_y_size, false);
3536  nassertr(z >= 0 && z < tex_z_size, false);
3537 
3538  nassertr(image.get_x_size() + x <= tex_x_size, false);
3539  nassertr(image.get_y_size() + y <= tex_y_size, false);
3540 
3541  // Flip y
3542  y = cdata->_y_size - (image.get_y_size() + y);
3543 
3544  cdata->inc_image_modified();
3545  do_modify_ram_mipmap_image(cdata, n);
3546  convert_from_pnmimage(cdata->_ram_images[n]._image,
3547  do_get_expected_ram_mipmap_page_size(cdata, n),
3548  tex_x_size, x, y, z, image,
3549  cdata->_num_components, cdata->_component_width);
3550 
3551  return true;
3552 }
3553 
/**
 * Called internally when read() detects a txo file.  Assumes the lock is
 * already held.
 *
 * Opens the file through the virtual file system and delegates the actual
 * decoding to do_read_txo().  On return, the texture's fullpath is updated
 * and keep_ram_image is cleared.  Returns true on success.
 */
bool Texture::
do_read_txo_file(CData *cdata, const Filename &fullpath) {

  // NOTE(review): `vfs` is presumably the global VirtualFileSystem pointer
  // obtained just above this point in the full source — confirm.
  Filename filename = Filename::binary_filename(fullpath);
  PT(VirtualFile) file = vfs->get_file(filename);
  if (file == nullptr) {
    // No such file.
    gobj_cat.error()
      << "Could not find " << fullpath << "\n";
    return false;
  }

  if (gobj_cat.is_debug()) {
    gobj_cat.debug()
      << "Reading texture object " << filename << "\n";
  }

  // Decode the stream, then make sure it is closed again regardless of the
  // result.
  istream *in = file->open_read_file(true);
  bool success = do_read_txo(cdata, *in, fullpath);
  vfs->close_read_file(in);

  // Record where the data came from; a txo has no separate alpha file.
  cdata->_fullpath = fullpath;
  cdata->_alpha_fullpath = Filename();
  cdata->_keep_ram_image = false;

  return success;
}
3586 
3587 /**
3588  *
3589  */
3590 bool Texture::
3591 do_read_txo(CData *cdata, istream &in, const string &filename) {
3592  PT(Texture) other = make_from_txo(in, filename);
3593  if (other == nullptr) {
3594  return false;
3595  }
3596 
3597  CDReader cdata_other(other->_cycler);
3598  Namable::operator = (*other);
3599  do_assign(cdata, other, cdata_other);
3600 
3601  cdata->_loaded_from_image = true;
3602  cdata->_loaded_from_txo = true;
3603  cdata->_has_read_pages = false;
3604  cdata->_has_read_mipmaps = false;
3605  cdata->_num_mipmap_levels_read = 0;
3606  return true;
3607 }
3608 
/**
 * Called internally when read() detects a DDS file.  Assumes the lock is
 * already held.
 *
 * Opens the file through the virtual file system and delegates the actual
 * parsing to do_read_dds().  On return, the texture's name (if unset) and
 * fullpath are updated and keep_ram_image is cleared.  Returns true on
 * success.
 */
bool Texture::
do_read_dds_file(CData *cdata, const Filename &fullpath, bool header_only) {

  // NOTE(review): `vfs` is presumably the global VirtualFileSystem pointer
  // obtained just above this point in the full source — confirm.
  Filename filename = Filename::binary_filename(fullpath);
  PT(VirtualFile) file = vfs->get_file(filename);
  if (file == nullptr) {
    // No such file.
    gobj_cat.error()
      << "Could not find " << fullpath << "\n";
    return false;
  }

  if (gobj_cat.is_debug()) {
    gobj_cat.debug()
      << "Reading DDS file " << filename << "\n";
  }

  // Parse the stream, then make sure it is closed again regardless of the
  // result.
  istream *in = file->open_read_file(true);
  bool success = do_read_dds(cdata, *in, fullpath, header_only);
  vfs->close_read_file(in);

  if (!has_name()) {
    set_name(fullpath.get_basename_wo_extension());
  }

  // Record where the data came from; a DDS has no separate alpha file.
  cdata->_fullpath = fullpath;
  cdata->_alpha_fullpath = Filename();
  cdata->_keep_ram_image = false;

  return success;
}
3645 
3646 /**
3647  *
3648  */
3649 bool Texture::
3650 do_read_dds(CData *cdata, istream &in, const string &filename, bool header_only) {
3651  StreamReader dds(in);
3652 
3653  // DDS header (19 words)
3654  DDSHeader header;
3655  header.dds_magic = dds.get_uint32();
3656  header.dds_size = dds.get_uint32();
3657  header.dds_flags = dds.get_uint32();
3658  header.height = dds.get_uint32();
3659  header.width = dds.get_uint32();
3660  header.pitch = dds.get_uint32();
3661  header.depth = dds.get_uint32();
3662  header.num_levels = dds.get_uint32();
3663  dds.skip_bytes(44);
3664 
3665  // Pixelformat (8 words)
3666  header.pf.pf_size = dds.get_uint32();
3667  header.pf.pf_flags = dds.get_uint32();
3668  header.pf.four_cc = dds.get_uint32();
3669  header.pf.rgb_bitcount = dds.get_uint32();
3670  header.pf.r_mask = dds.get_uint32();
3671  header.pf.g_mask = dds.get_uint32();
3672  header.pf.b_mask = dds.get_uint32();
3673  header.pf.a_mask = dds.get_uint32();
3674 
3675  // Caps (4 words)
3676  header.caps.caps1 = dds.get_uint32();
3677  header.caps.caps2 = dds.get_uint32();
3678  header.caps.ddsx = dds.get_uint32();
3679  dds.skip_bytes(4);
3680 
3681  // Pad out to 32 words
3682  dds.skip_bytes(4);
3683 
3684  if (header.dds_magic != DDS_MAGIC || (in.fail() || in.eof())) {
3685  gobj_cat.error()
3686  << filename << " is not a DDS file.\n";
3687  return false;
3688  }
3689 
3690  if ((header.dds_flags & DDSD_MIPMAPCOUNT) == 0) {
3691  // No bit set means only the base mipmap level.
3692  header.num_levels = 1;
3693 
3694  } else if (header.num_levels == 0) {
3695  // Some files seem to have this set to 0 for some reason--existing readers
3696  // assume 0 means 1.
3697  header.num_levels = 1;
3698  }
3699 
3700  TextureType texture_type;
3701  if (header.caps.caps2 & DDSCAPS2_CUBEMAP) {
3702  static const unsigned int all_faces =
3703  (DDSCAPS2_CUBEMAP_POSITIVEX |
3704  DDSCAPS2_CUBEMAP_POSITIVEY |
3705  DDSCAPS2_CUBEMAP_POSITIVEZ |
3706  DDSCAPS2_CUBEMAP_NEGATIVEX |
3707  DDSCAPS2_CUBEMAP_NEGATIVEY |
3708  DDSCAPS2_CUBEMAP_NEGATIVEZ);
3709  if ((header.caps.caps2 & all_faces) != all_faces) {
3710  gobj_cat.error()
3711  << filename << " is missing some cube map faces; cannot load.\n";
3712  return false;
3713  }
3714  header.depth = 6;
3715  texture_type = TT_cube_map;
3716 
3717  } else if (header.caps.caps2 & DDSCAPS2_VOLUME) {
3718  texture_type = TT_3d_texture;
3719 
3720  } else {
3721  texture_type = TT_2d_texture;
3722  header.depth = 1;
3723  }
3724 
3725  // Determine the function to use to read the DDS image.
3726  typedef PTA_uchar (*ReadDDSLevelFunc)(Texture *tex, Texture::CData *cdata,
3727  const DDSHeader &header, int n, istream &in);
3728  ReadDDSLevelFunc func = nullptr;
3729 
3730  Format format = F_rgb;
3731  ComponentType component_type = T_unsigned_byte;
3732 
3733  do_clear_ram_image(cdata);
3734  CompressionMode compression = CM_off;
3735 
3736  if ((header.pf.pf_flags & DDPF_FOURCC) != 0 &&
3737  header.pf.four_cc == 0x30315844) { // 'DX10'
3738  // A DirectX 10 style texture, which has an additional header.
3739  func = read_dds_level_generic_uncompressed;
3740  unsigned int dxgi_format = dds.get_uint32();
3741  unsigned int dimension = dds.get_uint32();
3742  unsigned int misc_flag = dds.get_uint32();
3743  unsigned int array_size = dds.get_uint32();
3744  /*unsigned int alpha_mode = */dds.get_uint32();
3745 
3746  switch (dxgi_format) {
3747  case 2: // DXGI_FORMAT_R32G32B32A32_FLOAT
3748  format = F_rgba32;
3749  component_type = T_float;
3750  func = read_dds_level_abgr32;
3751  break;
3752  case 10: // DXGI_FORMAT_R16G16B16A16_FLOAT
3753  format = F_rgba16;
3754  component_type = T_half_float;
3755  func = read_dds_level_abgr16;
3756  break;
3757  case 11: // DXGI_FORMAT_R16G16B16A16_UNORM
3758  format = F_rgba16;
3759  component_type = T_unsigned_short;
3760  func = read_dds_level_abgr16;
3761  break;
3762  case 12: // DXGI_FORMAT_R16G16B16A16_UINT
3763  format = F_rgba16i;
3764  component_type = T_unsigned_short;
3765  func = read_dds_level_abgr16;
3766  break;
3767  case 14: // DXGI_FORMAT_R16G16B16A16_SINT
3768  format = F_rgba16i;
3769  component_type = T_short;
3770  func = read_dds_level_abgr16;
3771  break;
3772  case 16: // DXGI_FORMAT_R32G32_FLOAT
3773  format = F_rg32;
3774  component_type = T_float;
3775  func = read_dds_level_raw;
3776  break;
3777  case 17: // DXGI_FORMAT_R32G32_UINT
3778  format = F_rg32i;
3779  component_type = T_unsigned_int;
3780  func = read_dds_level_raw;
3781  break;
3782  case 18: // DXGI_FORMAT_R32G32_SINT
3783  format = F_rg32i;
3784  component_type = T_int;
3785  func = read_dds_level_raw;
3786  break;
3787  case 27: // DXGI_FORMAT_R8G8B8A8_TYPELESS
3788  case 28: // DXGI_FORMAT_R8G8B8A8_UNORM
3789  format = F_rgba8;
3790  func = read_dds_level_abgr8;
3791  break;
3792  case 29: // DXGI_FORMAT_R8G8B8A8_UNORM_SRGB
3793  format = F_srgb_alpha;
3794  func = read_dds_level_abgr8;
3795  break;
3796  case 30: // DXGI_FORMAT_R8G8B8A8_UINT
3797  format = F_rgba8i;
3798  func = read_dds_level_abgr8;
3799  break;
3800  case 31: // DXGI_FORMAT_R8G8B8A8_SNORM
3801  format = F_rgba8;
3802  component_type = T_byte;
3803  func = read_dds_level_abgr8;
3804  break;
3805  case 32: // DXGI_FORMAT_R8G8B8A8_SINT
3806  format = F_rgba8i;
3807  component_type = T_byte;
3808  func = read_dds_level_abgr8;
3809  break;
3810  case 34: // DXGI_FORMAT_R16G16_FLOAT:
3811  format = F_rg16;
3812  component_type = T_half_float;
3813  func = read_dds_level_raw;
3814  break;
3815  case 35: // DXGI_FORMAT_R16G16_UNORM:
3816  format = F_rg16;
3817  component_type = T_unsigned_short;
3818  func = read_dds_level_raw;
3819  break;
3820  case 36: // DXGI_FORMAT_R16G16_UINT:
3821  format = F_rg16i;
3822  component_type = T_unsigned_short;
3823  func = read_dds_level_raw;
3824  break;
3825  case 37: // DXGI_FORMAT_R16G16_SNORM:
3826  format = F_rg16;
3827  component_type = T_short;
3828  func = read_dds_level_raw;
3829  break;
3830  case 38: // DXGI_FORMAT_R16G16_SINT:
3831  format = F_rg16i;
3832  component_type = T_short;
3833  func = read_dds_level_raw;
3834  break;
3835  case 40: // DXGI_FORMAT_D32_FLOAT
3836  format = F_depth_component32;
3837  component_type = T_float;
3838  func = read_dds_level_raw;
3839  break;
3840  case 41: // DXGI_FORMAT_R32_FLOAT
3841  format = F_r32;
3842  component_type = T_float;
3843  func = read_dds_level_raw;
3844  break;
3845  case 42: // DXGI_FORMAT_R32_UINT
3846  format = F_r32i;
3847  component_type = T_unsigned_int;
3848  func = read_dds_level_raw;
3849  break;
3850  case 43: // DXGI_FORMAT_R32_SINT
3851  format = F_r32i;
3852  component_type = T_int;
3853  func = read_dds_level_raw;
3854  break;
3855  case 48: // DXGI_FORMAT_R8G8_TYPELESS
3856  case 49: // DXGI_FORMAT_R8G8_UNORM
3857  format = F_rg;
3858  break;
3859  case 50: // DXGI_FORMAT_R8G8_UINT
3860  format = F_rg8i;
3861  break;
3862  case 51: // DXGI_FORMAT_R8G8_SNORM
3863  format = F_rg;
3864  component_type = T_byte;
3865  break;
3866  case 52: // DXGI_FORMAT_R8G8_SINT
3867  format = F_rg8i;
3868  component_type = T_byte;
3869  break;
3870  case 54: // DXGI_FORMAT_R16_FLOAT:
3871  format = F_r16;
3872  component_type = T_half_float;
3873  func = read_dds_level_raw;
3874  break;
3875  case 55: // DXGI_FORMAT_D16_UNORM:
3876  format = F_depth_component16;
3877  component_type = T_unsigned_short;
3878  func = read_dds_level_raw;
3879  break;
3880  case 56: // DXGI_FORMAT_R16_UNORM:
3881  format = F_r16;
3882  component_type = T_unsigned_short;
3883  func = read_dds_level_raw;
3884  break;
3885  case 57: // DXGI_FORMAT_R16_UINT:
3886  format = F_r16i;
3887  component_type = T_unsigned_short;
3888  func = read_dds_level_raw;
3889  break;
3890  case 58: // DXGI_FORMAT_R16_SNORM:
3891  format = F_r16;
3892  component_type = T_short;
3893  func = read_dds_level_raw;
3894  break;
3895  case 59: // DXGI_FORMAT_R16_SINT:
3896  format = F_r16i;
3897  component_type = T_short;
3898  func = read_dds_level_raw;
3899  break;
3900  case 60: // DXGI_FORMAT_R8_TYPELESS
3901  case 61: // DXGI_FORMAT_R8_UNORM
3902  format = F_red;
3903  break;
3904  case 62: // DXGI_FORMAT_R8_UINT
3905  format = F_r8i;
3906  break;
3907  case 63: // DXGI_FORMAT_R8_SNORM
3908  format = F_red;
3909  component_type = T_byte;
3910  break;
3911  case 64: // DXGI_FORMAT_R8_SINT
3912  format = F_r8i;
3913  component_type = T_byte;
3914  break;
3915  case 65: // DXGI_FORMAT_A8_UNORM
3916  format = F_alpha;
3917  break;
3918  case 70: // DXGI_FORMAT_BC1_TYPELESS
3919  case 71: // DXGI_FORMAT_BC1_UNORM
3920  format = F_rgb;
3921  compression = CM_dxt1;
3922  func = read_dds_level_bc1;
3923  break;
3924  case 72: // DXGI_FORMAT_BC1_UNORM_SRGB
3925  format = F_srgb;
3926  compression = CM_dxt1;
3927  func = read_dds_level_bc1;
3928  break;
3929  case 73: // DXGI_FORMAT_BC2_TYPELESS
3930  case 74: // DXGI_FORMAT_BC2_UNORM
3931  format = F_rgba;
3932  compression = CM_dxt3;
3933  func = read_dds_level_bc2;
3934  break;
3935  case 75: // DXGI_FORMAT_BC2_UNORM_SRGB
3936  format = F_srgb_alpha;
3937  compression = CM_dxt3;
3938  func = read_dds_level_bc2;
3939  break;
3940  case 76: // DXGI_FORMAT_BC3_TYPELESS
3941  case 77: // DXGI_FORMAT_BC3_UNORM
3942  format = F_rgba;
3943  compression = CM_dxt5;
3944  func = read_dds_level_bc3;
3945  break;
3946  case 78: // DXGI_FORMAT_BC3_UNORM_SRGB
3947  format = F_srgb_alpha;
3948  compression = CM_dxt5;
3949  func = read_dds_level_bc3;
3950  break;
3951  case 79: // DXGI_FORMAT_BC4_TYPELESS
3952  case 80: // DXGI_FORMAT_BC4_UNORM
3953  format = F_red;
3954  compression = CM_rgtc;
3955  func = read_dds_level_bc4;
3956  break;
3957  case 82: // DXGI_FORMAT_BC5_TYPELESS
3958  case 83: // DXGI_FORMAT_BC5_UNORM
3959  format = F_rg;
3960  compression = CM_rgtc;
3961  func = read_dds_level_bc5;
3962  break;
3963  case 87: // DXGI_FORMAT_B8G8R8A8_UNORM
3964  case 90: // DXGI_FORMAT_B8G8R8A8_TYPELESS
3965  format = F_rgba8;
3966  break;
3967  case 88: // DXGI_FORMAT_B8G8R8X8_UNORM
3968  case 92: // DXGI_FORMAT_B8G8R8X8_TYPELESS
3969  format = F_rgb8;
3970  break;
3971  case 91: // DXGI_FORMAT_B8G8R8A8_UNORM_SRGB
3972  format = F_srgb_alpha;
3973  break;
3974  case 93: // DXGI_FORMAT_B8G8R8X8_UNORM_SRGB
3975  format = F_srgb;
3976  break;
3977  case 115: // DXGI_FORMAT_B4G4R4A4_UNORM
3978  format = F_rgba4;
3979  break;
3980  default:
3981  gobj_cat.error()
3982  << filename << ": unsupported DXGI format " << dxgi_format << ".\n";
3983  return false;
3984  }
3985 
3986  switch (dimension) {
3987  case 2: // DDS_DIMENSION_TEXTURE1D
3988  texture_type = TT_1d_texture;
3989  header.depth = 1;
3990  break;
3991  case 3: // DDS_DIMENSION_TEXTURE2D
3992  if (misc_flag & 0x4) { // DDS_RESOURCE_MISC_TEXTURECUBE
3993  if (array_size > 1) {
3994  texture_type = TT_cube_map_array;
3995  header.depth = array_size * 6;
3996  } else {
3997  texture_type = TT_cube_map;
3998  header.depth = 6;
3999  }
4000  } else {
4001  if (array_size > 1) {
4002  texture_type = TT_2d_texture_array;
4003  header.depth = array_size;
4004  } else {
4005  texture_type = TT_2d_texture;
4006  header.depth = 1;
4007  }
4008  }
4009  break;
4010  case 4: // DDS_DIMENSION_TEXTURE3D
4011  texture_type = TT_3d_texture;
4012  break;
4013  default:
4014  gobj_cat.error()
4015  << filename << ": unsupported dimension.\n";
4016  return false;
4017  }
4018 
4019  } else if (header.pf.pf_flags & DDPF_FOURCC) {
4020  // Some compressed texture format.
4021  if (texture_type == TT_3d_texture) {
4022  gobj_cat.error()
4023  << filename << ": unsupported compression on 3-d texture.\n";
4024  return false;
4025  }
4026 
4027  // Most of the compressed formats support alpha.
4028  format = F_rgba;
4029  switch (header.pf.four_cc) {
4030  case 0x31545844: // 'DXT1', little-endian.
4031  compression = CM_dxt1;
4032  func = read_dds_level_bc1;
4033  format = F_rgbm;
4034  break;
4035  case 0x32545844: // 'DXT2'
4036  compression = CM_dxt2;
4037  func = read_dds_level_bc2;
4038  break;
4039  case 0x33545844: // 'DXT3'
4040  compression = CM_dxt3;
4041  func = read_dds_level_bc2;
4042  break;
4043  case 0x34545844: // 'DXT4'
4044  compression = CM_dxt4;
4045  func = read_dds_level_bc3;
4046  break;
4047  case 0x35545844: // 'DXT5'
4048  compression = CM_dxt5;
4049  func = read_dds_level_bc3;
4050  break;
4051  case 0x31495441: // 'ATI1'
4052  case 0x55344342: // 'BC4U'
4053  compression = CM_rgtc;
4054  func = read_dds_level_bc4;
4055  format = F_red;
4056  break;
4057  case 0x32495441: // 'ATI2'
4058  case 0x55354342: // 'BC5U'
4059  compression = CM_rgtc;
4060  func = read_dds_level_bc5;
4061  format = F_rg;
4062  break;
4063  case 36: // D3DFMT_A16B16G16R16
4064  func = read_dds_level_abgr16;
4065  format = F_rgba16;
4066  component_type = T_unsigned_short;
4067  break;
4068  case 110: // D3DFMT_Q16W16V16U16
4069  func = read_dds_level_abgr16;
4070  format = F_rgba16;
4071  component_type = T_short;
4072  break;
4073  case 113: // D3DFMT_A16B16G16R16F
4074  func = read_dds_level_abgr16;
4075  format = F_rgba16;
4076  component_type = T_half_float;
4077  break;
4078  case 116: // D3DFMT_A32B32G32R32F
4079  func = read_dds_level_abgr32;
4080  format = F_rgba32;
4081  component_type = T_float;
4082  break;
4083  default:
4084  gobj_cat.error()
4085  << filename << ": unsupported texture compression (FourCC: 0x"
4086  << std::hex << header.pf.four_cc << std::dec << ").\n";
4087  return false;
4088  }
4089 
4090  } else {
4091  // An uncompressed texture format.
4092  func = read_dds_level_generic_uncompressed;
4093 
4094  if (header.pf.pf_flags & DDPF_ALPHAPIXELS) {
4095  // An uncompressed format that involves alpha.
4096  format = F_rgba;
4097  if (header.pf.rgb_bitcount == 32 &&
4098  header.pf.r_mask == 0x000000ff &&
4099  header.pf.g_mask == 0x0000ff00 &&
4100  header.pf.b_mask == 0x00ff0000 &&
4101  header.pf.a_mask == 0xff000000U) {
4102  func = read_dds_level_abgr8;
4103  } else if (header.pf.rgb_bitcount == 32 &&
4104  header.pf.r_mask == 0x00ff0000 &&
4105  header.pf.g_mask == 0x0000ff00 &&
4106  header.pf.b_mask == 0x000000ff &&
4107  header.pf.a_mask == 0xff000000U) {
4108  func = read_dds_level_rgba8;
4109 
4110  } else if (header.pf.r_mask != 0 &&
4111  header.pf.g_mask == 0 &&
4112  header.pf.b_mask == 0) {
4113  func = read_dds_level_luminance_uncompressed;
4114  format = F_luminance_alpha;
4115  }
4116  } else {
4117  // An uncompressed format that doesn't involve alpha.
4118  if (header.pf.rgb_bitcount == 24 &&
4119  header.pf.r_mask == 0x00ff0000 &&
4120  header.pf.g_mask == 0x0000ff00 &&
4121  header.pf.b_mask == 0x000000ff) {
4122  func = read_dds_level_bgr8;
4123  } else if (header.pf.rgb_bitcount == 24 &&
4124  header.pf.r_mask == 0x000000ff &&
4125  header.pf.g_mask == 0x0000ff00 &&
4126  header.pf.b_mask == 0x00ff0000) {
4127  func = read_dds_level_rgb8;
4128 
4129  } else if (header.pf.r_mask != 0 &&
4130  header.pf.g_mask == 0 &&
4131  header.pf.b_mask == 0) {
4132  func = read_dds_level_luminance_uncompressed;
4133  format = F_luminance;
4134  }
4135  }
4136  }
4137 
4138  do_setup_texture(cdata, texture_type, header.width, header.height, header.depth,
4139  component_type, format);
4140 
4141  cdata->_orig_file_x_size = cdata->_x_size;
4142  cdata->_orig_file_y_size = cdata->_y_size;
4143  cdata->_compression = compression;
4144  cdata->_ram_image_compression = compression;
4145 
4146  if (!header_only) {
4147  switch (texture_type) {
4148  case TT_3d_texture:
4149  {
4150  // 3-d textures store all the depth slices for mipmap level 0, then
4151  // all the depth slices for mipmap level 1, and so on.
4152  for (int n = 0; n < (int)header.num_levels; ++n) {
4153  int z_size = do_get_expected_mipmap_z_size(cdata, n);
4154  pvector<PTA_uchar> pages;
4155  size_t page_size = 0;
4156  int z;
4157  for (z = 0; z < z_size; ++z) {
4158  PTA_uchar page = func(this, cdata, header, n, in);
4159  if (page.is_null()) {
4160  return false;
4161  }
4162  nassertr(page_size == 0 || page_size == page.size(), false);
4163  page_size = page.size();
4164  pages.push_back(page);
4165  }
4166  // Now reassemble the pages into one big image. Because this is a
4167  // Microsoft format, the images are stacked in reverse order; re-
4168  // reverse them.
4169  PTA_uchar image = PTA_uchar::empty_array(page_size * z_size);
4170  unsigned char *imagep = (unsigned char *)image.p();
4171  for (z = 0; z < z_size; ++z) {
4172  int fz = z_size - 1 - z;
4173  memcpy(imagep + z * page_size, pages[fz].p(), page_size);
4174  }
4175 
4176  do_set_ram_mipmap_image(cdata, n, image, page_size);
4177  }
4178  }
4179  break;
4180 
4181  case TT_cube_map:
4182  {
4183  // Cube maps store all the mipmap levels for face 0, then all the
4184  // mipmap levels for face 1, and so on.
4186  pages.reserve(6);
4187  int z, n;
4188  for (z = 0; z < 6; ++z) {
4189  pages.push_back(pvector<PTA_uchar>());
4190  pvector<PTA_uchar> &levels = pages.back();
4191  levels.reserve(header.num_levels);
4192 
4193  for (n = 0; n < (int)header.num_levels; ++n) {
4194  PTA_uchar image = func(this, cdata, header, n, in);
4195  if (image.is_null()) {
4196  return false;
4197  }
4198  levels.push_back(image);
4199  }
4200  }
4201 
4202  // Now, for each level, reassemble the pages into one big image.
4203  // Because this is a Microsoft format, the levels are arranged in a
4204  // rotated order.
4205  static const int level_remap[6] = {
4206  0, 1, 5, 4, 2, 3
4207  };
4208  for (n = 0; n < (int)header.num_levels; ++n) {
4209  size_t page_size = pages[0][n].size();
4210  PTA_uchar image = PTA_uchar::empty_array(page_size * 6);
4211  unsigned char *imagep = (unsigned char *)image.p();
4212  for (z = 0; z < 6; ++z) {
4213  int fz = level_remap[z];
4214  nassertr(pages[fz][n].size() == page_size, false);
4215  memcpy(imagep + z * page_size, pages[fz][n].p(), page_size);
4216  }
4217 
4218  do_set_ram_mipmap_image(cdata, n, image, page_size);
4219  }
4220  }
4221  break;
4222 
4223  case TT_2d_texture_array:
4224  case TT_cube_map_array: //TODO: rearrange cube map array faces?
4225  {
4226  // Texture arrays store all the mipmap levels for layer 0, then all
4227  // the mipmap levels for layer 1, and so on.
4229  pages.reserve(header.depth);
4230  int z, n;
4231  for (z = 0; z < (int)header.depth; ++z) {
4232  pages.push_back(pvector<PTA_uchar>());
4233  pvector<PTA_uchar> &levels = pages.back();
4234  levels.reserve(header.num_levels);
4235 
4236  for (n = 0; n < (int)header.num_levels; ++n) {
4237  PTA_uchar image = func(this, cdata, header, n, in);
4238  if (image.is_null()) {
4239  return false;
4240  }
4241  levels.push_back(image);
4242  }
4243  }
4244 
4245  // Now, for each level, reassemble the pages into one big image.
4246  for (n = 0; n < (int)header.num_levels; ++n) {
4247  size_t page_size = pages[0][n].size();
4248  PTA_uchar image = PTA_uchar::empty_array(page_size * header.depth);
4249  unsigned char *imagep = (unsigned char *)image.p();
4250  for (z = 0; z < (int)header.depth; ++z) {
4251  nassertr(pages[z][n].size() == page_size, false);
4252  memcpy(imagep + z * page_size, pages[z][n].p(), page_size);
4253  }
4254 
4255  do_set_ram_mipmap_image(cdata, n, image, page_size);
4256  }
4257  }
4258  break;
4259 
4260  default:
4261  // Normal 2-d textures simply store the mipmap levels.
4262  {
4263  for (int n = 0; n < (int)header.num_levels; ++n) {
4264  PTA_uchar image = func(this, cdata, header, n, in);
4265  if (image.is_null()) {
4266  return false;
4267  }
4268  do_set_ram_mipmap_image(cdata, n, image, 0);
4269  }
4270  }
4271  }
4272  cdata->_has_read_pages = true;
4273  cdata->_has_read_mipmaps = true;
4274  cdata->_num_mipmap_levels_read = cdata->_ram_images.size();
4275  }
4276 
4277  if (in.fail()) {
4278  gobj_cat.error()
4279  << filename << ": truncated DDS file.\n";
4280  return false;
4281  }
4282 
4283  cdata->_loaded_from_image = true;
4284  cdata->_loaded_from_txo = true;
4285 
4286  return true;
4287 }
4288 
4289 /**
4290  * Called internally when read() detects a KTX file. Assumes the lock is
4291  * already held.
4292  */
4293 bool Texture::
4294 do_read_ktx_file(CData *cdata, const Filename &fullpath, bool header_only) {
4296 
4297  Filename filename = Filename::binary_filename(fullpath);
4298  PT(VirtualFile) file = vfs->get_file(filename);
4299  if (file == nullptr) {
4300  // No such file.
4301  gobj_cat.error()
4302  << "Could not find " << fullpath << "\n";
4303  return false;
4304  }
4305 
4306  if (gobj_cat.is_debug()) {
4307  gobj_cat.debug()
4308  << "Reading KTX file " << filename << "\n";
4309  }
4310 
4311  istream *in = file->open_read_file(true);
4312  bool success = do_read_ktx(cdata, *in, fullpath, header_only);
4313  vfs->close_read_file(in);
4314 
4315  if (!has_name()) {
4316  set_name(fullpath.get_basename_wo_extension());
4317  }
4318 
4319  cdata->_fullpath = fullpath;
4320  cdata->_alpha_fullpath = Filename();
4321  cdata->_keep_ram_image = false;
4322 
4323  return success;
4324 }
4325 
4326 /**
4327  *
4328  */
4329 bool Texture::
4330 do_read_ktx(CData *cdata, istream &in, const string &filename, bool header_only) {
4331  StreamReader ktx(in);
4332 
4333  unsigned char magic[12];
4334  if (ktx.extract_bytes(magic, 12) != 12 ||
4335  memcmp(magic, "\xABKTX 11\xBB\r\n\x1A\n", 12) != 0) {
4336  gobj_cat.error()
4337  << filename << " is not a KTX file.\n";
4338  return false;
4339  }
4340 
4341  // See: https://www.khronos.org/opengles/sdk/tools/KTX/file_format_spec/
4342  uint32_t gl_type, /*type_size,*/ gl_format, internal_format, gl_base_format,
4343  width, height, depth, num_array_elements, num_faces, num_mipmap_levels,
4344  kvdata_size;
4345 
4346  bool big_endian;
4347  if (ktx.get_uint32() == 0x04030201) {
4348  big_endian = false;
4349  gl_type = ktx.get_uint32();
4350  /*type_size = */ktx.get_uint32();
4351  gl_format = ktx.get_uint32();
4352  internal_format = ktx.get_uint32();
4353  gl_base_format = ktx.get_uint32();
4354  width = ktx.get_uint32();
4355  height = ktx.get_uint32();
4356  depth = ktx.get_uint32();
4357  num_array_elements = ktx.get_uint32();
4358  num_faces = ktx.get_uint32();
4359  num_mipmap_levels = ktx.get_uint32();
4360  kvdata_size = ktx.get_uint32();
4361  } else {
4362  big_endian = true;
4363  gl_type = ktx.get_be_uint32();
4364  /*type_size = */ktx.get_be_uint32();
4365  gl_format = ktx.get_be_uint32();
4366  internal_format = ktx.get_be_uint32();
4367  gl_base_format = ktx.get_be_uint32();
4368  width = ktx.get_be_uint32();
4369  height = ktx.get_be_uint32();
4370  depth = ktx.get_be_uint32();
4371  num_array_elements = ktx.get_be_uint32();
4372  num_faces = ktx.get_be_uint32();
4373  num_mipmap_levels = ktx.get_be_uint32();
4374  kvdata_size = ktx.get_be_uint32();
4375  }
4376 
4377  // Skip metadata section.
4378  ktx.skip_bytes(kvdata_size);
4379 
4380  ComponentType type;
4381  CompressionMode compression;
4382  Format format;
4383  bool swap_bgr = false;
4384 
4385  if (gl_type == 0 || gl_format == 0) {
4386  // Compressed texture.
4387  if (gl_type > 0 || gl_format > 0) {
4388  gobj_cat.error()
4389  << "Compressed textures must have both type and format set to 0.\n";
4390  return false;
4391  }
4392  type = T_unsigned_byte;
4393  compression = CM_on;
4394 
4395  KTXFormat base_format;
4396  switch ((KTXCompressedFormat)internal_format) {
4397  case KTX_COMPRESSED_RED:
4398  format = F_red;
4399  base_format = KTX_RED;
4400  break;
4401  case KTX_COMPRESSED_RG:
4402  format = F_rg;
4403  base_format = KTX_RG;
4404  break;
4405  case KTX_COMPRESSED_RGB:
4406  format = F_rgb;
4407  base_format = KTX_RGB;
4408  break;
4409  case KTX_COMPRESSED_RGBA:
4410  format = F_rgba;
4411  base_format = KTX_RGBA;
4412  break;
4413  case KTX_COMPRESSED_SRGB:
4414  format = F_srgb;
4415  base_format = KTX_SRGB;
4416  break;
4417  case KTX_COMPRESSED_SRGB_ALPHA:
4418  format = F_srgb_alpha;
4419  base_format = KTX_SRGB_ALPHA;
4420  break;
4421  case KTX_COMPRESSED_RGB_FXT1_3DFX:
4422  format = F_rgb;
4423  base_format = KTX_RGB;
4424  compression = CM_fxt1;
4425  break;
4426  case KTX_COMPRESSED_RGBA_FXT1_3DFX:
4427  format = F_rgba;
4428  base_format = KTX_RGBA;
4429  compression = CM_fxt1;
4430  break;
4431  case KTX_COMPRESSED_RGB_S3TC_DXT1:
4432  format = F_rgb;
4433  base_format = KTX_RGB;
4434  compression = CM_dxt1;
4435  break;
4436  case KTX_COMPRESSED_RGBA_S3TC_DXT1:
4437  format = F_rgbm;
4438  base_format = KTX_RGB;
4439  compression = CM_dxt1;
4440  break;
4441  case KTX_COMPRESSED_RGBA_S3TC_DXT3:
4442  format = F_rgba;
4443  base_format = KTX_RGBA;
4444  compression = CM_dxt3;
4445  break;
4446  case KTX_COMPRESSED_RGBA_S3TC_DXT5:
4447  format = F_rgba;
4448  base_format = KTX_RGBA;
4449  compression = CM_dxt5;
4450  break;
4451  case KTX_COMPRESSED_SRGB_ALPHA_S3TC_DXT1:
4452  format = F_srgb_alpha;
4453  base_format = KTX_SRGB_ALPHA;
4454  compression = CM_dxt1;
4455  break;
4456  case KTX_COMPRESSED_SRGB_ALPHA_S3TC_DXT3:
4457  format = F_srgb_alpha;
4458  base_format = KTX_SRGB_ALPHA;
4459  compression = CM_dxt3;
4460  break;
4461  case KTX_COMPRESSED_SRGB_ALPHA_S3TC_DXT5:
4462  format = F_srgb_alpha;
4463  base_format = KTX_SRGB_ALPHA;
4464  compression = CM_dxt5;
4465  break;
4466  case KTX_COMPRESSED_SRGB_S3TC_DXT1:
4467  format = F_srgb;
4468  base_format = KTX_SRGB;
4469  compression = CM_dxt1;
4470  break;
4471  case KTX_COMPRESSED_RED_RGTC1:
4472  case KTX_COMPRESSED_SIGNED_RED_RGTC1:
4473  format = F_red;
4474  base_format = KTX_RED;
4475  compression = CM_rgtc;
4476  break;
4477  case KTX_COMPRESSED_RG_RGTC2:
4478  case KTX_COMPRESSED_SIGNED_RG_RGTC2:
4479  format = F_rg;
4480  base_format = KTX_RG;
4481  compression = CM_rgtc;
4482  break;
4483  case KTX_ETC1_RGB8:
4484  format = F_rgb;
4485  base_format = KTX_RGB;
4486  compression = CM_etc1;
4487  break;
4488  case KTX_ETC1_SRGB8:
4489  format = F_srgb;
4490  base_format = KTX_SRGB;
4491  compression = CM_etc1;
4492  break;
4493  case KTX_COMPRESSED_RGB8_ETC2:
4494  format = F_rgb;
4495  base_format = KTX_RGB;
4496  compression = CM_etc2;
4497  break;
4498  case KTX_COMPRESSED_SRGB8_ETC2:
4499  format = F_srgb;
4500  base_format = KTX_SRGB;
4501  compression = CM_etc2;
4502  break;
4503  case KTX_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2:
4504  format = F_rgbm;
4505  base_format = KTX_RGBA;
4506  compression = CM_etc2;
4507  break;
4508  case KTX_COMPRESSED_SRGB8_PUNCHTHROUGH_ALPHA1_ETC2:
4509  format = F_rgbm;
4510  base_format = KTX_SRGB8_ALPHA8;
4511  compression = CM_etc2;
4512  break;
4513  case KTX_COMPRESSED_RGBA8_ETC2_EAC:
4514  format = F_rgba;
4515  base_format = KTX_RGBA;
4516  compression = CM_etc2;
4517  break;
4518  case KTX_COMPRESSED_SRGB8_ALPHA8_ETC2_EAC:
4519  format = F_srgb_alpha;
4520  base_format = KTX_SRGB8_ALPHA8;
4521  compression = CM_etc2;
4522  break;
4523  case KTX_COMPRESSED_R11_EAC:
4524  case KTX_COMPRESSED_SIGNED_R11_EAC:
4525  format = F_red;
4526  base_format = KTX_RED;
4527  compression = CM_eac;
4528  break;
4529  case KTX_COMPRESSED_RG11_EAC:
4530  case KTX_COMPRESSED_SIGNED_RG11_EAC:
4531  format = F_rg;
4532  base_format = KTX_RG;
4533  compression = CM_eac;
4534  break;
4535  case KTX_COMPRESSED_SRGB_ALPHA_PVRTC_2BPPV1:
4536  format = F_srgb_alpha;
4537  base_format = KTX_SRGB_ALPHA;
4538  compression = CM_pvr1_2bpp;
4539  break;
4540  case KTX_COMPRESSED_SRGB_ALPHA_PVRTC_4BPPV1:
4541  format = F_srgb_alpha;
4542  base_format = KTX_SRGB_ALPHA;
4543  compression = CM_pvr1_4bpp;
4544  break;
4545  case KTX_COMPRESSED_RGBA_BPTC_UNORM:
4546  case KTX_COMPRESSED_SRGB_ALPHA_BPTC_UNORM:
4547  case KTX_COMPRESSED_RGB_BPTC_SIGNED_FLOAT:
4548  case KTX_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT:
4549  default:
4550  gobj_cat.error()
4551  << filename << " has unsupported compressed internal format " << internal_format << "\n";
4552  return false;
4553  }
4554 
4555  if (base_format != gl_base_format) {
4556  gobj_cat.error()
4557  << filename << " has internal format that is incompatible with base "
4558  "format (0x" << std::hex << gl_base_format << ", expected 0x"
4559  << base_format << std::dec << ")\n";
4560  return false;
4561  }
4562 
4563  } else {
4564  // Uncompressed texture.
4565  compression = CM_off;
4566  switch ((KTXType)gl_type) {
4567  case KTX_BYTE:
4568  type = T_byte;
4569  break;
4570  case KTX_UNSIGNED_BYTE:
4571  type = T_unsigned_byte;
4572  break;
4573  case KTX_SHORT:
4574  type = T_short;
4575  break;
4576  case KTX_UNSIGNED_SHORT:
4577  type = T_unsigned_short;
4578  break;
4579  case KTX_INT:
4580  type = T_int;
4581  break;
4582  case KTX_UNSIGNED_INT:
4583  type = T_unsigned_int;
4584  break;
4585  case KTX_FLOAT:
4586  type = T_float;
4587  break;
4588  case KTX_HALF_FLOAT:
4589  type = T_half_float;
4590  break;
4591  case KTX_UNSIGNED_INT_24_8:
4592  type = T_unsigned_int_24_8;
4593  break;
4594  default:
4595  gobj_cat.error()
4596  << filename << " has unsupported component type " << gl_type << "\n";
4597  return false;
4598  }
4599 
4600  if (gl_format != gl_base_format) {
4601  gobj_cat.error()
4602  << filename << " has mismatched formats: " << gl_format << " != "
4603  << gl_base_format << "\n";
4604  }
4605 
4606  switch (gl_format) {
4607  case KTX_DEPTH_COMPONENT:
4608  switch (internal_format) {
4609  case KTX_DEPTH_COMPONENT:
4610  format = F_depth_component;
4611  break;
4612  case KTX_DEPTH_COMPONENT16:
4613  format = F_depth_component16;
4614  break;
4615  case KTX_DEPTH_COMPONENT24:
4616  format = F_depth_component24;
4617  break;
4618  case KTX_DEPTH_COMPONENT32:
4619  case KTX_DEPTH_COMPONENT32F:
4620  format = F_depth_component32;
4621  break;
4622  default:
4623  format = F_depth_component;
4624  gobj_cat.warning()
4625  << filename << " has unsupported depth component format " << internal_format << "\n";
4626  }
4627  break;
4628 
4629  case KTX_DEPTH_STENCIL:
4630  format = F_depth_stencil;
4631  if (internal_format != KTX_DEPTH_STENCIL &&
4632  internal_format != KTX_DEPTH24_STENCIL8) {
4633  gobj_cat.warning()
4634  << filename << " has unsupported depth stencil format " << internal_format << "\n";
4635  }
4636  break;
4637 
4638  case KTX_RED:
4639  switch (internal_format) {
4640  case KTX_RED:
4641  case KTX_RED_SNORM:
4642  case KTX_R8:
4643  case KTX_R8_SNORM:
4644  format = F_red;
4645  break;
4646  case KTX_R16:
4647  case KTX_R16_SNORM:
4648  case KTX_R16F:
4649  format = F_r16;
4650  break;
4651  case KTX_R32F:
4652  format = F_r32;
4653  break;
4654  default:
4655  format = F_red;
4656  gobj_cat.warning()
4657  << filename << " has unsupported red format " << internal_format << "\n";
4658  }
4659  break;
4660 
4661  case KTX_RED_INTEGER:
4662  switch (internal_format) {
4663  case KTX_R8I:
4664  case KTX_R8UI:
4665  format = F_r8i;
4666  break;
4667  case KTX_R16I:
4668  case KTX_R16UI:
4669  format = F_r16i;
4670  break;
4671  case KTX_R32I:
4672  case KTX_R32UI:
4673  format = F_r32i;
4674  break;
4675  default:
4676  gobj_cat.error()
4677  << filename << " has unsupported red integer format " << internal_format << "\n";
4678  return false;
4679  }
4680  break;
4681 
4682  case KTX_GREEN:
4683  format = F_green;
4684  if (internal_format != KTX_GREEN) {
4685  gobj_cat.warning()
4686  << filename << " has unsupported green format " << internal_format << "\n";
4687  }
4688  break;
4689 
4690  case KTX_BLUE:
4691  format = F_blue;
4692  if (internal_format != KTX_BLUE) {
4693  gobj_cat.warning()
4694  << filename << " has unsupported blue format " << internal_format << "\n";
4695  }
4696  break;
4697 
4698  case KTX_RG:
4699  switch (internal_format) {
4700  case KTX_RG:
4701  case KTX_RG_SNORM:
4702  case KTX_RG8:
4703  case KTX_RG8_SNORM:
4704  format = F_rg;
4705  break;
4706  case KTX_RG16:
4707  case KTX_RG16_SNORM:
4708  case KTX_RG16F:
4709  format = F_rg16;
4710  break;
4711  case KTX_RG32F:
4712  format = F_rg32;
4713  break;
4714  default:
4715  format = F_rg;
4716  gobj_cat.warning()
4717  << filename << " has unsupported RG format " << internal_format << "\n";
4718  }
4719  break;
4720 
4721  case KTX_RG_INTEGER:
4722  switch (internal_format) {
4723  case KTX_RG8I:
4724  case KTX_RG8UI:
4725  format = F_rg8i;
4726  break;
4727  case KTX_RG16I:
4728  case KTX_RG16UI:
4729  format = F_rg16i;
4730  break;
4731  case KTX_RG32I:
4732  case KTX_RG32UI:
4733  format = F_rg32i;
4734  break;
4735  default:
4736  gobj_cat.error()
4737  << filename << " has unsupported RG integer format " << internal_format << "\n";
4738  return false;
4739  }
4740  break;
4741 
4742  case KTX_RGB:
4743  swap_bgr = true;
4744  case KTX_BGR:
4745  switch (internal_format) {
4746  case KTX_RGB:
4747  case KTX_RGB_SNORM:
4748  format = F_rgb;
4749  break;
4750  case KTX_RGB5:
4751  format = F_rgb5;
4752  break;
4753  case KTX_RGB12:
4754  format = F_rgb12;
4755  break;
4756  case KTX_R3_G3_B2:
4757  format = F_rgb332;
4758  break;
4759  case KTX_RGB9_E5:
4760  format = F_rgb9_e5;
4761  break;
4762  case KTX_R11F_G11F_B10F:
4763  format = F_r11_g11_b10;
4764  break;
4765  case KTX_RGB8:
4766  case KTX_RGB8_SNORM:
4767  format = F_rgb8;
4768  break;
4769  case KTX_RGB16:
4770  case KTX_RGB16_SNORM:
4771  case KTX_RGB16F:
4772  format = F_rgb16;
4773  break;
4774  case KTX_RGB32F:
4775  format = F_rgb32;
4776  break;
4777  case KTX_SRGB:
4778  case KTX_SRGB8:
4779  format = F_srgb;
4780  break;
4781  default:
4782  format = F_rgb;
4783  gobj_cat.warning()
4784  << filename << " has unsupported RGB format " << internal_format << "\n";
4785  }
4786  break;
4787 
4788  case KTX_RGB_INTEGER:
4789  swap_bgr = true;
4790  case KTX_BGR_INTEGER:
4791  switch (internal_format) {
4792  case KTX_RGB8I:
4793  case KTX_RGB8UI:
4794  format = F_rgb8i;
4795  break;
4796  case KTX_RGB16I:
4797  case KTX_RGB16UI:
4798  format = F_rgb16i;
4799  break;
4800  case KTX_RGB32I:
4801  case KTX_RGB32UI:
4802  format = F_rgb32i;
4803  break;
4804  default:
4805  gobj_cat.error()
4806  << filename << " has unsupported RGB integer format " << internal_format << "\n";
4807  return false;
4808  }
4809  break;
4810 
4811  case KTX_RGBA:
4812  swap_bgr = true;
4813  case KTX_BGRA:
4814  switch (internal_format) {
4815  case KTX_RGBA:
4816  case KTX_RGBA_SNORM:
4817  format = F_rgba;
4818  break;
4819  case KTX_RGBA4:
4820  format = F_rgba4;
4821  break;
4822  case KTX_RGB5_A1:
4823  format = F_rgba5;
4824  break;
4825  case KTX_RGBA12:
4826  format = F_rgba12;
4827  break;
4828  case KTX_RGB10_A2:
4829  format = F_rgb10_a2;
4830  break;
4831  case KTX_RGBA8:
4832  case KTX_RGBA8_SNORM:
4833  format = F_rgba8;
4834  break;
4835  case KTX_RGBA16:
4836  case KTX_RGBA16_SNORM:
4837  case KTX_RGBA16F:
4838  format = F_rgba16;
4839  break;
4840  case KTX_RGBA32F:
4841  format = F_rgba32;
4842  break;
4843  case KTX_SRGB_ALPHA:
4844  case KTX_SRGB8_ALPHA8:
4845  format = F_srgb_alpha;
4846  break;
4847  default:
4848  format = F_rgba;
4849  gobj_cat.warning()
4850  << filename << " has unsupported RGBA format " << internal_format << "\n";
4851  }
4852  break;
4853  break;
4854 
4855  case KTX_RGBA_INTEGER:
4856  swap_bgr = true;
4857  case KTX_BGRA_INTEGER:
4858  switch (internal_format) {
4859  case KTX_RGBA8I:
4860  case KTX_RGBA8UI:
4861  format = F_rgba8i;
4862  break;
4863  case KTX_RGBA16I:
4864  case KTX_RGBA16UI:
4865  format = F_rgba16i;
4866  break;
4867  case KTX_RGBA32I:
4868  case KTX_RGBA32UI:
4869  format = F_rgba32i;
4870  break;
4871  default:
4872  gobj_cat.error()
4873  << filename << " has unsupported RGBA integer format " << internal_format << "\n";
4874  return false;
4875  }
4876  break;
4877 
4878  case KTX_LUMINANCE:
4879  format = F_luminance;
4880  break;
4881 
4882  case KTX_LUMINANCE_ALPHA:
4883  format = F_luminance_alpha;
4884  break;
4885 
4886  case KTX_ALPHA:
4887  format = F_alpha;
4888  break;
4889 
4890  case KTX_STENCIL_INDEX:
4891  default:
4892  gobj_cat.error()
4893  << filename << " has unsupported format " << gl_format << "\n";
4894  return false;
4895  }
4896  }
4897 
4898  TextureType texture_type;
4899  if (depth > 0) {
4900  texture_type = TT_3d_texture;
4901 
4902  } else if (num_faces > 1) {
4903  if (num_faces != 6) {
4904  gobj_cat.error()
4905  << filename << " has " << num_faces << " cube map faces, expected 6\n";
4906  return false;
4907  }
4908  if (width != height) {
4909  gobj_cat.error()
4910  << filename << " is cube map, but does not have square dimensions\n";
4911  return false;
4912  }
4913  if (num_array_elements > 0) {
4914  depth = num_array_elements * 6;
4915  texture_type = TT_cube_map_array;
4916  } else {
4917  depth = 6;
4918  texture_type = TT_cube_map;
4919  }
4920 
4921  } else if (height > 0) {
4922  if (num_array_elements > 0) {
4923  depth = num_array_elements;
4924  texture_type = TT_2d_texture_array;
4925  } else {
4926  depth = 1;
4927  texture_type = TT_2d_texture;
4928  }
4929 
4930  } else if (width > 0) {
4931  depth = 1;
4932  if (num_array_elements > 0) {
4933  height = num_array_elements;
4934  texture_type = TT_1d_texture_array;
4935  } else {
4936  height = 1;
4937  texture_type = TT_1d_texture;
4938  }
4939 
4940  } else {
4941  gobj_cat.error()
4942  << filename << " has zero size\n";
4943  return false;
4944  }
4945 
4946  do_setup_texture(cdata, texture_type, width, height, depth, type, format);
4947 
4948  cdata->_orig_file_x_size = cdata->_x_size;
4949  cdata->_orig_file_y_size = cdata->_y_size;
4950  cdata->_compression = compression;
4951  cdata->_ram_image_compression = compression;
4952 
4953  if (!header_only) {
4954  bool generate_mipmaps = false;
4955  if (num_mipmap_levels == 0) {
4956  generate_mipmaps = true;
4957  num_mipmap_levels = 1;
4958  }
4959 
4960  for (uint32_t n = 0; n < num_mipmap_levels; ++n) {
4961  uint32_t image_size;
4962  if (big_endian) {
4963  image_size = ktx.get_be_uint32();
4964  } else {
4965  image_size = ktx.get_uint32();
4966  }
4967  PTA_uchar image;
4968 
4969  if (compression == CM_off) {
4970  uint32_t row_size = do_get_expected_mipmap_x_size(cdata, (int)n) * cdata->_num_components * cdata->_component_width;
4971  uint32_t num_rows = do_get_expected_mipmap_y_size(cdata, (int)n) * do_get_expected_mipmap_z_size(cdata, (int)n);
4972  uint32_t row_padded = (row_size + 3) & ~3;
4973 
4974  if (image_size == row_size * num_rows) {
4975  if (row_padded != row_size) {
4976  // Someone tightly packed the image. This is invalid, but because
4977  // we like it tightly packed too, we'll read it anyway.
4978  gobj_cat.warning()
4979  << filename << " does not have proper row padding for mipmap "
4980  "level " << n << "\n";
4981  }
4982  image = PTA_uchar::empty_array(image_size);
4983  ktx.extract_bytes(image.p(), image_size);
4984 
4985  } else if (image_size != row_padded * num_rows) {
4986  gobj_cat.error()
4987  << filename << " has invalid image size " << image_size
4988  << " for mipmap level " << n << " (expected "
4989  << row_padded * num_rows << ")\n";
4990  return false;
4991 
4992  } else {
4993  // Read it row by row.
4994  image = PTA_uchar::empty_array(row_size * num_rows);
4995  uint32_t skip = row_padded - row_size;
4996  unsigned char *p = image.p();
4997  for (uint32_t row = 0; row < num_rows; ++row) {
4998  ktx.extract_bytes(p, row_size);
4999  ktx.skip_bytes(skip);
5000  p += row_size;
5001  }
5002  }
5003 
5004  // Swap red and blue channels if necessary to match Panda conventions.
5005  if (swap_bgr) {
5006  unsigned char *begin = image.p();
5007  const unsigned char *end = image.p() + image.size();
5008  size_t skip = cdata->_num_components;
5009  nassertr(skip == 3 || skip == 4, false);
5010 
5011  switch (cdata->_component_width) {
5012  case 1:
5013  for (unsigned char *p = begin; p < end; p += skip) {
5014  swap(p[0], p[2]);
5015  }
5016  break;
5017  case 2:
5018  for (short *p = (short *)begin; p < (short *)end; p += skip) {
5019  swap(p[0], p[2]);
5020  }
5021  break;
5022  case 4:
5023  for (int *p = (int *)begin; p < (int *)end; p += skip) {
5024  swap(p[0], p[2]);
5025  }
5026  break;
5027  default:
5028  nassert_raise("unexpected channel count");
5029  return false;
5030  }
5031  }
5032 
5033  do_set_ram_mipmap_image(cdata, (int)n, std::move(image),
5034  row_size * do_get_expected_mipmap_y_size(cdata, (int)n));
5035 
5036  } else {
5037  // Compressed image. We'll trust that the file has the right size.
5038  image = PTA_uchar::empty_array(image_size);
5039  ktx.extract_bytes(image.p(), image_size);
5040  do_set_ram_mipmap_image(cdata, (int)n, std::move(image), image_size / depth);
5041  }
5042 
5043  ktx.skip_bytes(3 - ((image_size + 3) & 3));
5044  }
5045 
5046  cdata->_has_read_pages = true;
5047  cdata->_has_read_mipmaps = true;
5048  cdata->_num_mipmap_levels_read = cdata->_ram_images.size();
5049 
5050  if (generate_mipmaps) {
5051  do_generate_ram_mipmap_images(cdata, false);
5052  }
5053  }
5054 
5055  if (in.fail()) {
5056  gobj_cat.error()
5057  << filename << ": truncated KTX file.\n";
5058  return false;
5059  }
5060 
5061  cdata->_loaded_from_image = true;
5062  cdata->_loaded_from_txo = true;
5063 
5064  return true;
5065 }
5066 
5067 /**
5068  * Internal method to write a series of pages and/or mipmap levels to disk
5069  * files.
5070  */
5071 bool Texture::
5072 do_write(CData *cdata,
5073  const Filename &fullpath, int z, int n, bool write_pages, bool write_mipmaps) {
5074  if (is_txo_filename(fullpath)) {
5075  if (!do_has_bam_rawdata(cdata)) {
5076  do_get_bam_rawdata(cdata);
5077  }
5078  nassertr(do_has_bam_rawdata(cdata), false);
5079  return do_write_txo_file(cdata, fullpath);
5080  }
5081 
5082  if (!do_has_uncompressed_ram_image(cdata)) {
5083  do_get_uncompressed_ram_image(cdata);
5084  }
5085 
5086  nassertr(do_has_ram_mipmap_image(cdata, n), false);
5087  nassertr(cdata->_ram_image_compression == CM_off, false);
5088 
5089  if (write_pages && write_mipmaps) {
5090  // Write a sequence of pages * mipmap levels.
5091  Filename fullpath_pattern = Filename::pattern_filename(fullpath);
5092  int num_levels = cdata->_ram_images.size();
5093 
5094  for (int n = 0; n < num_levels; ++n) {
5095  int num_pages = do_get_expected_mipmap_num_pages(cdata, n);
5096 
5097  for (z = 0; z < num_pages; ++z) {
5098  Filename n_pattern = Filename::pattern_filename(fullpath_pattern.get_filename_index(z));
5099 
5100  if (!n_pattern.has_hash()) {
5101  gobj_cat.error()
5102  << "Filename requires two different hash sequences: " << fullpath
5103  << "\n";
5104  return false;
5105  }
5106 
5107  if (!do_write_one(cdata, n_pattern.get_filename_index(n), z, n)) {
5108  return false;
5109  }
5110  }
5111  }
5112 
5113  } else if (write_pages) {
5114  // Write a sequence of pages.
5115  Filename fullpath_pattern = Filename::pattern_filename(fullpath);
5116  if (!fullpath_pattern.has_hash()) {
5117  gobj_cat.error()
5118  << "Filename requires a hash mark: " << fullpath
5119  << "\n";
5120  return false;
5121  }
5122 
5123  int num_pages = cdata->_z_size * cdata->_num_views;
5124  for (z = 0; z < num_pages; ++z) {
5125  if (!do_write_one(cdata, fullpath_pattern.get_filename_index(z), z, n)) {
5126  return false;
5127  }
5128  }
5129 
5130  } else if (write_mipmaps) {
5131  // Write a sequence of mipmap images.
5132  Filename fullpath_pattern = Filename::pattern_filename(fullpath);
5133  if (!fullpath_pattern.has_hash()) {
5134  gobj_cat.error()
5135  << "Filename requires a hash mark: " << fullpath
5136  << "\n";
5137  return false;
5138  }
5139 
5140  int num_levels = cdata->_ram_images.size();
5141  for (int n = 0; n < num_levels; ++n) {
5142  if (!do_write_one(cdata, fullpath_pattern.get_filename_index(n), z, n)) {
5143  return false;
5144  }
5145  }
5146 
5147  } else {
5148  // Write a single file.
5149  if (!do_write_one(cdata, fullpath, z, n)) {
5150  return false;
5151  }
5152  }
5153 
5154  return true;
5155 }
5156 
5157 /**
5158  * Internal method to write the indicated page and mipmap level to a disk
5159  * image file.
5160  */
5161 bool Texture::
5162 do_write_one(CData *cdata, const Filename &fullpath, int z, int n) {
5163  if (!do_has_ram_mipmap_image(cdata, n)) {
5164  return false;
5165  }
5166 
5167  nassertr(cdata->_ram_image_compression == CM_off, false);
5168 
5169  bool success;
5170  if (cdata->_component_type == T_float) {
5171  // Writing a floating-point texture.
5172  PfmFile pfm;
5173  if (!do_store_one(cdata, pfm, z, n)) {
5174  return false;
5175  }
5176  success = pfm.write(fullpath);
5177  } else {
5178  // Writing a normal, integer texture.
5179  PNMImage pnmimage;
5180  if (!do_store_one(cdata, pnmimage, z, n)) {
5181  return false;
5182  }
5183  success = pnmimage.write(fullpath);
5184  }
5185 
5186  if (!success) {
5187  gobj_cat.error()
5188  << "Texture::write() - couldn't write: " << fullpath << endl;
5189  return false;
5190  }
5191 
5192  return true;
5193 }
5194 
5195 /**
5196  * Internal method to copy a page and/or mipmap level to a PNMImage.
5197  */
5198 bool Texture::
5199 do_store_one(CData *cdata, PNMImage &pnmimage, int z, int n) {
5200  // First, reload the ram image if necessary.
5201  do_get_uncompressed_ram_image(cdata);
5202 
5203  if (!do_has_ram_mipmap_image(cdata, n)) {
5204  return false;
5205  }
5206 
5207  nassertr(z >= 0 && z < do_get_expected_mipmap_num_pages(cdata, n), false);
5208  nassertr(cdata->_ram_image_compression == CM_off, false);
5209 
5210  if (cdata->_component_type == T_float) {
5211  // PNMImage by way of PfmFile.
5212  PfmFile pfm;
5213  bool success = convert_to_pfm(pfm,
5214  do_get_expected_mipmap_x_size(cdata, n),
5215  do_get_expected_mipmap_y_size(cdata, n),
5216  cdata->_num_components, cdata->_component_width,
5217  cdata->_ram_images[n]._image,
5218  do_get_ram_mipmap_page_size(cdata, n), z);
5219  if (!success) {
5220  return false;
5221  }
5222  return pfm.store(pnmimage);
5223  }
5224 
5225  return convert_to_pnmimage(pnmimage,
5226  do_get_expected_mipmap_x_size(cdata, n),
5227  do_get_expected_mipmap_y_size(cdata, n),
5228  cdata->_num_components, cdata->_component_type,
5229  is_srgb(cdata->_format),
5230  cdata->_ram_images[n]._image,
5231  do_get_ram_mipmap_page_size(cdata, n), z);
5232 }
5233 
5234 /**
5235  * Internal method to copy a page and/or mipmap level to a PfmFile.
5236  */
5237 bool Texture::
5238 do_store_one(CData *cdata, PfmFile &pfm, int z, int n) {
5239  // First, reload the ram image if necessary.
5240  do_get_uncompressed_ram_image(cdata);
5241 
5242  if (!do_has_ram_mipmap_image(cdata, n)) {
5243  return false;
5244  }
5245 
5246  nassertr(z >= 0 && z < do_get_expected_mipmap_num_pages(cdata, n), false);
5247  nassertr(cdata->_ram_image_compression == CM_off, false);
5248 
5249  if (cdata->_component_type != T_float) {
5250  // PfmFile by way of PNMImage.
5251  PNMImage pnmimage;
5252  bool success =
5253  convert_to_pnmimage(pnmimage,
5254  do_get_expected_mipmap_x_size(cdata, n),
5255  do_get_expected_mipmap_y_size(cdata, n),
5256  cdata->_num_components, cdata->_component_type,
5257  is_srgb(cdata->_format),
5258  cdata->_ram_images[n]._image,
5259  do_get_ram_mipmap_page_size(cdata, n), z);
5260  if (!success) {
5261  return false;
5262  }
5263  return pfm.load(pnmimage);
5264  }
5265 
5266  return convert_to_pfm(pfm,
5267  do_get_expected_mipmap_x_size(cdata, n),
5268  do_get_expected_mipmap_y_size(cdata, n),
5269  cdata->_num_components, cdata->_component_width,
5270  cdata->_ram_images[n]._image,
5271  do_get_ram_mipmap_page_size(cdata, n), z);
5272 }
5273 
5274 /**
5275  * Called internally when write() detects a txo filename.
5276  */
5277 bool Texture::
5278 do_write_txo_file(const CData *cdata, const Filename &fullpath) const {
5280  Filename filename = Filename::binary_filename(fullpath);
5281  ostream *out = vfs->open_write_file(filename, true, true);
5282  if (out == nullptr) {
5283  gobj_cat.error()
5284  << "Unable to open " << filename << "\n";
5285  return false;
5286  }
5287 
5288  bool success = do_write_txo(cdata, *out, fullpath);
5289  vfs->close_write_file(out);
5290  return success;
5291 }
5292 
5293 /**
5294  *
5295  */
5296 bool Texture::
5297 do_write_txo(const CData *cdata, ostream &out, const string &filename) const {
5298  DatagramOutputFile dout;
5299 
5300  if (!dout.open(out, filename)) {
5301  gobj_cat.error()
5302  << "Could not write texture object: " << filename << "\n";
5303  return false;
5304  }
5305 
5306  if (!dout.write_header(_bam_header)) {
5307  gobj_cat.error()
5308  << "Unable to write to " << filename << "\n";
5309  return false;
5310  }
5311 
5312  BamWriter writer(&dout);
5313  if (!writer.init()) {
5314  return false;
5315  }
5316 
5317  writer.set_file_texture_mode(BamWriter::BTM_rawdata);
5318 
5319  if (!writer.write_object(this)) {
5320  return false;
5321  }
5322 
5323  if (!do_has_bam_rawdata(cdata)) {
5324  gobj_cat.error()
5325  << get_name() << " does not have ram image\n";
5326  return false;
5327  }
5328 
5329  return true;
5330 }
5331 
/**
 * If the texture has a ram image already, this acquires the CData write lock
 * and returns it.
 *
 * If the texture lacks a ram image, this performs do_reload_ram_image(), but
 * without holding the lock on this particular Texture object, to avoid
 * holding the lock across what might be a slow operation.  Instead, the
 * reload is performed in a copy of the texture object, and then the lock is
 * acquired and the data is copied in.
 *
 * In any case, the return value is a locked CData object, which must be
 * released with an explicit call to release_write().  The CData object will
 * have a ram image unless for some reason do_reload_ram_image() fails.
 */
Texture::CData *Texture::
unlocked_ensure_ram_image(bool allow_compression) {
  Thread *current_thread = Thread::get_current_thread();

  // First, wait for any other threads that might be simultaneously performing
  // the same operation.
  MutexHolder holder(_lock);
  while (_reloading) {
    _cvar.wait();
  }

  // Then make sure we still need to reload before continuing.
  const CData *cdata = _cycler.read(current_thread);
  bool has_ram_image = do_has_ram_image(cdata);
  if (has_ram_image && !allow_compression && cdata->_ram_image_compression != Texture::CM_off) {
    // If we don't want compression, but the ram image we have is pre-
    // compressed, we don't consider it.
    has_ram_image = false;
  }
  if (has_ram_image || !do_can_reload(cdata)) {
    // We don't need to reload after all, or maybe we can't reload anyway.
    // Return, but elevate the lock first, as we promised.
    return _cycler.elevate_read_upstream(cdata, false, current_thread);
  }

  // We need to reload.  Claim the _reloading flag (under _lock) so other
  // threads wait in the loop above instead of reloading concurrently.
  nassertr(!_reloading, nullptr);
  _reloading = true;

  PT(Texture) tex = do_make_copy(cdata);
  _cycler.release_read(cdata);
  // Temporarily drop _lock across the slow reload; it is re-acquired below,
  // so the MutexHolder's release at function exit stays balanced.
  _lock.unlock();

  // Perform the actual reload in a copy of the texture, while our own mutex
  // is left unlocked.
  CDWriter cdata_tex(tex->_cycler, true);
  tex->do_reload_ram_image(cdata_tex, allow_compression);

  _lock.lock();

  CData *cdataw = _cycler.write_upstream(false, current_thread);

  // Rather than calling do_assign(), which would copy *all* of the reloaded
  // texture's properties over, we only copy in the ones which are relevant to
  // the ram image.  This way, if the properties have changed during the
  // reload (for instance, because we reloaded a txo), it won't contaminate
  // the original texture.
  cdataw->_orig_file_x_size = cdata_tex->_orig_file_x_size;
  cdataw->_orig_file_y_size = cdata_tex->_orig_file_y_size;

  // If any of *these* properties have changed, the texture has changed in
  // some fundamental way.  Update it appropriately.
  if (cdata_tex->_x_size != cdataw->_x_size ||
      cdata_tex->_y_size != cdataw->_y_size ||
      cdata_tex->_z_size != cdataw->_z_size ||
      cdata_tex->_num_views != cdataw->_num_views ||
      cdata_tex->_num_components != cdataw->_num_components ||
      cdata_tex->_component_width != cdataw->_component_width ||
      cdata_tex->_texture_type != cdataw->_texture_type ||
      cdata_tex->_component_type != cdataw->_component_type) {

    cdataw->_x_size = cdata_tex->_x_size;
    cdataw->_y_size = cdata_tex->_y_size;
    cdataw->_z_size = cdata_tex->_z_size;
    cdataw->_num_views = cdata_tex->_num_views;

    cdataw->_num_components = cdata_tex->_num_components;
    cdataw->_component_width = cdata_tex->_component_width;
    cdataw->_texture_type = cdata_tex->_texture_type;
    cdataw->_format = cdata_tex->_format;
    cdataw->_component_type = cdata_tex->_component_type;

    cdataw->inc_properties_modified();
    cdataw->inc_image_modified();
  }

  cdataw->_keep_ram_image = cdata_tex->_keep_ram_image;
  cdataw->_ram_image_compression = cdata_tex->_ram_image_compression;
  cdataw->_ram_images = cdata_tex->_ram_images;

  // Release the _reloading claim and wake any threads waiting on it.
  nassertr(_reloading, nullptr);
  _reloading = false;

  // We don't generally increment the cdata->_image_modified semaphore,
  // because this is just a reload, and presumably the image hasn't changed
  // (unless we hit the if condition above).

  _cvar.notify_all();

  // Return the still-locked cdata.
  return cdataw;
}
5438 
5439 /**
5440  * Called when the Texture image is required but the ram image is not
5441  * available, this will reload it from disk or otherwise do whatever is
5442  * required to make it available, if possible.
5443  *
5444  * Assumes the lock is already held. The lock will be held during the
5445  * duration of this operation.
5446  */
5447 void Texture::
5448 do_reload_ram_image(CData *cdata, bool allow_compression) {
5450  PT(BamCacheRecord) record;
5451 
5452  if (!do_has_compression(cdata)) {
5453  allow_compression = false;
5454  }
5455 
5456  if ((cache->get_cache_textures() || (allow_compression && cache->get_cache_compressed_textures())) && !textures_header_only) {
5457  // See if the texture can be found in the on-disk cache, if it is active.
5458 
5459  record = cache->lookup(cdata->_fullpath, "txo");
5460  if (record != nullptr &&
5461  record->has_data()) {
5462  PT(Texture) tex = DCAST(Texture, record->get_data());
5463 
5464  // But don't use the cache record if the config parameters have changed,
5465  // and we want a different-sized texture now.
5466  int x_size = cdata->_orig_file_x_size;
5467  int y_size = cdata->_orig_file_y_size;
5468  do_adjust_this_size(cdata, x_size, y_size, cdata->_filename.get_basename(), true);
5469  if (x_size != tex->get_x_size() || y_size != tex->get_y_size()) {
5470  if (gobj_cat.is_debug()) {
5471  gobj_cat.debug()
5472  << "Cached texture " << *this << " has size "
5473  << tex->get_x_size() << " x " << tex->get_y_size()
5474  << " instead of " << x_size << " x " << y_size
5475  << "; ignoring cache.\n";
5476  }
5477  } else {
5478  // Also don't keep the cached version if it's compressed but we want
5479  // uncompressed.
5480  if (!allow_compression && tex->get_ram_image_compression() != Texture::CM_off) {
5481  if (gobj_cat.is_debug()) {
5482  gobj_cat.debug()
5483  << "Cached texture " << *this
5484  << " is compressed in cache; ignoring cache.\n";
5485  }
5486  } else {
5487  gobj_cat.info()
5488  << "Texture " << get_name() << " reloaded from disk cache\n";
5489  // We don't want to replace all the texture parameters--for
5490  // instance, we don't want to change the filter type or the border
5491  // color or anything--we just want to get the image and necessary
5492  // associated parameters.
5493  CDReader cdata_tex(tex->_cycler);
5494  cdata->_x_size = cdata_tex->_x_size;
5495  cdata->_y_size = cdata_tex->_y_size;
5496  if (cdata->_num_components != cdata_tex->_num_components) {
5497  cdata->_num_components = cdata_tex->_num_components;
5498  cdata->_format = cdata_tex->_format;
5499  }
5500  cdata->_component_type = cdata_tex->_component_type;
5501  cdata->_compression = cdata_tex->_compression;
5502  cdata->_ram_image_compression = cdata_tex->_ram_image_compression;
5503  cdata->_ram_images = cdata_tex->_ram_images;
5504  cdata->_loaded_from_image = true;
5505 
5506  bool was_compressed = (cdata->_ram_image_compression != CM_off);
5507  if (do_consider_auto_process_ram_image(cdata, uses_mipmaps(), allow_compression)) {
5508  bool is_compressed = (cdata->_ram_image_compression != CM_off);
5509  if (!was_compressed && is_compressed &&
5510  cache->get_cache_compressed_textures()) {
5511  // We've re-compressed the image after loading it from the
5512  // cache. To keep the cache current, rewrite it to the cache
5513  // now, in its newly compressed form.
5514  record->set_data(this, this);
5515  cache->store(record);
5516  }
5517  }
5518 
5519  return;
5520  }
5521  }
5522  }
5523  }
5524 
5525  gobj_cat.info()
5526  << "Reloading texture " << get_name() << "\n";
5527 
5528  int z = 0;
5529  int n = 0;
5530 
5531  if (cdata->_has_read_pages) {
5532  z = cdata->_z_size;
5533  }
5534  if (cdata->_has_read_mipmaps) {
5535  n = cdata->_num_mipmap_levels_read;
5536  }
5537 
5538  cdata->_loaded_from_image = false;
5539  Format orig_format = cdata->_format;
5540  int orig_num_components = cdata->_num_components;
5541 
5542  LoaderOptions options;
5543  if (allow_compression) {
5544  options.set_texture_flags(LoaderOptions::TF_preload |
5545  LoaderOptions::TF_allow_compression);
5546  } else {
5547  options.set_texture_flags(LoaderOptions::TF_preload);
5548  }
5549  do_read(cdata, cdata->_fullpath, cdata->_alpha_fullpath,
5550  cdata->_primary_file_num_channels, cdata->_alpha_file_channel,
5551  z, n, cdata->_has_read_pages, cdata->_has_read_mipmaps, options, nullptr);
5552 
5553  if (orig_num_components == cdata->_num_components) {
5554  // Restore the original format, in case it was needlessly changed during
5555  // the reload operation.
5556  cdata->_format = orig_format;
5557  }
5558 
5559  if (do_has_ram_image(cdata) && record != nullptr) {
5560  if (cache->get_cache_textures() || (cdata->_ram_image_compression != CM_off && cache->get_cache_compressed_textures())) {
5561  // Update the cache.
5562  if (record != nullptr) {
5563  record->add_dependent_file(cdata->_fullpath);
5564  }
5565  record->set_data(this, this);
5566  cache->store(record);
5567  }
5568  }
5569 }
5570 
5571 /**
5572  * This is called internally to uniquify the ram image pointer without
5573  * updating cdata->_image_modified.
5574  */
5575 PTA_uchar Texture::
5576 do_modify_ram_image(CData *cdata) {
5577  if (cdata->_ram_images.empty() || cdata->_ram_images[0]._image.empty() ||
5578  cdata->_ram_image_compression != CM_off) {
5579  do_make_ram_image(cdata);
5580  } else {
5581  do_clear_ram_mipmap_images(cdata);
5582  }
5583  return cdata->_ram_images[0]._image;
5584 }
5585 
5586 /**
5587  * This is called internally to make a new ram image without updating
5588  * cdata->_image_modified.
5589  */
5590 PTA_uchar Texture::
5591 do_make_ram_image(CData *cdata) {
5592  int image_size = do_get_expected_ram_image_size(cdata);
5593  cdata->_ram_images.clear();
5594  cdata->_ram_images.push_back(RamImage());
5595  cdata->_ram_images[0]._page_size = do_get_expected_ram_page_size(cdata);
5596  cdata->_ram_images[0]._image = PTA_uchar::empty_array(image_size, get_class_type());
5597  cdata->_ram_images[0]._pointer_image = nullptr;
5598  cdata->_ram_image_compression = CM_off;
5599 
5600  if (cdata->_has_clear_color) {
5601  // Fill the image with the clear color.
5602  unsigned char pixel[16];
5603  const int pixel_size = do_get_clear_data(cdata, pixel);
5604  nassertr(pixel_size > 0, cdata->_ram_images[0]._image);
5605 
5606  unsigned char *image_data = cdata->_ram_images[0]._image;
5607  for (int i = 0; i < image_size; i += pixel_size) {
5608  memcpy(image_data + i, pixel, pixel_size);
5609  }
5610  }
5611 
5612  return cdata->_ram_images[0]._image;
5613 }
5614 
5615 /**
5616  * Replaces the current system-RAM image with the new data. If compression is
5617  * not CM_off, it indicates that the new data is already pre-compressed in the
5618  * indicated format.
5619  *
5620  * This does *not* affect keep_ram_image.
5621  */
5622 void Texture::
5623 do_set_ram_image(CData *cdata, CPTA_uchar image, Texture::CompressionMode compression,
5624  size_t page_size) {
5625  nassertv(compression != CM_default);
5626  nassertv(compression != CM_off || image.size() == do_get_expected_ram_image_size(cdata));
5627  if (cdata->_ram_images.empty()) {
5628  cdata->_ram_images.push_back(RamImage());
5629  } else {
5630  do_clear_ram_mipmap_images(cdata);
5631  }
5632  if (page_size == 0) {
5633  page_size = image.size();
5634  }
5635  if (cdata->_ram_images[0]._image != image ||
5636  cdata->_ram_images[0]._page_size != page_size ||
5637  cdata->_ram_image_compression != compression) {
5638  cdata->_ram_images[0]._image = image.cast_non_const();
5639  cdata->_ram_images[0]._page_size = page_size;
5640  cdata->_ram_images[0]._pointer_image = nullptr;
5641  cdata->_ram_image_compression = compression;
5642  cdata->inc_image_modified();
5643  }
5644 }
5645 
5646 /**
5647  * This is called internally to uniquify the nth mipmap image pointer without
5648  * updating cdata->_image_modified.
5649  */
5650 PTA_uchar Texture::
5651 do_modify_ram_mipmap_image(CData *cdata, int n) {
5652  nassertr(cdata->_ram_image_compression == CM_off, PTA_uchar());
5653 
5654  if (n >= (int)cdata->_ram_images.size() ||
5655  cdata->_ram_images[n]._image.empty()) {
5656  do_make_ram_mipmap_image(cdata, n);
5657  }
5658  return cdata->_ram_images[n]._image;
5659 }
5660 
5661 /**
5662  *
5663  */
5664 PTA_uchar Texture::
5665 do_make_ram_mipmap_image(CData *cdata, int n) {
5666  nassertr(cdata->_ram_image_compression == CM_off, PTA_uchar(get_class_type()));
5667 
5668  while (n >= (int)cdata->_ram_images.size()) {
5669  cdata->_ram_images.push_back(RamImage());
5670  }
5671 
5672  size_t image_size = do_get_expected_ram_mipmap_image_size(cdata, n);
5673  cdata->_ram_images[n]._image = PTA_uchar::empty_array(image_size, get_class_type());
5674  cdata->_ram_images[n]._pointer_image = nullptr;
5675  cdata->_ram_images[n]._page_size = do_get_expected_ram_mipmap_page_size(cdata, n);
5676 
5677  if (cdata->_has_clear_color) {
5678  // Fill the image with the clear color.
5679  unsigned char pixel[16];
5680  const size_t pixel_size = (size_t)do_get_clear_data(cdata, pixel);
5681  nassertr(pixel_size > 0, cdata->_ram_images[n]._image);
5682 
5683  unsigned char *image_data = cdata->_ram_images[n]._image;
5684  for (size_t i = 0; i < image_size; i += pixel_size) {
5685  memcpy(image_data + i, pixel, pixel_size);
5686  }
5687  }
5688 
5689  return cdata->_ram_images[n]._image;
5690 }
5691 
5692 /**
5693  *
5694  */
5695 void Texture::
5696 do_set_ram_mipmap_image(CData *cdata, int n, CPTA_uchar image, size_t page_size) {
5697  nassertv(cdata->_ram_image_compression != CM_off || image.size() == do_get_expected_ram_mipmap_image_size(cdata, n));
5698 
5699  while (n >= (int)cdata->_ram_images.size()) {
5700  cdata->_ram_images.push_back(RamImage());
5701  }
5702  if (page_size == 0) {
5703  page_size = image.size();
5704  }
5705 
5706  if (cdata->_ram_images[n]._image != image ||
5707  cdata->_ram_images[n]._page_size != page_size) {
5708  cdata->_ram_images[n]._image = image.cast_non_const();
5709  cdata->_ram_images[n]._pointer_image = nullptr;
5710  cdata->_ram_images[n]._page_size = page_size;
5711  cdata->inc_image_modified();
5712  }
5713 }
5714 
/**
 * Returns a string with a single pixel representing the clear color of the
 * texture in the format of this texture.
 *
 * In other words, to create an uncompressed RAM texture filled with the clear
 * color, it should be initialized with this string repeated for every pixel.
 *
 * Writes the encoded pixel into `into` (which must be at least 16 bytes) and
 * returns the number of bytes written: num_components * component_width.
 */
size_t Texture::
do_get_clear_data(const CData *cdata, unsigned char *into) const {
  nassertr(cdata->_has_clear_color, 0);

  int num_components = cdata->_num_components;
  nassertr(num_components > 0, 0);
  nassertr(num_components <= 4, 0);

  LVecBase4 clear_value = cdata->_clear_color;

  // Swap red and blue components.  The ram image apparently stores the
  // channels in reversed (BGR) order -- NOTE(review): confirm against the
  // ram-image layout documentation.
  if (num_components >= 3) {
    std::swap(clear_value[0], clear_value[2]);
  }

  switch (cdata->_component_type) {
  case T_unsigned_byte:
    if (is_srgb(cdata->_format)) {
      xel color;
      xelval alpha;
      encode_sRGB_uchar(clear_value, color, alpha);
      // Deliberate fallthrough: each case writes one component, then falls
      // into the next lower component.
      switch (num_components) {
      case 4: into[3] = (unsigned char)alpha;   // fall through
      case 3: into[2] = (unsigned char)color.b; // fall through
      case 2: into[1] = (unsigned char)color.g; // fall through
      case 1: into[0] = (unsigned char)color.r;
      }
    } else {
      // Clamp to [0, 1], then scale to the full byte range.
      LColor scaled = clear_value.fmin(LColor(1)).fmax(LColor::zero());
      scaled *= 255;
      for (int i = 0; i < num_components; ++i) {
        into[i] = (unsigned char)scaled[i];
      }
    }
    break;

  case T_unsigned_short:
    {
      // Clamp to [0, 1], then scale to the full 16-bit range.
      LColor scaled = clear_value.fmin(LColor(1)).fmax(LColor::zero());
      scaled *= 65535;
      for (int i = 0; i < num_components; ++i) {
        ((unsigned short *)into)[i] = (unsigned short)scaled[i];
      }
      break;
    }

  case T_float:
    // Floats are stored unscaled.
    for (int i = 0; i < num_components; ++i) {
      ((float *)into)[i] = clear_value[i];
    }
    break;

  case T_unsigned_int_24_8:
    // Packed depth-stencil: component 0 scaled into the high 24 bits,
    // component 1 clamped to [0, 255] in the low 8 bits.
    nassertr(num_components == 1, 0);
    *((unsigned int *)into) =
      ((unsigned int)(clear_value[0] * 16777215) << 8) +
      (unsigned int)max(min(clear_value[1], (PN_stdfloat)255), (PN_stdfloat)0);
    break;

  case T_int:
    // Note: there are no 32-bit UNORM textures.  Therefore, we don't do any
    // normalization here, either.
    for (int i = 0; i < num_components; ++i) {
      ((int *)into)[i] = (int)clear_value[i];
    }
    break;

  case T_byte:
    {
      // Signed bytes: clamp to [-1, 1], then scale to [-127, 127].
      LColor scaled = clear_value.fmin(LColor(1)).fmax(LColor(-1));
      scaled *= 127;
      for (int i = 0; i < num_components; ++i) {
        ((signed char *)into)[i] = (signed char)scaled[i];
      }
      break;
    }

  case T_short:
    {
      // Signed shorts: clamp to [-1, 1], then scale to [-32767, 32767].
      LColor scaled = clear_value.fmin(LColor(1)).fmax(LColor(-1));
      scaled *= 32767;
      for (int i = 0; i < num_components; ++i) {
        ((short *)into)[i] = (short)scaled[i];
      }
      break;
    }

  case T_half_float:
    // Manual float32 -> float16 conversion: extract sign, exponent
    // (rebiased from 127 to 15 and clamped to [0, 31]) and mantissa
    // (rounded, then truncated to 10 bits).
    for (int i = 0; i < num_components; ++i) {
      union {
        uint32_t ui;
        float uf;
      } v;
      v.uf = clear_value[i];
      uint16_t sign = ((v.ui & 0x80000000u) >> 16u);
      uint32_t mantissa = (v.ui & 0x007fffffu);
      uint16_t exponent = (uint16_t)std::min(std::max((int)((v.ui & 0x7f800000u) >> 23u) - 112, 0), 31);
      mantissa += (mantissa & 0x00001000u) << 1u;
      ((uint16_t *)into)[i] = (uint16_t)(sign | ((exponent << 10u) | (mantissa >> 13u)));
    }
    break;

  case T_unsigned_int:
    // Note: there are no 32-bit UNORM textures.  Therefore, we don't do any
    // normalization here, either.
    for (int i = 0; i < num_components; ++i) {
      ((unsigned int *)into)[i] = (unsigned int)clear_value[i];
    }
  }

  return num_components * cdata->_component_width;
}
5834 
5835 /**
5836  * Should be called after a texture has been loaded into RAM, this considers
5837  * generating mipmaps and/or compressing the RAM image.
5838  *
5839  * Returns true if the image was modified by this operation, false if it
5840  * wasn't.
5841  */
5842 bool Texture::
5843 consider_auto_process_ram_image(bool generate_mipmaps, bool allow_compression) {
5844  CDWriter cdata(_cycler, false);
5845  return do_consider_auto_process_ram_image(cdata, generate_mipmaps, allow_compression);
5846 }
5847 
5848 /**
5849  * Should be called after a texture has been loaded into RAM, this considers
5850  * generating mipmaps and/or compressing the RAM image.
5851  *
5852  * Returns true if the image was modified by this operation, false if it
5853  * wasn't.
5854  */
5855 bool Texture::
5856 do_consider_auto_process_ram_image(CData *cdata, bool generate_mipmaps,
5857  bool allow_compression) {
5858  bool modified = false;
5859 
5860  if (generate_mipmaps && !driver_generate_mipmaps &&
5861  cdata->_ram_images.size() == 1) {
5862  do_generate_ram_mipmap_images(cdata, false);
5863  modified = true;
5864  }
5865 
5866  if (allow_compression && !driver_compress_textures) {
5867  CompressionMode compression = cdata->_compression;
5868  if (compression == CM_default && compressed_textures) {
5869  compression = CM_on;
5870  }
5871  if (compression != CM_off && cdata->_ram_image_compression == CM_off) {
5873  if (do_compress_ram_image(cdata, compression, QL_default, gsg)) {
5874  if (gobj_cat.is_debug()) {
5875  gobj_cat.debug()
5876  << "Compressed " << get_name() << " with "
5877  << cdata->_ram_image_compression << "\n";
5878  }
5879  modified = true;
5880  }
5881  }
5882  }
5883 
5884  return modified;
5885 }
5886 
5887 /**
5888  *
5889  */
5890 bool Texture::
5891 do_compress_ram_image(CData *cdata, Texture::CompressionMode compression,
5892  Texture::QualityLevel quality_level,
5894  nassertr(compression != CM_off, false);
5895 
5896  if (cdata->_ram_images.empty() || cdata->_ram_image_compression != CM_off) {
5897  return false;
5898  }
5899 
5900  if (compression == CM_on) {
5901  // Select an appropriate compression mode automatically.
5902  switch (cdata->_format) {
5903  case Texture::F_rgbm:
5904  case Texture::F_rgb:
5905  case Texture::F_rgb5:
5906  case Texture::F_rgba5:
5907  case Texture::F_rgb8:
5908  case Texture::F_rgb12:
5909  case Texture::F_rgb332:
5910  case Texture::F_rgb16:
5911  case Texture::F_rgb32:
5912  case Texture::F_rgb10_a2:
5913  if (gsg == nullptr || gsg->get_supports_compressed_texture_format(CM_dxt1)) {
5914  compression = CM_dxt1;
5915  } else if (gsg->get_supports_compressed_texture_format(CM_dxt3)) {
5916  compression = CM_dxt3;
5917  } else if (gsg->get_supports_compressed_texture_format(CM_dxt5)) {
5918  compression = CM_dxt5;
5919  } else if (gsg->get_supports_compressed_texture_format(CM_etc2)) {
5920  compression = CM_etc2;
5921  } else if (gsg->get_supports_compressed_texture_format(CM_etc1)) {
5922  compression = CM_etc1;
5923  }
5924  break;
5925 
5926  case Texture::F_rgba4:
5927  if (gsg == nullptr || gsg->get_supports_compressed_texture_format(CM_dxt3)) {
5928  compression = CM_dxt3;
5929  } else if (gsg->get_supports_compressed_texture_format(CM_dxt5)) {
5930  compression = CM_dxt5;
5931  } else if (gsg->get_supports_compressed_texture_format(CM_etc2)) {
5932  compression = CM_etc2;
5933  }
5934  break;
5935 
5936  case Texture::F_rgba:
5937  case Texture::F_rgba8:
5938  case Texture::F_rgba12:
5939  case Texture::F_rgba16:
5940  case Texture::F_rgba32:
5941  if (gsg == nullptr || gsg->get_supports_compressed_texture_format(CM_dxt5)) {
5942  compression = CM_dxt5;
5943  } else if (gsg->get_supports_compressed_texture_format(CM_etc2)) {
5944  compression = CM_etc2;
5945  }
5946  break;
5947 
5948  case Texture::F_red:
5949  case Texture::F_rg:
5950  if (gsg == nullptr || gsg->get_supports_compressed_texture_format(CM_rgtc)) {
5951  compression = CM_rgtc;
5952  } else if (gsg->get_supports_compressed_texture_format(CM_eac)) {
5953  compression = CM_eac;
5954  }
5955  break;
5956 
5957  default:
5958  break;
5959  }
5960  }
5961 
5962  // Choose an appropriate quality level.
5963  if (quality_level == Texture::QL_default) {
5964  quality_level = cdata->_quality_level;
5965  }
5966  if (quality_level == Texture::QL_default) {
5967  quality_level = texture_quality_level;
5968  }
5969 
5970  if (compression == CM_rgtc) {
5971  // We should compress RGTC ourselves, as squish does not support it.
5972  if (cdata->_component_type != T_unsigned_byte) {
5973  return false;
5974  }
5975 
5976  if (!do_has_all_ram_mipmap_images(cdata)) {
5977  // If we're about to compress the RAM image, we should ensure that we
5978  // have all of the mipmap levels first.
5979  do_generate_ram_mipmap_images(cdata, false);
5980  }
5981 
5982  RamImages compressed_ram_images;
5983  compressed_ram_images.resize(cdata->_ram_images.size());
5984 
5985  for (size_t n = 0; n < cdata->_ram_images.size(); ++n) {
5986  const RamImage *uncompressed_image = &cdata->_ram_images[n];
5987 
5988  int x_size = do_get_expected_mipmap_x_size(cdata, n);
5989  int y_size = do_get_expected_mipmap_y_size(cdata, n);
5990  int num_pages = do_get_expected_mipmap_num_pages(cdata, n);
5991 
5992  // It is important that we handle image sizes that aren't a multiple of
5993  // the block size, since this method may be used to compress mipmaps,
5994  // which go all the way to 1x1. Pad the image if necessary.
5995  RamImage temp_image;
5996  if ((x_size | y_size) & 0x3) {
5997  int virtual_x_size = x_size;
5998  int virtual_y_size = y_size;
5999  x_size = (x_size + 3) & ~0x3;
6000  y_size = (y_size + 3) & ~0x3;
6001 
6002  temp_image._page_size = x_size * y_size * cdata->_num_components;
6003  temp_image._image = PTA_uchar::empty_array(temp_image._page_size * num_pages);
6004 
6005  for (int z = 0; z < num_pages; ++z) {
6006  unsigned char *dest = temp_image._image.p() + z * temp_image._page_size;
6007  unsigned const char *src = uncompressed_image->_image.p() + z * uncompressed_image->_page_size;
6008 
6009  for (int y = 0; y < virtual_y_size; ++y) {
6010  memcpy(dest, src, virtual_x_size);
6011  src += virtual_x_size;
6012  dest += x_size;
6013  }
6014  }
6015 
6016  uncompressed_image = &temp_image;
6017  }
6018 
6019  // Create a new image to hold the compressed texture pages.
6020  RamImage &compressed_image = compressed_ram_images[n];
6021  compressed_image._page_size = (x_size * y_size * cdata->_num_components) >> 1;
6022  compressed_image._image = PTA_uchar::empty_array(compressed_image._page_size * num_pages);
6023 
6024  if (cdata->_num_components == 1) {
6025  do_compress_ram_image_bc4(*uncompressed_image, compressed_image,
6026  x_size, y_size, num_pages);
6027  } else if (cdata->_num_components == 2) {
6028  do_compress_ram_image_bc5(*uncompressed_image, compressed_image,
6029  x_size, y_size, num_pages);
6030  } else {
6031  // Invalid.
6032  return false;
6033  }
6034  }
6035 
6036  cdata->_ram_images.swap(compressed_ram_images);
6037  cdata->_ram_image_compression = CM_rgtc;
6038  return true;
6039  }
6040 
6041 #ifdef HAVE_SQUISH
6042  if (cdata->_texture_type != TT_3d_texture &&
6043  cdata->_texture_type != TT_2d_texture_array &&
6044  cdata->_component_type == T_unsigned_byte) {
6045  int squish_flags = 0;
6046  switch (compression) {
6047  case CM_dxt1:
6048  squish_flags |= squish::kDxt1;
6049  break;
6050 
6051  case CM_dxt3:
6052  squish_flags |= squish::kDxt3;
6053  break;
6054 
6055  case CM_dxt5:
6056  squish_flags |= squish::kDxt5;
6057  break;
6058 
6059  default:
6060  break;
6061  }
6062 
6063  if (squish_flags != 0) {
6064  // This compression mode is supported by squish; use it.
6065  switch (quality_level) {
6066  case QL_fastest:
6067  squish_flags |= squish::kColourRangeFit;
6068  break;
6069 
6070  case QL_normal:
6071  // ColourClusterFit is just too slow for everyday use.
6072  squish_flags |= squish::kColourRangeFit;
6073  // squish_flags |= squish::kColourClusterFit;
6074  break;
6075 
6076  case QL_best:
6077  squish_flags |= squish::kColourIterativeClusterFit;
6078  break;
6079 
6080  default:
6081  break;
6082  }
6083 
6084  if (do_squish(cdata, compression, squish_flags)) {
6085  return true;
6086  }
6087  }
6088  }
6089 #endif // HAVE_SQUISH
6090 
6091  return false;
6092 }
6093 
/**
 * Decompresses the in-RAM image (all mipmap levels) back to CM_off.
 * RGTC (BC4/BC5) images are decompressed by our own code; DXT1/3/5 are
 * handed to libsquish when it is available.  Returns true on success,
 * false if the compression mode could not be handled.
 *
 * Assumes the lock is already held.
 */
bool Texture::
do_uncompress_ram_image(CData *cdata) {
  nassertr(!cdata->_ram_images.empty(), false);

  if (cdata->_ram_image_compression == CM_rgtc) {
    // We should decompress RGTC ourselves, as squish doesn't support it.
    RamImages uncompressed_ram_images;
    uncompressed_ram_images.resize(cdata->_ram_images.size());

    for (size_t n = 0; n < cdata->_ram_images.size(); ++n) {
      const RamImage &compressed_image = cdata->_ram_images[n];

      int x_size = do_get_expected_mipmap_x_size(cdata, n);
      int y_size = do_get_expected_mipmap_y_size(cdata, n);
      int num_pages = do_get_expected_mipmap_num_pages(cdata, n);

      // Allocate the destination buffer at the expected uncompressed size.
      RamImage &uncompressed_image = uncompressed_ram_images[n];
      uncompressed_image._page_size = do_get_expected_ram_mipmap_page_size(cdata, n);
      uncompressed_image._image = PTA_uchar::empty_array(uncompressed_image._page_size * num_pages);

      // RGTC is BC4 for one channel, BC5 for two.
      if (cdata->_num_components == 1) {
        do_uncompress_ram_image_bc4(compressed_image, uncompressed_image,
                                    x_size, y_size, num_pages);
      } else if (cdata->_num_components == 2) {
        do_uncompress_ram_image_bc5(compressed_image, uncompressed_image,
                                    x_size, y_size, num_pages);
      } else {
        // Invalid.  RGTC never applies to 3- or 4-channel images.
        return false;
      }
    }
    cdata->_ram_images.swap(uncompressed_ram_images);
    cdata->_ram_image_compression = CM_off;
    return true;
  }

#ifdef HAVE_SQUISH
  // squish only handles 2-d, byte-component images.
  if (cdata->_texture_type != TT_3d_texture &&
      cdata->_texture_type != TT_2d_texture_array &&
      cdata->_component_type == T_unsigned_byte) {
    int squish_flags = 0;
    switch (cdata->_ram_image_compression) {
    case CM_dxt1:
      squish_flags |= squish::kDxt1;
      break;

    case CM_dxt3:
      squish_flags |= squish::kDxt3;
      break;

    case CM_dxt5:
      squish_flags |= squish::kDxt5;
      break;

    default:
      break;
    }

    if (squish_flags != 0) {
      // This compression mode is supported by squish; use it.
      if (do_unsquish(cdata, squish_flags)) {
        return true;
      }
    }
  }
#endif  // HAVE_SQUISH
  return false;
}
6165 
6166 /**
6167  * Compresses a RAM image using BC4 compression.
6168  */
6169 void Texture::
6170 do_compress_ram_image_bc4(const RamImage &uncompressed_image,
6171  RamImage &compressed_image,
6172  int x_size, int y_size, int num_pages) {
6173  int x_blocks = (x_size >> 2);
6174  int y_blocks = (y_size >> 2);
6175 
6176  // NB. This algorithm isn't fully optimal, since it doesn't try to make use
6177  // of the secondary interpolation mode supported by BC4. This is not
6178  // important for most textures, but it may be added in the future.
6179 
6180  nassertv((size_t)x_blocks * (size_t)y_blocks * 4 * 4 <= uncompressed_image._page_size);
6181  nassertv((size_t)x_size * (size_t)y_size == uncompressed_image._page_size);
6182 
6183  static const int remap[] = {1, 7, 6, 5, 4, 3, 2, 0};
6184 
6185  for (int z = 0; z < num_pages; ++z) {
6186  unsigned char *dest = compressed_image._image.p() + z * compressed_image._page_size;
6187  unsigned const char *src = uncompressed_image._image.p() + z * uncompressed_image._page_size;
6188 
6189  // Convert one 4 x 4 block at a time.
6190  for (int y = 0; y < y_blocks; ++y) {
6191  for (int x = 0; x < x_blocks; ++x) {
6192  int a, b, c, d;
6193  float fac, add;
6194  unsigned char minv, maxv;
6195  unsigned const char *blk = src;
6196 
6197  // Find the minimum and maximum value in the block.
6198  minv = blk[0];
6199  maxv = blk[0];
6200  minv = min(blk[1], minv); maxv = max(blk[1], maxv);
6201  minv = min(blk[2], minv); maxv = max(blk[2], maxv);
6202  minv = min(blk[3], minv); maxv = max(blk[3], maxv);
6203  blk += x_size;
6204  minv = min(blk[0], minv); maxv = max(blk[0], maxv);
6205  minv = min(blk[1], minv); maxv = max(blk[1], maxv);
6206  minv = min(blk[2], minv); maxv = max(blk[2], maxv);
6207  minv = min(blk[3], minv); maxv = max(blk[3], maxv);
6208  blk += x_size;
6209  minv = min(blk[0], minv); maxv = max(blk[0], maxv);
6210  minv = min(blk[1], minv); maxv = max(blk[1], maxv);
6211  minv = min(blk[2], minv); maxv = max(blk[2], maxv);
6212  minv = min(blk[3], minv); maxv = max(blk[3], maxv);
6213  blk += x_size;
6214  minv = min(blk[0], minv); maxv = max(blk[0], maxv);
6215  minv = min(blk[1], minv); maxv = max(blk[1], maxv);
6216  minv = min(blk[2], minv); maxv = max(blk[2], maxv);
6217  minv = min(blk[3], minv); maxv = max(blk[3], maxv);
6218 
6219  // Now calculate the index for each pixel.
6220  blk = src;
6221  if (maxv > minv) {
6222  fac = 7.5f / (maxv - minv);
6223  } else {
6224  fac = 0;
6225  }
6226  add = -minv * fac;
6227  a = (remap[(int)(blk[0] * fac + add)])
6228  | (remap[(int)(blk[1] * fac + add)] << 3)
6229  | (remap[(int)(blk[2] * fac + add)] << 6)
6230  | (remap[(int)(blk[3] * fac + add)] << 9);
6231  blk += x_size;
6232  b = (remap[(int)(blk[0] * fac + add)] << 4)
6233  | (remap[(int)(blk[1] * fac + add)] << 7)
6234  | (remap[(int)(blk[2] * fac + add)] << 10)
6235  | (remap[(int)(blk[3] * fac + add)] << 13);
6236  blk += x_size;
6237  c = (remap[(int)(blk[0] * fac + add)])
6238  | (remap[(int)(blk[1] * fac + add)] << 3)
6239  | (remap[(int)(blk[2] * fac + add)] << 6)
6240  | (remap[(int)(blk[3] * fac + add)] << 9);
6241  blk += x_size;
6242  d = (remap[(int)(blk[0] * fac + add)] << 4)
6243  | (remap[(int)(blk[1] * fac + add)] << 7)
6244  | (remap[(int)(blk[2] * fac + add)] << 10)
6245  | (remap[(int)(blk[3] * fac + add)] << 13);
6246 
6247  *(dest++) = maxv;
6248  *(dest++) = minv;
6249  *(dest++) = a & 0xff;
6250  *(dest++) = (a >> 8) | (b & 0xf0);
6251  *(dest++) = b >> 8;
6252  *(dest++) = c & 0xff;
6253  *(dest++) = (c >> 8) | (d & 0xf0);
6254  *(dest++) = d >> 8;
6255 
6256  // Advance to the beginning of the next 4x4 block.
6257  src += 4;
6258  }
6259  src += x_size * 3;
6260  }
6262  }
6263 }
6264 
/**
 * Compresses a RAM image using BC5 (two-channel RGTC) compression.  This is
 * exactly the BC4 scheme applied independently to the two interleaved
 * channels of each 4x4 block, producing 16 bytes per block (one 8-byte BC4
 * sub-block per channel).
 */
void Texture::
do_compress_ram_image_bc5(const RamImage &uncompressed_image,
                          RamImage &compressed_image,
                          int x_size, int y_size, int num_pages) {
  int x_blocks = (x_size >> 2);
  int y_blocks = (y_size >> 2);
  // Two interleaved components per pixel, so a row is 2 * x_size bytes.
  int stride = x_size * 2;

  // BC5 uses the same compression algorithm as BC4, except repeated for two
  // channels.

  nassertv((size_t)x_blocks * (size_t)y_blocks * 4 * 4 * 2 <= uncompressed_image._page_size);
  nassertv((size_t)stride * (size_t)y_size == uncompressed_image._page_size);

  // Maps a quantized 0..7 value onto the RGTC index encoding; see the
  // matching comment in do_compress_ram_image_bc4.
  static const int remap[] = {1, 7, 6, 5, 4, 3, 2, 0};

  for (int z = 0; z < num_pages; ++z) {
    unsigned char *dest = compressed_image._image.p() + z * compressed_image._page_size;
    unsigned const char *src = uncompressed_image._image.p() + z * uncompressed_image._page_size;

    // Convert one 4 x 4 block at a time.
    for (int y = 0; y < y_blocks; ++y) {
      for (int x = 0; x < x_blocks; ++x) {
        int a, b, c, d;
        float fac, add;
        unsigned char minv, maxv;
        unsigned const char *blk = src;

        // Find the minimum and maximum red value in the block (red lives at
        // the even byte offsets).
        minv = blk[0];
        maxv = blk[0];
        minv = min(blk[2], minv); maxv = max(blk[2], maxv);
        minv = min(blk[4], minv); maxv = max(blk[4], maxv);
        minv = min(blk[6], minv); maxv = max(blk[6], maxv);
        blk += stride;
        minv = min(blk[0], minv); maxv = max(blk[0], maxv);
        minv = min(blk[2], minv); maxv = max(blk[2], maxv);
        minv = min(blk[4], minv); maxv = max(blk[4], maxv);
        minv = min(blk[6], minv); maxv = max(blk[6], maxv);
        blk += stride;
        minv = min(blk[0], minv); maxv = max(blk[0], maxv);
        minv = min(blk[2], minv); maxv = max(blk[2], maxv);
        minv = min(blk[4], minv); maxv = max(blk[4], maxv);
        minv = min(blk[6], minv); maxv = max(blk[6], maxv);
        blk += stride;
        minv = min(blk[0], minv); maxv = max(blk[0], maxv);
        minv = min(blk[2], minv); maxv = max(blk[2], maxv);
        minv = min(blk[4], minv); maxv = max(blk[4], maxv);
        minv = min(blk[6], minv); maxv = max(blk[6], maxv);

        // Now calculate the index for each pixel.  fac/add map [minv, maxv]
        // onto [0, 7.5) so truncation yields a quantized 0..7 value.
        if (maxv > minv) {
          fac = 7.5f / (maxv - minv);
        } else {
          fac = 0;
        }
        add = -minv * fac;
        blk = src;
        a = (remap[(int)(blk[0] * fac + add)])
          | (remap[(int)(blk[2] * fac + add)] << 3)
          | (remap[(int)(blk[4] * fac + add)] << 6)
          | (remap[(int)(blk[6] * fac + add)] << 9);
        blk += stride;
        b = (remap[(int)(blk[0] * fac + add)] << 4)
          | (remap[(int)(blk[2] * fac + add)] << 7)
          | (remap[(int)(blk[4] * fac + add)] << 10)
          | (remap[(int)(blk[6] * fac + add)] << 13);
        blk += stride;
        c = (remap[(int)(blk[0] * fac + add)])
          | (remap[(int)(blk[2] * fac + add)] << 3)
          | (remap[(int)(blk[4] * fac + add)] << 6)
          | (remap[(int)(blk[6] * fac + add)] << 9);
        blk += stride;
        d = (remap[(int)(blk[0] * fac + add)] << 4)
          | (remap[(int)(blk[2] * fac + add)] << 7)
          | (remap[(int)(blk[4] * fac + add)] << 10)
          | (remap[(int)(blk[6] * fac + add)] << 13);

        // Emit the red sub-block: two endpoints, then 48 bits of indices.
        *(dest++) = maxv;
        *(dest++) = minv;
        *(dest++) = a & 0xff;
        *(dest++) = (a >> 8) | (b & 0xf0);
        *(dest++) = b >> 8;
        *(dest++) = c & 0xff;
        *(dest++) = (c >> 8) | (d & 0xf0);
        *(dest++) = d >> 8;

        // Find the minimum and maximum green value in the block (green lives
        // at the odd byte offsets, hence src + 1).
        blk = src + 1;
        minv = blk[0];
        maxv = blk[0];
        minv = min(blk[2], minv); maxv = max(blk[2], maxv);
        minv = min(blk[4], minv); maxv = max(blk[4], maxv);
        minv = min(blk[6], minv); maxv = max(blk[6], maxv);
        blk += stride;
        minv = min(blk[0], minv); maxv = max(blk[0], maxv);
        minv = min(blk[2], minv); maxv = max(blk[2], maxv);
        minv = min(blk[4], minv); maxv = max(blk[4], maxv);
        minv = min(blk[6], minv); maxv = max(blk[6], maxv);
        blk += stride;
        minv = min(blk[0], minv); maxv = max(blk[0], maxv);
        minv = min(blk[2], minv); maxv = max(blk[2], maxv);
        minv = min(blk[4], minv); maxv = max(blk[4], maxv);
        minv = min(blk[6], minv); maxv = max(blk[6], maxv);
        blk += stride;
        minv = min(blk[0], minv); maxv = max(blk[0], maxv);
        minv = min(blk[2], minv); maxv = max(blk[2], maxv);
        minv = min(blk[4], minv); maxv = max(blk[4], maxv);
        minv = min(blk[6], minv); maxv = max(blk[6], maxv);

        // Now calculate the index for each pixel.
        if (maxv > minv) {
          fac = 7.5f / (maxv - minv);
        } else {
          fac = 0;
        }
        add = -minv * fac;
        blk = src + 1;
        a = (remap[(int)(blk[0] * fac + add)])
          | (remap[(int)(blk[2] * fac + add)] << 3)
          | (remap[(int)(blk[4] * fac + add)] << 6)
          | (remap[(int)(blk[6] * fac + add)] << 9);
        blk += stride;
        b = (remap[(int)(blk[0] * fac + add)] << 4)
          | (remap[(int)(blk[2] * fac + add)] << 7)
          | (remap[(int)(blk[4] * fac + add)] << 10)
          | (remap[(int)(blk[6] * fac + add)] << 13);
        blk += stride;
        c = (remap[(int)(blk[0] * fac + add)])
          | (remap[(int)(blk[2] * fac + add)] << 3)
          | (remap[(int)(blk[4] * fac + add)] << 6)
          | (remap[(int)(blk[6] * fac + add)] << 9);
        blk += stride;
        d = (remap[(int)(blk[0] * fac + add)] << 4)
          | (remap[(int)(blk[2] * fac + add)] << 7)
          | (remap[(int)(blk[4] * fac + add)] << 10)
          | (remap[(int)(blk[6] * fac + add)] << 13);

        // Emit the green sub-block.
        *(dest++) = maxv;
        *(dest++) = minv;
        *(dest++) = a & 0xff;
        *(dest++) = (a >> 8) | (b & 0xf0);
        *(dest++) = b >> 8;
        *(dest++) = c & 0xff;
        *(dest++) = (c >> 8) | (d & 0xf0);
        *(dest++) = d >> 8;

        // Advance to the beginning of the next 4x4 block.
        src += 8;
      }
      // Skip the three remaining rows of the block row just converted.
      src += stride * 3;
    }
  }
}
6423 
/**
 * Decompresses a RAM image compressed using BC4.  Each 8-byte block holds
 * two endpoint bytes followed by sixteen 3-bit indices into an 8-entry
 * palette derived from the endpoints, yielding a 4x4 block of 8-bit values.
 */
void Texture::
do_uncompress_ram_image_bc4(const RamImage &compressed_image,
                            RamImage &uncompressed_image,
                            int x_size, int y_size, int num_pages) {
  int x_blocks = (x_size >> 2);
  int y_blocks = (y_size >> 2);

  for (int z = 0; z < num_pages; ++z) {
    unsigned char *dest = uncompressed_image._image.p() + z * uncompressed_image._page_size;
    unsigned const char *src = compressed_image._image.p() + z * compressed_image._page_size;

    // Unconvert one 4 x 4 block at a time.
    uint8_t tbl[8];
    for (int y = 0; y < y_blocks; ++y) {
      for (int x = 0; x < x_blocks; ++x) {
        unsigned char *blk = dest;
        // Build the 8-entry palette from the two endpoint bytes.
        tbl[0] = src[0];
        tbl[1] = src[1];
        if (tbl[0] > tbl[1]) {
          // Primary mode: six interpolated values between the endpoints.
          tbl[2] = (tbl[0] * 6 + tbl[1] * 1) / 7.0f;
          tbl[3] = (tbl[0] * 5 + tbl[1] * 2) / 7.0f;
          tbl[4] = (tbl[0] * 4 + tbl[1] * 3) / 7.0f;
          tbl[5] = (tbl[0] * 3 + tbl[1] * 4) / 7.0f;
          tbl[6] = (tbl[0] * 2 + tbl[1] * 5) / 7.0f;
          tbl[7] = (tbl[0] * 1 + tbl[1] * 6) / 7.0f;
        } else {
          // Secondary mode: four interpolated values plus explicit 0 and 255.
          tbl[2] = (tbl[0] * 4 + tbl[1] * 1) / 5.0f;
          tbl[3] = (tbl[0] * 3 + tbl[1] * 2) / 5.0f;
          tbl[4] = (tbl[0] * 2 + tbl[1] * 3) / 5.0f;
          tbl[5] = (tbl[0] * 1 + tbl[1] * 4) / 5.0f;
          tbl[6] = 0;
          tbl[7] = 255;
        }
        // The first 24 index bits cover the top two rows of the block.
        int v = src[2] + (src[3] << 8) + (src[4] << 16);
        blk[0] = tbl[v & 0x7];
        blk[1] = tbl[(v & 0x000038) >> 3];
        blk[2] = tbl[(v & 0x0001c0) >> 6];
        blk[3] = tbl[(v & 0x000e00) >> 9];
        blk += x_size;
        blk[0] = tbl[(v & 0x007000) >> 12];
        blk[1] = tbl[(v & 0x038000) >> 15];
        blk[2] = tbl[(v & 0x1c0000) >> 18];
        blk[3] = tbl[(v & 0xe00000) >> 21];
        blk += x_size;
        // The second 24 index bits cover the bottom two rows.
        v = src[5] + (src[6] << 8) + (src[7] << 16);
        blk[0] = tbl[v & 0x7];
        blk[1] = tbl[(v & 0x000038) >> 3];
        blk[2] = tbl[(v & 0x0001c0) >> 6];
        blk[3] = tbl[(v & 0x000e00) >> 9];
        blk += x_size;
        blk[0] = tbl[(v & 0x007000) >> 12];
        blk[1] = tbl[(v & 0x038000) >> 15];
        blk[2] = tbl[(v & 0x1c0000) >> 18];
        blk[3] = tbl[(v & 0xe00000) >> 21];
        src += 8;
        dest += 4;
      }
      // Skip the three remaining rows of the block row just written.
      dest += x_size * 3;
    }
  }
}
6489 
/**
 * Decompresses a RAM image compressed using BC5.  Each 16-byte block holds
 * two independent BC4 sub-blocks, one per channel; the output interleaves
 * the two channels per pixel (red at even offsets, green at odd).
 */
void Texture::
do_uncompress_ram_image_bc5(const RamImage &compressed_image,
                            RamImage &uncompressed_image,
                            int x_size, int y_size, int num_pages) {
  int x_blocks = (x_size >> 2);
  int y_blocks = (y_size >> 2);
  // Two interleaved components per pixel, so a row is 2 * x_size bytes.
  int stride = x_size * 2;

  for (int z = 0; z < num_pages; ++z) {
    unsigned char *dest = uncompressed_image._image.p() + z * uncompressed_image._page_size;
    unsigned const char *src = compressed_image._image.p() + z * compressed_image._page_size;

    // Unconvert one 4 x 4 block at a time.
    uint8_t red[8];
    uint8_t grn[8];
    for (int y = 0; y < y_blocks; ++y) {
      for (int x = 0; x < x_blocks; ++x) {
        unsigned char *blk = dest;
        // Build the red palette from the first sub-block's endpoints.
        red[0] = src[0];
        red[1] = src[1];
        if (red[0] > red[1]) {
          // Primary mode: six interpolated values between the endpoints.
          red[2] = (red[0] * 6 + red[1] * 1) / 7.0f;
          red[3] = (red[0] * 5 + red[1] * 2) / 7.0f;
          red[4] = (red[0] * 4 + red[1] * 3) / 7.0f;
          red[5] = (red[0] * 3 + red[1] * 4) / 7.0f;
          red[6] = (red[0] * 2 + red[1] * 5) / 7.0f;
          red[7] = (red[0] * 1 + red[1] * 6) / 7.0f;
        } else {
          // Secondary mode: four interpolated values plus explicit 0 and 255.
          red[2] = (red[0] * 4 + red[1] * 1) / 5.0f;
          red[3] = (red[0] * 3 + red[1] * 2) / 5.0f;
          red[4] = (red[0] * 2 + red[1] * 3) / 5.0f;
          red[5] = (red[0] * 1 + red[1] * 4) / 5.0f;
          red[6] = 0;
          red[7] = 255;
        }
        // Build the green palette from the second sub-block's endpoints.
        grn[0] = src[8];
        grn[1] = src[9];
        if (grn[0] > grn[1]) {
          grn[2] = (grn[0] * 6 + grn[1] * 1) / 7.0f;
          grn[3] = (grn[0] * 5 + grn[1] * 2) / 7.0f;
          grn[4] = (grn[0] * 4 + grn[1] * 3) / 7.0f;
          grn[5] = (grn[0] * 3 + grn[1] * 4) / 7.0f;
          grn[6] = (grn[0] * 2 + grn[1] * 5) / 7.0f;
          grn[7] = (grn[0] * 1 + grn[1] * 6) / 7.0f;
        } else {
          grn[2] = (grn[0] * 4 + grn[1] * 1) / 5.0f;
          grn[3] = (grn[0] * 3 + grn[1] * 2) / 5.0f;
          grn[4] = (grn[0] * 2 + grn[1] * 3) / 5.0f;
          grn[5] = (grn[0] * 1 + grn[1] * 4) / 5.0f;
          grn[6] = 0;
          grn[7] = 255;
        }
        // The first 24 index bits of each sub-block cover the top two rows.
        int r = src[2] + (src[3] << 8) + (src[4] << 16);
        int g = src[10] + (src[11] << 8) + (src[12] << 16);
        blk[0] = red[r & 0x7];
        blk[1] = grn[g & 0x7];
        blk[2] = red[(r & 0x000038) >> 3];
        blk[3] = grn[(g & 0x000038) >> 3];
        blk[4] = red[(r & 0x0001c0) >> 6];
        blk[5] = grn[(g & 0x0001c0) >> 6];
        blk[6] = red[(r & 0x000e00) >> 9];
        blk[7] = grn[(g & 0x000e00) >> 9];
        blk += stride;
        blk[0] = red[(r & 0x007000) >> 12];
        blk[1] = grn[(g & 0x007000) >> 12];
        blk[2] = red[(r & 0x038000) >> 15];
        blk[3] = grn[(g & 0x038000) >> 15];
        blk[4] = red[(r & 0x1c0000) >> 18];
        blk[5] = grn[(g & 0x1c0000) >> 18];
        blk[6] = red[(r & 0xe00000) >> 21];
        blk[7] = grn[(g & 0xe00000) >> 21];
        blk += stride;
        // The second 24 index bits of each sub-block cover the bottom rows.
        r = src[5] + (src[6] << 8) + (src[7] << 16);
        g = src[13] + (src[14] << 8) + (src[15] << 16);
        blk[0] = red[r & 0x7];
        blk[1] = grn[g & 0x7];
        blk[2] = red[(r & 0x000038) >> 3];
        blk[3] = grn[(g & 0x000038) >> 3];
        blk[4] = red[(r & 0x0001c0) >> 6];
        blk[5] = grn[(g & 0x0001c0) >> 6];
        blk[6] = red[(r & 0x000e00) >> 9];
        blk[7] = grn[(g & 0x000e00) >> 9];
        blk += stride;
        blk[0] = red[(r & 0x007000) >> 12];
        blk[1] = grn[(g & 0x007000) >> 12];
        blk[2] = red[(r & 0x038000) >> 15];
        blk[3] = grn[(g & 0x038000) >> 15];
        blk[4] = red[(r & 0x1c0000) >> 18];
        blk[5] = grn[(g & 0x1c0000) >> 18];
        blk[6] = red[(r & 0xe00000) >> 21];
        blk[7] = grn[(g & 0xe00000) >> 21];
        src += 16;
        dest += 8;
      }
      // Skip the three remaining rows of the block row just written.
      dest += stride * 3;
    }
  }
}
6592 
6593 /**
6594  *
6595  */
6596 bool Texture::
6597 do_has_all_ram_mipmap_images(const CData *cdata) const {
6598  if (cdata->_ram_images.empty() || cdata->_ram_images[0]._image.empty()) {
6599  // If we don't even have a base image, the answer is no.
6600  return false;
6601  }
6602  if (!uses_mipmaps()) {
6603  // If we have a base image and don't require mipmapping, the answer is
6604  // yes.
6605  return true;
6606  }
6607 
6608  // Check that we have enough mipmap levels to meet the size requirements.
6609  int size = max(cdata->_x_size, max(cdata->_y_size, cdata->_z_size));
6610  int n = 0;
6611  int x = 1;
6612  while (x < size) {
6613  x = (x << 1);
6614  ++n;
6615  if (n >= (int)cdata->_ram_images.size() || cdata->_ram_images[n]._image.empty()) {
6616  return false;
6617  }
6618  }
6619 
6620  return true;
6621 }
6622 
/**
 * Considers whether the z_size (or num_views) should automatically be
 * adjusted when the user loads a new page.  Returns true if the z size is
 * valid, false otherwise.
 *
 * Assumes the lock is already held.
 */
bool Texture::
do_reconsider_z_size(CData *cdata, int z, const LoaderOptions &options) {
  // Only act when the requested page lies beyond the pages we already have.
  if (z >= cdata->_z_size * cdata->_num_views) {
    bool num_views_specified = true;
    if (options.get_texture_flags() & LoaderOptions::TF_multiview) {
      // This flag is false if it is a multiview texture with an unspecified
      // number of views.  It is true if it is not a multiview texture, or if
      // it is but the number of views is explicitly specified.
      num_views_specified = (options.get_texture_num_views() != 0);
    }

    if (num_views_specified &&
        (cdata->_texture_type == Texture::TT_3d_texture ||
         cdata->_texture_type == Texture::TT_2d_texture_array)) {
      // If we're loading a page past _z_size, treat it as an implicit request
      // to enlarge _z_size.  However, this is only legal if this is, in fact,
      // a 3-d texture or a 2d texture array (cube maps always have z_size 6,
      // and other types have z_size 1).
      nassertr(cdata->_num_views != 0, false);
      cdata->_z_size = (z / cdata->_num_views) + 1;

    } else if (cdata->_z_size != 0) {
      // In the case of a 2-d texture or cube map, or a 3-d texture with an
      // unspecified _num_views, assume we're loading views of a multiview
      // texture.
      cdata->_num_views = (z / cdata->_z_size) + 1;

    } else {
      // The first image loaded sets an implicit z-size.
      cdata->_z_size = 1;
    }

    // Increase the size of the data buffer to make room for the new texture
    // level.
    do_allocate_pages(cdata);
  }

  return true;
}
6669 
6670 /**
6671  * Called internally by do_reconsider_z_size() to allocate new memory in
6672  * _ram_images[0] for the new number of pages.
6673  *
6674  * Assumes the lock is already held.
6675  */
6676 void Texture::
6677 do_allocate_pages(CData *cdata) {
6678  size_t new_size = do_get_expected_ram_image_size(cdata);
6679  if (!cdata->_ram_images.empty() &&
6680  !cdata->_ram_images[0]._image.empty() &&
6681  new_size > cdata->_ram_images[0]._image.size()) {
6682  cdata->_ram_images[0]._image.insert(cdata->_ram_images[0]._image.end(), new_size - cdata->_ram_images[0]._image.size(), 0);
6683  nassertv(cdata->_ram_images[0]._image.size() == new_size);
6684  }
6685 }
6686 
/**
 * Resets the internal Texture properties when a new image file is loaded.
 * Returns true if the new image is valid, false otherwise.
 *
 * Assumes the lock is already held.
 */
bool Texture::
do_reconsider_image_properties(CData *cdata, int x_size, int y_size, int num_components,
                               Texture::ComponentType component_type, int z,
                               const LoaderOptions &options) {
  if (!cdata->_loaded_from_image || num_components != cdata->_num_components || component_type != cdata->_component_type) {
    // Come up with a default format based on the number of channels.  But
    // only do this the first time the file is loaded, or if the number of
    // channels in the image changes on subsequent loads.

    // TODO: handle sRGB properly
    switch (num_components) {
    case 1:
      cdata->_format = F_luminance;
      break;

    case 2:
      cdata->_format = F_luminance_alpha;
      break;

    case 3:
      cdata->_format = F_rgb;
      break;

    case 4:
      cdata->_format = F_rgba;
      break;

    default:
      // Eh?
      nassert_raise("unexpected channel count");
      cdata->_format = F_rgb;
      return false;
    }
  }

  if (!cdata->_loaded_from_image) {
    // First load: we are free to adopt the image's properties wholesale.
    if ((options.get_texture_flags() & LoaderOptions::TF_allow_1d) &&
        cdata->_texture_type == TT_2d_texture && x_size != 1 && y_size == 1) {
      // If we're loading an Nx1 size texture, infer a 1-d texture type.
      cdata->_texture_type = TT_1d_texture;
    }

#ifndef NDEBUG
    // Sanity-check the image dimensions against the texture type, in debug
    // builds only.
    switch (cdata->_texture_type) {
    case TT_1d_texture:
    case TT_buffer_texture:
      nassertr(y_size == 1, false);
      break;
    case TT_cube_map:
    case TT_cube_map_array:
      nassertr(x_size == y_size, false);
      break;
    default:
      break;
    }
#endif
    // A size change invalidates any previously recorded padding.
    if ((cdata->_x_size != x_size)||(cdata->_y_size != y_size)) {
      do_set_pad_size(cdata, 0, 0, 0);
    }
    cdata->_x_size = x_size;
    cdata->_y_size = y_size;
    cdata->_num_components = num_components;
    do_set_component_type(cdata, component_type);

  } else {
    // Subsequent pages must match the properties already established by the
    // first page.
    if (cdata->_x_size != x_size ||
        cdata->_y_size != y_size ||
        cdata->_num_components != num_components ||
        cdata->_component_type != component_type) {
      gobj_cat.error()
        << "Texture properties have changed for texture " << get_name()
        << " page " << z << ".\n";
      return false;
    }
  }

  return true;
}
6771 
/**
 * Scales or pads the texture image to the size required by
 * do_adjust_this_size() (e.g. a power of 2), reloading the RAM image from
 * the adjusted PNMImage.  Returns true if the image was modified, false if
 * no change was needed (or on failure; failures are also logged).
 *
 * Only supports single-page, single-view textures.  Assumes the lock is
 * already held.
 */
bool Texture::
do_rescale_texture(CData *cdata) {
  int new_x_size = cdata->_x_size;
  int new_y_size = cdata->_y_size;
  if (cdata->_z_size * cdata->_num_views != 1) {
    nassert_raise("rescale_texture() doesn't support 3-d or multiview textures.");
    return false;
  }

  if (do_adjust_this_size(cdata, new_x_size, new_y_size, get_name(), false)) {
    // OK, we have to scale the image.
    PNMImage orig_image;
    if (!do_store_one(cdata, orig_image, 0, 0)) {
      gobj_cat.warning()
        << "Couldn't get image in rescale_texture()\n";
      return false;
    }

    gobj_cat.info()
      << "Resizing " << get_name() << " to " << new_x_size << " x "
      << new_y_size << "\n";
    // Filter the original image into a new image of the target size,
    // preserving channel count, maxval, type and color space.
    PNMImage new_image(new_x_size, new_y_size, orig_image.get_num_channels(),
                       orig_image.get_maxval(), orig_image.get_type(),
                       orig_image.get_color_space());
    new_image.quick_filter_from(orig_image);

    do_clear_ram_image(cdata);
    cdata->inc_image_modified();
    cdata->_x_size = new_x_size;
    cdata->_y_size = new_y_size;
    if (!do_load_one(cdata, new_image, get_name(), 0, 0, LoaderOptions())) {
      return false;
    }

    return true;
  }

  // Maybe we should pad the image.
  int pad_x_size = 0;
  int pad_y_size = 0;
  if (do_get_auto_texture_scale(cdata) == ATS_pad) {
    new_x_size = cdata->_x_size;
    new_y_size = cdata->_y_size;
    if (do_adjust_this_size(cdata, new_x_size, new_y_size, get_name(), true)) {
      pad_x_size = new_x_size - cdata->_x_size;
      pad_y_size = new_y_size - cdata->_y_size;

      PNMImage orig_image;
      if (!do_store_one(cdata, orig_image, 0, 0)) {
        gobj_cat.warning()
          << "Couldn't get image in rescale_texture()\n";
        return false;
      }
      // Copy the original image into the bottom-left corner of the enlarged
      // canvas rather than filtering it.
      PNMImage new_image(new_x_size, new_y_size, orig_image.get_num_channels(),
                         orig_image.get_maxval(), orig_image.get_type(),
                         orig_image.get_color_space());
      new_image.copy_sub_image(orig_image, 0, new_y_size - orig_image.get_y_size());

      do_clear_ram_image(cdata);
      cdata->_loaded_from_image = false;
      cdata->inc_image_modified();
      if (!do_load_one(cdata, new_image, get_name(), 0, 0, LoaderOptions())) {
        return false;
      }

      do_set_pad_size(cdata, pad_x_size, pad_y_size, 0);
      return true;
    }
  }

  // No changes needed.
  return false;
}
6848 
/**
 * Implementation of make_copy(): acquires a read lock on the pipeline data
 * and delegates to do_make_copy().
 */
PT(Texture) Texture::
make_copy_impl() const {
  CDReader cdata(_cycler);
  return do_make_copy(cdata);
}
6857 
6858 /**
6859  *
6860  */
6861 PT(Texture) Texture::
6862 do_make_copy(const CData *cdata) const {
6863  PT(Texture) tex = new Texture(get_name());
6864  CDWriter cdata_tex(tex->_cycler, true);
6865  tex->do_assign(cdata_tex, this, cdata);
6866  return tex;
6867 }
6868 
/**
 * The internal implementation of operator =().  Assumes the lock is already
 * held on both Textures.  Simply copies the other texture's pipeline data
 * over our own.
 */
void Texture::
do_assign(CData *cdata, const Texture *copy, const CData *cdata_copy) {
  cdata->do_assign(cdata_copy);
}
6877 
/**
 * The protected implementation of clear().  Assumes the lock is already held.
 */
void Texture::
do_clear(CData *cdata) {
  // Reset to defaults by assigning the state of a freshly-constructed,
  // stack-local Texture over our own.
  Texture tex;
  tex.local_object();
  CDReader cdata_tex(tex._cycler);
  do_assign(cdata, &tex, cdata_tex);

  // Everything changed, so bump all three modification counters.
  cdata->inc_properties_modified();
  cdata->inc_image_modified();
  cdata->inc_simple_image_modified();
}
6892 
/**
 * The internal implementation of setup_texture(): validates the requested
 * dimensions against the texture type, records the new type, size, component
 * type and format, and discards any previously loaded image data.
 *
 * Assumes the lock is already held.
 */
void Texture::
do_setup_texture(CData *cdata, Texture::TextureType texture_type,
                 int x_size, int y_size, int z_size,
                 Texture::ComponentType component_type,
                 Texture::Format format) {
  switch (texture_type) {
  case TT_1d_texture:
    nassertv(y_size == 1 && z_size == 1);
    break;

  case TT_2d_texture:
    nassertv(z_size == 1);
    break;

  case TT_3d_texture:
    break;

  case TT_2d_texture_array:
    break;

  case TT_cube_map:
    // Cube maps must always consist of six square images.
    nassertv(x_size == y_size && z_size == 6);

    // In principle the wrap mode shouldn't mean anything to a cube map, but
    // some drivers seem to misbehave if it's other than
    // SamplerState::WM_clamp.
    cdata->_default_sampler.set_wrap_u(SamplerState::WM_clamp);
    cdata->_default_sampler.set_wrap_v(SamplerState::WM_clamp);
    cdata->_default_sampler.set_wrap_w(SamplerState::WM_clamp);
    break;

  case TT_cube_map_array:
    // Cube maps array z_size needs to be a multiple of 6.
    nassertv(x_size == y_size && z_size % 6 == 0);

    // Same wrap-mode workaround as for a single cube map, above.
    cdata->_default_sampler.set_wrap_u(SamplerState::WM_clamp);
    cdata->_default_sampler.set_wrap_v(SamplerState::WM_clamp);
    cdata->_default_sampler.set_wrap_w(SamplerState::WM_clamp);
    break;

  case TT_buffer_texture:
    nassertv(y_size == 1 && z_size == 1);
    break;

  case TT_1d_texture_array:
    nassertv(z_size == 1);
    break;
  }

  // The simple RAM image only applies to plain 2-d textures.
  if (texture_type != TT_2d_texture) {
    do_clear_simple_ram_image(cdata);
  }

  cdata->_texture_type = texture_type;
  cdata->_x_size = x_size;
  cdata->_y_size = y_size;
  cdata->_z_size = z_size;
  cdata->_num_views = 1;
  do_set_component_type(cdata, component_type);
  do_set_format(cdata, format);

  // Discard any previous image and reset all load-related state.
  do_clear_ram_image(cdata);
  do_set_pad_size(cdata, 0, 0, 0);
  cdata->_orig_file_x_size = 0;
  cdata->_orig_file_y_size = 0;
  cdata->_loaded_from_image = false;
  cdata->_loaded_from_txo = false;
  cdata->_has_read_pages = false;
  cdata->_has_read_mipmaps = false;
}
6967 
/**
 * The internal implementation of set_format().  Records the new format and
 * derives _num_components from it; does nothing if the format is unchanged.
 *
 * Assumes the lock is already held.
 */
void Texture::
do_set_format(CData *cdata, Texture::Format format) {
  if (format == cdata->_format) {
    return;
  }
  cdata->_format = format;
  cdata->inc_properties_modified();

  // Update _num_components to match the channel count implied by the format.
  switch (cdata->_format) {
  case F_color_index:
  case F_depth_stencil:
  case F_depth_component:
  case F_depth_component16:
  case F_depth_component24:
  case F_depth_component32:
  case F_red:
  case F_green:
  case F_blue:
  case F_alpha:
  case F_luminance:
  case F_r16:
  case F_r16i:
  case F_sluminance:
  case F_r32i:
  case F_r32:
  case F_r8i:
    cdata->_num_components = 1;
    break;

  case F_luminance_alpha:
  case F_luminance_alphamask:
  case F_rg16:
  case F_sluminance_alpha:
  case F_rg32:
  case F_rg8i:
  case F_rg:
  case F_rg16i:
  case F_rg32i:
    cdata->_num_components = 2;
    break;

  case F_rgb:
  case F_rgb5:
  case F_rgb8:
  case F_rgb12:
  case F_rgb332:
  case F_rgb16:
  case F_srgb:
  case F_rgb32:
  case F_rgb8i:
  case F_r11_g11_b10:
  case F_rgb9_e5:
  case F_rgb16i:
  case F_rgb32i:
    cdata->_num_components = 3;
    break;

  case F_rgba:
  case F_rgbm:
  case F_rgba4:
  case F_rgba5:
  case F_rgba8:
  case F_rgba12:
  case F_rgba16:
  case F_rgba32:
  case F_srgb_alpha:
  case F_rgba8i:
  case F_rgb10_a2:
  case F_rgba16i:
  case F_rgba32i:
    cdata->_num_components = 4;
    break;
  }
}
7045 
7046 /**
7047  *
7048  */
7049 void Texture::
7050 do_set_component_type(CData *cdata, Texture::ComponentType component_type) {
7051  cdata->_component_type = component_type;
7052 
7053  switch (component_type) {
7054  case T_unsigned_byte:
7055  case T_byte:
7056  cdata->_component_width = 1;
7057  break;
7058 
7059  case T_unsigned_short:
7060  case T_short:
7061  case T_half_float:
7062  cdata->_component_width = 2;
7063  break;
7064 
7065  case T_float:
7066  case T_unsigned_int_24_8:
7067  case T_int:
7068  case T_unsigned_int:
7069  cdata->_component_width = 4;
7070  break;
7071  }
7072 }
7073 
7074 /**
7075  *
7076  */
7077 void Texture::
7078 do_set_x_size(CData *cdata, int x_size) {
7079  if (cdata->_x_size != x_size) {
7080  cdata->_x_size = x_size;
7081  cdata->inc_image_modified();
7082  do_clear_ram_image(cdata);
7083  do_set_pad_size(cdata, 0, 0, 0);
7084  }
7085 }
7086 
7087 /**
7088  *
7089  */
7090 void Texture::
7091 do_set_y_size(CData *cdata, int y_size) {
7092  if (cdata->_y_size != y_size) {
7093  nassertv((cdata->_texture_type != Texture::TT_buffer_texture &&
7094  cdata->_texture_type != Texture::TT_1d_texture) || y_size == 1);
7095  cdata->_y_size = y_size;
7096  cdata->inc_image_modified();
7097  do_clear_ram_image(cdata);
7098  do_set_pad_size(cdata, 0, 0, 0);
7099  }
7100 }
7101 
7102 /**
7103  * Changes the z size indicated for the texture. This also implicitly unloads
7104  * the texture if it has already been loaded.
7105  */
7106 void Texture::
7107 do_set_z_size(CData *cdata, int z_size) {
7108  if (cdata->_z_size != z_size) {
7109  nassertv((cdata->_texture_type == Texture::TT_3d_texture) ||
7110  (cdata->_texture_type == Texture::TT_cube_map && z_size == 6) ||
7111  (cdata->_texture_type == Texture::TT_cube_map_array && z_size % 6 == 0) ||
7112  (cdata->_texture_type == Texture::TT_2d_texture_array) || (z_size == 1));
7113  cdata->_z_size = z_size;
7114  cdata->inc_image_modified();
7115  do_clear_ram_image(cdata);
7116  do_set_pad_size(cdata, 0, 0, 0);
7117  }
7118 }
7119 
7120 /**
7121  *
7122  */
7123 void Texture::
7124 do_set_num_views(CData *cdata, int num_views) {
7125  nassertv(num_views >= 1);
7126  if (cdata->_num_views != num_views) {
7127  cdata->_num_views = num_views;
7128  if (do_has_ram_image(cdata)) {
7129  cdata->inc_image_modified();
7130  do_clear_ram_image(cdata);
7131  }
7132  do_set_pad_size(cdata, 0, 0, 0);
7133  }
7134 }
7135 
7136 /**
7137  *
7138  */
7139 void Texture::
7140 do_set_wrap_u(CData *cdata, SamplerState::WrapMode wrap) {
7141  if (cdata->_default_sampler.get_wrap_u() != wrap) {
7142  cdata->inc_properties_modified();
7143  cdata->_default_sampler.set_wrap_u(wrap);
7144  }
7145 }
7146 
7147 /**
7148  *
7149  */
7150 void Texture::
7151 do_set_wrap_v(CData *cdata, SamplerState::WrapMode wrap) {
7152  if (cdata->_default_sampler.get_wrap_v() != wrap) {
7153  cdata->inc_properties_modified();
7154  cdata->_default_sampler.set_wrap_v(wrap);
7155  }
7156 }
7157 
7158 /**
7159  *
7160  */
7161 void Texture::
7162 do_set_wrap_w(CData *cdata, SamplerState::WrapMode wrap) {
7163  if (cdata->_default_sampler.get_wrap_w() != wrap) {
7164  cdata->inc_properties_modified();
7165  cdata->_default_sampler.set_wrap_w(wrap);
7166  }
7167 }
7168 
7169 /**
7170  *
7171  */
7172 void Texture::
7173 do_set_minfilter(CData *cdata, SamplerState::FilterType filter) {
7174  if (cdata->_default_sampler.get_minfilter() != filter) {
7175  cdata->inc_properties_modified();
7176  cdata->_default_sampler.set_minfilter(filter);
7177  }
7178 }
7179 
7180 /**
7181  *
7182  */
7183 void Texture::
7184 do_set_magfilter(CData *cdata, SamplerState::FilterType filter) {
7185  if (cdata->_default_sampler.get_magfilter() != filter) {
7186  cdata->inc_properties_modified();
7187  cdata->_default_sampler.set_magfilter(filter);
7188  }
7189 }
7190 
7191 /**
7192  *
7193  */
7194 void Texture::
7195 do_set_anisotropic_degree(CData *cdata, int anisotropic_degree) {
7196  if (cdata->_default_sampler.get_anisotropic_degree() != anisotropic_degree) {
7197  cdata->inc_properties_modified();
7198  cdata->_default_sampler.set_anisotropic_degree(anisotropic_degree);
7199  }
7200 }
7201 
7202 /**
7203  *
7204  */
7205 void Texture::
7206 do_set_border_color(CData *cdata, const LColor &color) {
7207  if (cdata->_default_sampler.get_border_color() != color) {
7208  cdata->inc_properties_modified();
7209  cdata->_default_sampler.set_border_color(color);
7210  }
7211 }
7212 
7213 /**
7214  *
7215  */
7216 void Texture::
7217 do_set_compression(CData *cdata, Texture::CompressionMode compression) {
7218  if (cdata->_compression != compression) {
7219  cdata->inc_properties_modified();
7220  cdata->_compression = compression;
7221 
7222  if (do_has_ram_image(cdata)) {
7223  bool has_compression = do_has_compression(cdata);
7224  bool has_ram_image_compression = (cdata->_ram_image_compression != CM_off);
7225  if (has_compression != has_ram_image_compression ||
7226  has_compression) {
7227  // Reload if we're turning compression on or off, or if we're changing
7228  // the compression mode to a different kind of compression.
7229  do_reload(cdata);
7230  }
7231  }
7232  }
7233 }
7234 
7235 /**
7236  *
7237  */
7238 void Texture::
7239 do_set_quality_level(CData *cdata, Texture::QualityLevel quality_level) {
7240  if (cdata->_quality_level != quality_level) {
7241  cdata->inc_properties_modified();
7242  cdata->_quality_level = quality_level;
7243  }
7244 }
7245 
7246 /**
7247  *
7248  */
7249 bool Texture::
7250 do_has_compression(const CData *cdata) const {
7251  if (cdata->_compression == CM_default) {
7252  return compressed_textures;
7253  } else {
7254  return (cdata->_compression != CM_off);
7255  }
7256 }
7257 
7258 /**
7259  * The protected implementation of has_ram_image(). Assumes the lock is
7260  * already held.
7261  */
7262 bool Texture::
7263 do_has_ram_image(const CData *cdata) const {
7264  return !cdata->_ram_images.empty() && !cdata->_ram_images[0]._image.empty();
7265 }
7266 
7267 /**
7268  * The protected implementation of has_uncompressed_ram_image(). Assumes the
7269  * lock is already held.
7270  */
7271 bool Texture::
7272 do_has_uncompressed_ram_image(const CData *cdata) const {
7273  return !cdata->_ram_images.empty() && !cdata->_ram_images[0]._image.empty() && cdata->_ram_image_compression == CM_off;
7274 }
7275 
7276 /**
7277  *
7278  */
7279 CPTA_uchar Texture::
7280 do_get_ram_image(CData *cdata) {
7281  if (!do_has_ram_image(cdata) && do_can_reload(cdata)) {
7282  do_reload_ram_image(cdata, true);
7283 
7284  if (do_has_ram_image(cdata)) {
7285  // Normally, we don't update the cdata->_modified semaphores in a
7286  // do_blah method, but we'll make an exception in this case, because
7287  // it's easiest to modify these here, and only when we know it's needed.
7288  cdata->inc_image_modified();
7289  cdata->inc_properties_modified();
7290  }
7291  }
7292 
7293  if (cdata->_ram_images.empty()) {
7294  return CPTA_uchar(get_class_type());
7295  }
7296 
7297  return cdata->_ram_images[0]._image;
7298 }
7299 
7300 /**
7301  *
7302  */
7303 CPTA_uchar Texture::
7304 do_get_uncompressed_ram_image(CData *cdata) {
7305  if (!cdata->_ram_images.empty() && cdata->_ram_image_compression != CM_off) {
7306  // We have an image in-ram, but it's compressed. Try to uncompress it
7307  // first.
7308  if (do_uncompress_ram_image(cdata)) {
7309  if (gobj_cat.is_debug()) {
7310  gobj_cat.debug()
7311  << "Uncompressed " << get_name() << "\n";
7312  }
7313  return cdata->_ram_images[0]._image;
7314  }
7315  }
7316 
7317  // Couldn't uncompress the existing image. Try to reload it.
7318  if ((!do_has_ram_image(cdata) || cdata->_ram_image_compression != CM_off) && do_can_reload(cdata)) {
7319  do_reload_ram_image(cdata, false);
7320  }
7321 
7322  if (!cdata->_ram_images.empty() && cdata->_ram_image_compression != CM_off) {
7323  // Great, now we have an image.
7324  if (do_uncompress_ram_image(cdata)) {
7325  gobj_cat.info()
7326  << "Uncompressed " << get_name() << "\n";
7327  return cdata->_ram_images[0]._image;
7328  }
7329  }
7330 
7331  if (cdata->_ram_images.empty() || cdata->_ram_image_compression != CM_off) {
7332  return CPTA_uchar(get_class_type());
7333  }
7334 
7335  return cdata->_ram_images[0]._image;
7336 }
7337 
7338 /**
7339  * Returns the uncompressed system-RAM image data associated with the texture.
7340  * Rather than just returning a pointer to the data, like
7341  * get_uncompressed_ram_image, this function first processes the data and
7342  * reorders the components using the specified format string, and places these
7343  * into a new char array.
7344  *
7345  * The 'format' argument should specify in which order the components of the
7346  * texture must be. For example, valid format strings are "RGBA", "GA",
7347  * "ABRG" or "AAA". A component can also be written as "0" or "1", which
7348  * means an empty/black or a full/white channel, respectively.
7349  *
7350  * This function is particularly useful to copy an image in-memory to a
7351  * different library (for example, PIL or wxWidgets) that require a different
7352  * component order than Panda's internal format, BGRA. Note, however, that
7353  * this conversion can still be too slow if you want to do it every frame, and
7354  * should thus be avoided for that purpose.
7355  *
7356  * The only requirement for the reordering is that an uncompressed image must
7357  * be available. If the RAM image is compressed, it will attempt to re-load
7358  * the texture from disk, if it doesn't find an uncompressed image there, it
7359  * will return NULL.
7360  */
get_ram_image_as(const string &requested_format) {
  CDWriter cdata(_cycler, false);
  string format = upcase(requested_format);

  // Make sure we can grab something that's uncompressed.
  CPTA_uchar data = do_get_uncompressed_ram_image(cdata);
  if (data == nullptr) {
    gobj_cat.error() << "Couldn't find an uncompressed RAM image!\n";
    return CPTA_uchar(get_class_type());
  }
  int imgsize = cdata->_x_size * cdata->_y_size;
  nassertr(cdata->_num_components > 0 && cdata->_num_components <= 4, CPTA_uchar(get_class_type()));
  nassertr(data.size() == (size_t)(cdata->_component_width * cdata->_num_components * imgsize), CPTA_uchar(get_class_type()));

  // Check if the format is already what we have internally.
  if ((cdata->_num_components == 1 && format.size() == 1) ||
      (cdata->_num_components == 2 && format.size() == 2 && format.at(1) == 'A' && format.at(0) != 'A') ||
      (cdata->_num_components == 3 && format == "BGR") ||
      (cdata->_num_components == 4 && format == "BGRA")) {
    // The format string is already our format, so we just need to copy it.
    return CPTA_uchar(data);
  }

  // Check if we have an alpha channel, and remember which channel we use.
  // alpha stays -1 when the texture format has no alpha channel.
  int alpha = -1;
  if (Texture::has_alpha(cdata->_format)) {
    alpha = cdata->_num_components - 1;
  }

  // Validate the format beforehand.
  for (size_t i = 0; i < format.size(); ++i) {
    if (format[i] != 'B' && format[i] != 'G' && format[i] != 'R' &&
        format[i] != 'A' && format[i] != '0' && format[i] != '1') {
      gobj_cat.error() << "Unexpected component character '"
        << format[i] << "', expected one of RGBA01!\n";
      return CPTA_uchar(get_class_type());
    }
  }

  // Create a new empty array that can hold our image.
  PTA_uchar newdata = PTA_uchar::empty_array(imgsize * format.size() * cdata->_component_width, get_class_type());

  // These ifs are for optimization of commonly used image types.
  if (cdata->_component_width == 1) {
    if (format == "RGBA" && cdata->_num_components == 4) {
      // BGRA -> RGBA: swap the B and R bytes within each 32-bit pixel.
      const uint32_t *src = (const uint32_t *)data.p();
      uint32_t *dst = (uint32_t *)newdata.p();

      for (int p = 0; p < imgsize; ++p) {
        uint32_t v = *src++;
        *dst++ = ((v & 0xff00ff00u)) |
                 ((v & 0x00ff0000u) >> 16) |
                 ((v & 0x000000ffu) << 16);
      }
      return newdata;
    }
    if (format == "RGB" && cdata->_num_components == 4) {
      const uint32_t *src = (const uint32_t *)data.p();
      uint32_t *dst = (uint32_t *)newdata.p();

      // Convert blocks of 4 pixels at a time, so that we can treat both the
      // source and destination as 32-bit integers.
      int blocks = imgsize >> 2;
      for (int i = 0; i < blocks; ++i) {
        uint32_t v0 = *src++;
        uint32_t v1 = *src++;
        uint32_t v2 = *src++;
        uint32_t v3 = *src++;
        *dst++ = ((v0 & 0x00ff0000u) >> 16) |
                 ((v0 & 0x0000ff00u)) |
                 ((v0 & 0x000000ffu) << 16) |
                 ((v1 & 0x00ff0000u) << 8);
        *dst++ = ((v1 & 0x0000ff00u) >> 8) |
                 ((v1 & 0x000000ffu) << 8) |
                 ((v2 & 0x00ff0000u)) |
                 ((v2 & 0x0000ff00u) << 16);
        *dst++ = ((v2 & 0x000000ffu)) |
                 ((v3 & 0x00ff0000u) >> 8) |
                 ((v3 & 0x0000ff00u) << 8) |
                 ((v3 & 0x000000ffu) << 24);
      }

      // If the image size wasn't a multiple of 4, we may have a handful of
      // pixels left over.  Convert those the slower way.
      uint8_t *tail = (uint8_t *)dst;
      for (int i = (imgsize & ~0x3); i < imgsize; ++i) {
        uint32_t v = *src++;
        *tail++ = (v & 0x00ff0000u) >> 16;
        *tail++ = (v & 0x0000ff00u) >> 8;
        *tail++ = (v & 0x000000ffu);
      }
      return newdata;
    }
    if (format == "BGR" && cdata->_num_components == 4) {
      // BGRA -> BGR: drop the alpha byte, keeping component order.
      const uint32_t *src = (const uint32_t *)data.p();
      uint32_t *dst = (uint32_t *)newdata.p();

      // Convert blocks of 4 pixels at a time, so that we can treat both the
      // source and destination as 32-bit integers.
      int blocks = imgsize >> 2;
      for (int i = 0; i < blocks; ++i) {
        uint32_t v0 = *src++;
        uint32_t v1 = *src++;
        uint32_t v2 = *src++;
        uint32_t v3 = *src++;
        *dst++ = (v0 & 0x00ffffffu) | ((v1 & 0x000000ffu) << 24);
        *dst++ = ((v1 & 0x00ffff00u) >> 8) | ((v2 & 0x0000ffffu) << 16);
        *dst++ = ((v2 & 0x00ff0000u) >> 16) | ((v3 & 0x00ffffffu) << 8);
      }

      // If the image size wasn't a multiple of 4, we may have a handful of
      // pixels left over.  Convert those the slower way.
      uint8_t *tail = (uint8_t *)dst;
      for (int i = (imgsize & ~0x3); i < imgsize; ++i) {
        uint32_t v = *src++;
        *tail++ = (v & 0x000000ffu);
        *tail++ = (v & 0x0000ff00u) >> 8;
        *tail++ = (v & 0x00ff0000u) >> 16;
      }
      return newdata;
    }
    const uint8_t *src = (const uint8_t *)data.p();
    uint8_t *dst = (uint8_t *)newdata.p();

    if (format == "RGB" && cdata->_num_components == 3) {
      // BGR -> RGB: reverse each 3-byte pixel.
      for (int i = 0; i < imgsize; ++i) {
        *dst++ = src[2];
        *dst++ = src[1];
        *dst++ = src[0];
        src += 3;
      }
      return newdata;
    }
    if (format == "A" && cdata->_num_components != 3) {
      // We can generally rely on alpha to be the last component.
      // NOTE(review): if the texture format has no alpha channel (alpha ==
      // -1), this reads src[-1] -- presumably the num_components != 3 test is
      // meant to guarantee an alpha channel here; verify against callers.
      for (int p = 0; p < imgsize; ++p) {
        dst[p] = src[alpha];
        src += cdata->_num_components;
      }
      return newdata;
    }
    // Fallback case for other 8-bit-per-channel formats.
    for (int p = 0; p < imgsize; ++p) {
      for (size_t i = 0; i < format.size(); ++i) {
        // For 1- and 2-component textures, every color request maps to the
        // single gray channel at index 0.
        if (format[i] == 'B' || (cdata->_num_components <= 2 && format[i] != 'A')) {
          *dst++ = src[0];
        } else if (format[i] == 'G') {
          *dst++ = src[1];
        } else if (format[i] == 'R') {
          *dst++ = src[2];
        } else if (format[i] == 'A') {
          if (alpha >= 0) {
            *dst++ = src[alpha];
          } else {
            // No alpha channel: treat as fully opaque.
            *dst++ = 0xff;
          }
        } else if (format[i] == '1') {
          *dst++ = 0xff;
        } else {
          *dst++ = 0x00;
        }
      }
      src += cdata->_num_components;
    }
    return newdata;
  }

  // The slow and general case: arbitrary component widths, copying each
  // component with memcpy/memset.
  for (int p = 0; p < imgsize; ++p) {
    for (size_t i = 0; i < format.size(); ++i) {
      int component = 0;
      if (format[i] == 'B' || (cdata->_num_components <= 2 && format[i] != 'A')) {
        component = 0;
      } else if (format[i] == 'G') {
        component = 1;
      } else if (format[i] == 'R') {
        component = 2;
      } else if (format[i] == 'A') {
        if (alpha >= 0) {
          component = alpha;
        } else {
          // No alpha channel: write all-ones (fully opaque).
          memset((void*)(newdata + (p * format.size() + i) * cdata->_component_width), -1, cdata->_component_width);
          continue;
        }
      } else if (format[i] == '1') {
        memset((void*)(newdata + (p * format.size() + i) * cdata->_component_width), -1, cdata->_component_width);
        continue;
      } else {
        memset((void*)(newdata + (p * format.size() + i) * cdata->_component_width), 0, cdata->_component_width);
        continue;
      }
      memcpy((void*)(newdata + (p * format.size() + i) * cdata->_component_width),
             (void*)(data + (p * cdata->_num_components + component) * cdata->_component_width),
             cdata->_component_width);
    }
  }
  return newdata;
}
7560 
7561 /**
7562  *
7563  */
7564 void Texture::
7565 do_set_simple_ram_image(CData *cdata, CPTA_uchar image, int x_size, int y_size) {
7566  nassertv(cdata->_texture_type == TT_2d_texture);
7567  size_t expected_page_size = (size_t)(x_size * y_size * 4);
7568  nassertv(image.size() == expected_page_size);
7569 
7570  cdata->_simple_x_size = x_size;
7571  cdata->_simple_y_size = y_size;
7572  cdata->_simple_ram_image._image = image.cast_non_const();
7573  cdata->_simple_ram_image._page_size = image.size();
7574  cdata->_simple_image_date_generated = (int32_t)time(nullptr);
7575  cdata->inc_simple_image_modified();
7576 }
7577 
7578 /**
7579  *
7580  */
7581 int Texture::
7582 do_get_expected_num_mipmap_levels(const CData *cdata) const {
7583  if (cdata->_texture_type == Texture::TT_buffer_texture) {
7584  return 1;
7585  }
7586  int size = max(cdata->_x_size, cdata->_y_size);
7587  if (cdata->_texture_type == Texture::TT_3d_texture) {
7588  size = max(size, cdata->_z_size);
7589  }
7590  int count = 1;
7591  while (size > 1) {
7592  size >>= 1;
7593  ++count;
7594  }
7595  return count;
7596 }
7597 
7598 /**
7599  *
7600  */
7601 size_t Texture::
7602 do_get_ram_mipmap_page_size(const CData *cdata, int n) const {
7603  if (cdata->_ram_image_compression != CM_off) {
7604  if (n >= 0 && n < (int)cdata->_ram_images.size()) {
7605  return cdata->_ram_images[n]._page_size;
7606  }
7607  return 0;
7608  } else {
7609  return do_get_expected_ram_mipmap_page_size(cdata, n);
7610  }
7611 }
7612 
7613 /**
7614  *
7615  */
7616 int Texture::
7617 do_get_expected_mipmap_x_size(const CData *cdata, int n) const {
7618  int size = max(cdata->_x_size, 1);
7619  while (n > 0 && size > 1) {
7620  size >>= 1;
7621  --n;
7622  }
7623  return size;
7624 }
7625 
7626 /**
7627  *
7628  */
7629 int Texture::
7630 do_get_expected_mipmap_y_size(const CData *cdata, int n) const {
7631  int size = max(cdata->_y_size, 1);
7632  while (n > 0 && size > 1) {
7633  size >>= 1;
7634  --n;
7635  }
7636  return size;
7637 }
7638 
7639 /**
7640  *
7641  */
7642 int Texture::
7643 do_get_expected_mipmap_z_size(const CData *cdata, int n) const {
7644  // 3-D textures have a different number of pages per each mipmap level.
7645  // Other kinds of textures--especially, cube map textures--always have the
7646  // same.
7647  if (cdata->_texture_type == Texture::TT_3d_texture) {
7648  int size = max(cdata->_z_size, 1);
7649  while (n > 0 && size > 1) {
7650  size >>= 1;
7651  --n;
7652  }
7653  return size;
7654 
7655  } else {
7656  return cdata->_z_size;
7657  }
7658 }
7659 
7660 /**
7661  *
7662  */
7663 void Texture::
7664 do_clear_simple_ram_image(CData *cdata) {
7665  cdata->_simple_x_size = 0;
7666  cdata->_simple_y_size = 0;
7667  cdata->_simple_ram_image._image.clear();
7668  cdata->_simple_ram_image._page_size = 0;
7669  cdata->_simple_image_date_generated = 0;
7670 
7671  // We allow this exception: we update the _simple_image_modified here, since
7672  // no one really cares much about that anyway, and it's convenient to do it
7673  // here.
7674  cdata->inc_simple_image_modified();
7675 }
7676 
7677 /**
7678  *
7679  */
7680 void Texture::
7681 do_clear_ram_mipmap_images(CData *cdata) {
7682  if (!cdata->_ram_images.empty()) {
7683  cdata->_ram_images.erase(cdata->_ram_images.begin() + 1, cdata->_ram_images.end());
7684  }
7685 }
7686 
7687 /**
7688  * Generates the RAM mipmap images for this texture, first uncompressing it as
7689  * required. Will recompress the image if it was originally compressed,
7690  * unless allow_recompress is true.
7691  */
7692 void Texture::
7693 do_generate_ram_mipmap_images(CData *cdata, bool allow_recompress) {
7694  nassertv(do_has_ram_image(cdata));
7695 
7696  if (do_get_expected_num_mipmap_levels(cdata) == 1) {
7697  // Don't bother.
7698  return;
7699  }
7700 
7701  RamImage orig_compressed_image;
7702  CompressionMode orig_compression_mode = CM_off;
7703 
7704  if (cdata->_ram_image_compression != CM_off) {
7705  // The RAM image is compressed. This means we need to uncompress it in
7706  // order to generate mipmap images. Save the original first, to avoid
7707  // lossy recompression.
7708  orig_compressed_image = cdata->_ram_images[0];
7709  orig_compression_mode = cdata->_ram_image_compression;
7710 
7711  // Now try to get the uncompressed source image.
7712  do_get_uncompressed_ram_image(cdata);
7713 
7714  if (cdata->_ram_image_compression != CM_off) {
7715  gobj_cat.error()
7716  << "Cannot generate mipmap levels for image with compression "
7717  << cdata->_ram_image_compression << "\n";
7718  return;
7719  }
7720  }
7721 
7722  do_clear_ram_mipmap_images(cdata);
7723 
7724  if (gobj_cat.is_debug()) {
7725  gobj_cat.debug()
7726  << "Generating mipmap levels for " << *this << "\n";
7727  }
7728 
7729  if (cdata->_texture_type == Texture::TT_3d_texture && cdata->_z_size != 1) {
7730  // Eek, a 3-D texture.
7731  int x_size = cdata->_x_size;
7732  int y_size = cdata->_y_size;
7733  int z_size = cdata->_z_size;
7734  int n = 0;
7735  while (x_size > 1 || y_size > 1 || z_size > 1) {
7736  cdata->_ram_images.push_back(RamImage());
7737  do_filter_3d_mipmap_level(cdata, cdata->_ram_images[n + 1], cdata->_ram_images[n],
7738  x_size, y_size, z_size);
7739  x_size = max(x_size >> 1, 1);
7740  y_size = max(y_size >> 1, 1);
7741  z_size = max(z_size >> 1, 1);
7742  ++n;
7743  }
7744 
7745  } else {
7746  // A 1-D, 2-D, or cube map texture.
7747  int x_size = cdata->_x_size;
7748  int y_size = cdata->_y_size;
7749  int n = 0;
7750  while (x_size > 1 || y_size > 1) {
7751  cdata->_ram_images.push_back(RamImage());
7752  do_filter_2d_mipmap_pages(cdata, cdata->_ram_images[n + 1], cdata->_ram_images[n],
7753  x_size, y_size);
7754  x_size = max(x_size >> 1, 1);
7755  y_size = max(y_size >> 1, 1);
7756  ++n;
7757  }
7758  }
7759 
7760  if (orig_compression_mode != CM_off && allow_recompress) {
7761  // Now attempt to recompress the mipmap images according to the original
7762  // compression mode. We don't need to bother compressing the first image
7763  // (it was already compressed, after all), so temporarily remove it from
7764  // the top of the mipmap stack, and compress all of the rest of them
7765  // instead.
7766  nassertv(cdata->_ram_images.size() > 1);
7767  int l0_x_size = cdata->_x_size;
7768  int l0_y_size = cdata->_y_size;
7769  int l0_z_size = cdata->_z_size;
7770  cdata->_x_size = do_get_expected_mipmap_x_size(cdata, 1);
7771  cdata->_y_size = do_get_expected_mipmap_y_size(cdata, 1);
7772  cdata->_z_size = do_get_expected_mipmap_z_size(cdata, 1);
7773  RamImage uncompressed_image = cdata->_ram_images[0];
7774  cdata->_ram_images.erase(cdata->_ram_images.begin());
7775 
7776  bool success = do_compress_ram_image(cdata, orig_compression_mode, QL_default, nullptr);
7777  // Now restore the toplevel image.
7778  if (success) {
7779  if (gobj_cat.is_debug()) {
7780  gobj_cat.debug()
7781  << "Compressed " << get_name() << " generated mipmaps with "
7782  << cdata->_ram_image_compression << "\n";
7783  }
7784  cdata->_ram_images.insert(cdata->_ram_images.begin(), orig_compressed_image);
7785  } else {
7786  cdata->_ram_images.insert(cdata->_ram_images.begin(), uncompressed_image);
7787  }
7788  cdata->_x_size = l0_x_size;
7789  cdata->_y_size = l0_y_size;
7790  cdata->_z_size = l0_z_size;
7791  }
7792 }
7793 
7794 /**
7795  *
7796  */
7797 void Texture::
7798 do_set_pad_size(CData *cdata, int x, int y, int z) {
7799  if (x > cdata->_x_size) {
7800  x = cdata->_x_size;
7801  }
7802  if (y > cdata->_y_size) {
7803  y = cdata->_y_size;
7804  }
7805  if (z > cdata->_z_size) {
7806  z = cdata->_z_size;
7807  }
7808 
7809  cdata->_pad_x_size = x;
7810  cdata->_pad_y_size = y;
7811  cdata->_pad_z_size = z;
7812 }
7813 
7814 /**
7815  * Returns true if we can safely call do_reload_ram_image() in order to make
7816  * the image available, or false if we shouldn't do this (because we know from
7817  * a priori knowledge that it wouldn't work anyway).
7818  */
7819 bool Texture::
7820 do_can_reload(const CData *cdata) const {
7821  return (cdata->_loaded_from_image && !cdata->_fullpath.empty());
7822 }
7823 
7824 /**
7825  *
7826  */
7827 bool Texture::
7828 do_reload(CData *cdata) {
7829  if (do_can_reload(cdata)) {
7830  do_clear_ram_image(cdata);
7831  do_reload_ram_image(cdata, true);
7832  if (do_has_ram_image(cdata)) {
7833  // An explicit call to reload() should increment image_modified.
7834  cdata->inc_image_modified();
7835  return true;
7836  }
7837  return false;
7838  }
7839 
7840  // We don't have a filename to load from.
7841  return false;
7842 }
7843 
7844 /**
7845  * Returns true if there is a rawdata image that we have available to write to
7846  * the bam stream. For a normal Texture, this is the same thing as
7847  * do_has_ram_image(), but a movie texture might define it differently.
7848  */
7849 bool Texture::
7850 do_has_bam_rawdata(const CData *cdata) const {
7851  return do_has_ram_image(cdata);
7852 }
7853 
7854 /**
7855  * If do_has_bam_rawdata() returned false, this attempts to reload the rawdata
7856  * image if possible.
7857  */
7858 void Texture::
7859 do_get_bam_rawdata(CData *cdata) {
7860  do_get_ram_image(cdata);
7861 }
7862 
7863 /**
7864  * Internal method to convert pixel data from the indicated PNMImage into the
7865  * given ram_image.
7866  */
7867 void Texture::
7868 convert_from_pnmimage(PTA_uchar &image, size_t page_size,
7869  int row_stride, int x, int y, int z,
7870  const PNMImage &pnmimage, int num_components,
7871  int component_width) {
7872  int x_size = pnmimage.get_x_size();
7873  int y_size = pnmimage.get_y_size();
7874  xelval maxval = pnmimage.get_maxval();
7875  int pixel_size = num_components * component_width;
7876 
7877  int row_skip = 0;
7878  if (row_stride == 0) {
7879  row_stride = x_size;
7880  } else {
7881  row_skip = (row_stride - x_size) * pixel_size;
7882  nassertv(row_skip >= 0);
7883  }
7884 
7885  bool is_grayscale = (num_components == 1 || num_components == 2);
7886  bool has_alpha = (num_components == 2 || num_components == 4);
7887  bool img_has_alpha = pnmimage.has_alpha();
7888 
7889  int idx = page_size * z;
7890  nassertv(idx + page_size <= image.size());
7891  unsigned char *p = &image[idx];
7892 
7893  if (x != 0 || y != 0) {
7894  p += (row_stride * y + x) * pixel_size;
7895  }
7896 
7897  if (maxval == 255 && component_width == 1) {
7898  // Most common case: one byte per pixel, and the source image shows a
7899  // maxval of 255. No scaling is necessary. Because this is such a common
7900  // case, we break it out per component for best performance.
7901  const xel *array = pnmimage.get_array();
7902  switch (num_components) {
7903  case 1:
7904  for (int j = y_size-1; j >= 0; j--) {
7905  const xel *row = array + j * x_size;
7906  for (int i = 0; i < x_size; i++) {
7907  *p++ = (uchar)PPM_GETB(row[i]);
7908  }
7909  p += row_skip;
7910  }
7911  break;
7912 
7913  case 2:
7914  if (img_has_alpha) {
7915  const xelval *alpha = pnmimage.get_alpha_array();
7916  for (int j = y_size-1; j >= 0; j--) {
7917  const xel *row = array + j * x_size;
7918  const xelval *alpha_row = alpha + j * x_size;
7919  for (int i = 0; i < x_size; i++) {
7920  *p++ = (uchar)PPM_GETB(row[i]);
7921  *p++ = (uchar)alpha_row[i];
7922  }
7923  p += row_skip;
7924  }
7925  } else {
7926  for (int j = y_size-1; j >= 0; j--) {
7927  const xel *row = array + j * x_size;
7928  for (int i = 0; i < x_size; i++) {
7929  *p++ = (uchar)PPM_GETB(row[i]);
7930  *p++ = (uchar)255;
7931  }
7932  p += row_skip;
7933  }
7934  }
7935  break;
7936 
7937  case 3:
7938  for (int j = y_size-1; j >= 0; j--) {
7939  const xel *row = array + j * x_size;
7940  for (int i = 0; i < x_size; i++) {
7941  *p++ = (uchar)PPM_GETB(row[i]);
7942  *p++ = (uchar)PPM_GETG(row[i]);
7943  *p++ = (uchar)PPM_GETR(row[i]);
7944  }
7945  p += row_skip;
7946  }
7947  break;
7948 
7949  case 4:
7950  if (img_has_alpha) {
7951  const xelval *alpha = pnmimage.get_alpha_array();
7952  for (int j = y_size-1; j >= 0; j--) {
7953  const xel *row = array + j * x_size;
7954  const xelval *alpha_row = alpha + j * x_size;
7955  for (int i = 0; i < x_size; i++) {
7956  *p++ = (uchar)PPM_GETB(row[i]);
7957  *p++ = (uchar)PPM_GETG(row[i]);
7958  *p++ = (uchar)PPM_GETR(row[i]);
7959  *p++ = (uchar)alpha_row[i];
7960  }
7961  p += row_skip;
7962  }
7963  } else {
7964  for (int j = y_size-1; j >= 0; j--) {
7965  const xel *row = array + j * x_size;
7966  for (int i = 0; i < x_size; i++) {
7967  *p++ = (uchar)PPM_GETB(row[i]);
7968  *p++ = (uchar)PPM_GETG(row[i]);
7969  *p++ = (uchar)PPM_GETR(row[i]);
7970  *p++ = (uchar)255;
7971  }
7972  p += row_skip;
7973  }
7974  }
7975  break;
7976 
7977  default:
7978  nassertv(num_components >= 1 && num_components <= 4);
7979  break;
7980  }
7981 
7982  } else if (maxval == 65535 && component_width == 2) {
7983  // Another possible case: two bytes per pixel, and the source image shows
7984  // a maxval of 65535. Again, no scaling is necessary.
7985  for (int j = y_size-1; j >= 0; j--) {
7986  for (int i = 0; i < x_size; i++) {
7987  if (is_grayscale) {
7988  store_unscaled_short(p, pnmimage.get_gray_val(i, j));
7989  } else {
7990  store_unscaled_short(p, pnmimage.get_blue_val(i, j));
7991  store_unscaled_short(p, pnmimage.get_green_val(i, j));
7992  store_unscaled_short(p, pnmimage.get_red_val(i, j));
7993  }
7994  if (has_alpha) {
7995  if (img_has_alpha) {
7996  store_unscaled_short(p, pnmimage.get_alpha_val(i, j));
7997  } else {
7998  store_unscaled_short(p, 65535);
7999  }
8000  }
8001  }
8002  p += row_skip;
8003  }
8004 
8005  } else if (component_width == 1) {
8006  // A less common case: one byte per pixel, but the maxval is something
8007  // other than 255. In this case, we should scale the pixel values up to
8008  // the appropriate amount.
8009  double scale = 255.0 / (double)maxval;
8010 
8011  for (int j = y_size-1; j >= 0; j--) {
8012  for (int i = 0; i < x_size; i++) {
8013  if (is_grayscale) {
8014  store_scaled_byte(p, pnmimage.get_gray_val(i, j), scale);
8015  } else {
8016  store_scaled_byte(p, pnmimage.get_blue_val(i, j), scale);
8017  store_scaled_byte(p, pnmimage.get_green_val(i, j), scale);
8018  store_scaled_byte(p, pnmimage.get_red_val(i, j), scale);
8019  }
8020  if (has_alpha) {
8021  if (img_has_alpha) {
8022  store_scaled_byte(p, pnmimage.get_alpha_val(i, j), scale);
8023  } else {
8024  store_unscaled_byte(p, 255);
8025  }
8026  }
8027  }
8028  p += row_skip;
8029  }
8030 
8031  } else { // component_width == 2
8032  // Another uncommon case: two bytes per pixel, and the maxval is something
8033  // other than 65535. Again, we must scale the pixel values.
8034  double scale = 65535.0 / (double)maxval;
8035 
8036  for (int j = y_size-1; j >= 0; j--) {
8037  for (int i = 0; i < x_size; i++) {
8038  if (is_grayscale) {
8039  store_scaled_short(p, pnmimage.get_gray_val(i, j), scale);
8040  } else {
8041  store_scaled_short(p, pnmimage.get_blue_val(i, j), scale);
8042  store_scaled_short(p, pnmimage.get_green_val(i, j), scale);
8043  store_scaled_short(p, pnmimage.get_red_val(i, j), scale);
8044  }
8045  if (has_alpha) {
8046  if (img_has_alpha) {
8047  store_scaled_short(p, pnmimage.get_alpha_val(i, j), 1.0);
8048  } else {
8049  store_unscaled_short(p, 65535);
8050  }
8051  }
8052  }
8053  p += row_skip;
8054  }
8055  }
8056 }
8057 
8058 /**
8059  * Internal method to convert pixel data from the indicated PfmFile into the
8060  * given ram_image.
8061  */
8062 void Texture::
8063 convert_from_pfm(PTA_uchar &image, size_t page_size, int z,
8064  const PfmFile &pfm, int num_components, int component_width) {
8065  nassertv(component_width == 4); // Currently only PN_float32 is expected.
8066  int x_size = pfm.get_x_size();
8067  int y_size = pfm.get_y_size();
8068 
8069  int idx = page_size * z;
8070  nassertv(idx + page_size <= image.size());
8071  PN_float32 *p = (PN_float32 *)&image[idx];
8072 
8073  switch (num_components) {
8074  case 1:
8075  {
8076  for (int j = y_size-1; j >= 0; j--) {
8077  for (int i = 0; i < x_size; i++) {
8078  p[0] = pfm.get_channel(i, j, 0);
8079  ++p;
8080  }
8081  }
8082  }
8083  break;
8084 
8085  case 2:
8086  {
8087  for (int j = y_size-1; j >= 0; j--) {
8088  for (int i = 0; i < x_size; i++) {
8089  p[0] = pfm.get_channel(i, j, 0);
8090  p[1] = pfm.get_channel(i, j, 1);
8091  p += 2;
8092  }
8093  }
8094  }
8095  break;
8096 
8097  case 3:
8098  {
8099  // RGB -> BGR
8100  for (int j = y_size-1; j >= 0; j--) {
8101  for (int i = 0; i < x_size; i++) {
8102  p[0] = pfm.get_channel(i, j, 2);
8103  p[1] = pfm.get_channel(i, j, 1);
8104  p[2] = pfm.get_channel(i, j, 0);
8105  p += 3;
8106  }
8107  }
8108  }
8109  break;
8110 
8111  case 4:
8112  {
8113  // RGBA -> BGRA
8114  for (int j = y_size-1; j >= 0; j--) {
8115  for (int i = 0; i < x_size; i++) {
8116  p[0] = pfm.get_channel(i, j, 2);
8117  p[1] = pfm.get_channel(i, j, 1);
8118  p[2] = pfm.get_channel(i, j, 0);
8119  p[3] = pfm.get_channel(i, j, 3);
8120  p += 4;
8121  }
8122  }
8123  }
8124  break;
8125 
8126  default:
8127  nassert_raise("unexpected channel count");
8128  return;
8129  }
8130 
8131  nassertv((unsigned char *)p == &image[idx] + page_size);
8132 }
8133 
/**
 * Internal method to convert pixel data to the indicated PNMImage from the
 * given ram_image.
 *
 * The page selected by z within the ram image is decoded into pnmimage,
 * which is resized to x_size by y_size with the given number of components.
 * Returns true on success, or false if component_type cannot be represented
 * in a PNMImage.
 */
bool Texture::
convert_to_pnmimage(PNMImage &pnmimage, int x_size, int y_size,
                    int num_components, ComponentType component_type,
                    bool is_srgb, CPTA_uchar image, size_t page_size, int z) {
  // 8-bit component types use a maxval of 0xff; every other supported type
  // is written out with 16 bits of precision.
  xelval maxval = 0xff;
  if (component_type != T_unsigned_byte && component_type != T_byte) {
    maxval = 0xffff;
  }
  ColorSpace color_space = is_srgb ? CS_sRGB : CS_linear;
  pnmimage.clear(x_size, y_size, num_components, maxval, nullptr, color_space);
  bool has_alpha = pnmimage.has_alpha();
  bool is_grayscale = pnmimage.is_grayscale();

  // Locate the start of the requested page within the ram image.
  int idx = page_size * z;
  nassertr(idx + page_size <= image.size(), false);

  xel *array = pnmimage.get_array();
  xelval *alpha = pnmimage.get_alpha_array();

  // In all branches below, the ram image's rows are stored bottom-up
  // relative to the PNMImage, so j walks the PNMImage rows in reverse while
  // the read pointer advances linearly.  Color components are stored in the
  // ram image in B, G, R(, A) order.
  switch (component_type) {
  case T_unsigned_byte:
    if (is_grayscale) {
      const unsigned char *p = &image[idx];
      if (has_alpha) {
        for (int j = y_size-1; j >= 0; j--) {
          xel *row = array + j * x_size;
          xelval *alpha_row = alpha + j * x_size;
          for (int i = 0; i < x_size; i++) {
            PPM_PUTB(row[i], *p++);
            alpha_row[i] = *p++;
          }
        }
      } else {
        for (int j = y_size-1; j >= 0; j--) {
          xel *row = array + j * x_size;
          for (int i = 0; i < x_size; i++) {
            PPM_PUTB(row[i], *p++);
          }
        }
      }
      // We should have consumed exactly one page of data.
      nassertr(p == &image[idx] + page_size, false);
    } else {
      const unsigned char *p = &image[idx];
      if (has_alpha) {
        for (int j = y_size-1; j >= 0; j--) {
          xel *row = array + j * x_size;
          xelval *alpha_row = alpha + j * x_size;
          for (int i = 0; i < x_size; i++) {
            PPM_PUTB(row[i], *p++);
            PPM_PUTG(row[i], *p++);
            PPM_PUTR(row[i], *p++);
            alpha_row[i] = *p++;
          }
        }
      } else {
        for (int j = y_size-1; j >= 0; j--) {
          xel *row = array + j * x_size;
          for (int i = 0; i < x_size; i++) {
            PPM_PUTB(row[i], *p++);
            PPM_PUTG(row[i], *p++);
            PPM_PUTR(row[i], *p++);
          }
        }
      }
      nassertr(p == &image[idx] + page_size, false);
    }
    break;

  case T_unsigned_short:
    {
      const uint16_t *p = (const uint16_t *)&image[idx];

      for (int j = y_size-1; j >= 0; j--) {
        xel *row = array + j * x_size;
        // alpha_row is computed unconditionally, but is only dereferenced
        // when has_alpha is true.
        xelval *alpha_row = alpha + j * x_size;
        for (int i = 0; i < x_size; i++) {
          PPM_PUTB(row[i], *p++);
          if (!is_grayscale) {
            PPM_PUTG(row[i], *p++);
            PPM_PUTR(row[i], *p++);
          }
          if (has_alpha) {
            alpha_row[i] = *p++;
          }
        }
      }
      nassertr((const unsigned char *)p == &image[idx] + page_size, false);
    }
    break;

  case T_unsigned_int:
    {
      const uint32_t *p = (const uint32_t *)&image[idx];

      for (int j = y_size-1; j >= 0; j--) {
        xel *row = array + j * x_size;
        xelval *alpha_row = alpha + j * x_size;
        for (int i = 0; i < x_size; i++) {
          // Discard the low 16 bits of each 32-bit component to fit the
          // 16-bit maxval chosen above.
          PPM_PUTB(row[i], (*p++) >> 16u);
          if (!is_grayscale) {
            PPM_PUTG(row[i], (*p++) >> 16u);
            PPM_PUTR(row[i], (*p++) >> 16u);
          }
          if (has_alpha) {
            alpha_row[i] = (*p++) >> 16u;
          }
        }
      }
      nassertr((const unsigned char *)p == &image[idx] + page_size, false);
    }
    break;

  case T_half_float:
    {
      const unsigned char *p = &image[idx];

      for (int j = y_size-1; j >= 0; j--) {
        for (int i = 0; i < x_size; i++) {
          // get_half_float evidently advances p past each 16-bit value
          // (see the assertion below).
          pnmimage.set_blue(i, j, get_half_float(p));
          if (!is_grayscale) {
            pnmimage.set_green(i, j, get_half_float(p));
            pnmimage.set_red(i, j, get_half_float(p));
          }
          if (has_alpha) {
            pnmimage.set_alpha(i, j, get_half_float(p));
          }
        }
      }
      nassertr(p == &image[idx] + page_size, false);
    }
    break;

  default:
    // Any other component type cannot be represented in a PNMImage.
    return false;
  }

  return true;
}
8276 
8277 /**
8278  * Internal method to convert pixel data to the indicated PfmFile from the
8279  * given ram_image.
8280  */
8281 bool Texture::
8282 convert_to_pfm(PfmFile &pfm, int x_size, int y_size,
8283  int num_components, int component_width,
8284  CPTA_uchar image, size_t page_size, int z) {
8285  nassertr(component_width == 4, false); // Currently only PN_float32 is expected.
8286  pfm.clear(x_size, y_size, num_components);
8287 
8288  int idx = page_size * z;
8289  nassertr(idx + page_size <= image.size(), false);
8290  const PN_float32 *p = (const PN_float32 *)&image[idx];
8291 
8292  switch (num_components) {
8293  case 1:
8294  for (int j = y_size-1; j >= 0; j--) {
8295  for (int i = 0; i < x_size; i++) {
8296  pfm.set_channel(i, j, 0, p[0]);
8297  ++p;
8298  }
8299  }
8300  break;
8301 
8302  case 2:
8303  for (int j = y_size-1; j >= 0; j--) {
8304  for (int i = 0; i < x_size; i++) {
8305  pfm.set_channel(i, j, 0, p[0]);
8306  pfm.set_channel(i, j, 1, p[1]);
8307  p += 2;
8308  }
8309  }
8310  break;
8311 
8312  case 3:
8313  // BGR -> RGB
8314  for (int j = y_size-1; j >= 0; j--) {
8315  for (int i = 0; i < x_size; i++) {
8316  pfm.set_channel(i, j, 2, p[0]);
8317  pfm.set_channel(i, j, 1, p[1]);
8318  pfm.set_channel(i, j, 0, p[2]);
8319  p += 3;
8320  }
8321  }
8322  break;
8323 
8324  case 4:
8325  // BGRA -> RGBA
8326  for (int j = y_size-1; j >= 0; j--) {
8327  for (int i = 0; i < x_size; i++) {
8328  pfm.set_channel(i, j, 2, p[0]);
8329  pfm.set_channel(i, j, 1, p[1]);
8330  pfm.set_channel(i, j, 0, p[2]);
8331  pfm.set_channel(i, j, 3, p[3]);
8332  p += 4;
8333  }
8334  }
8335  break;
8336 
8337  default:
8338  nassert_raise("unexpected channel count");
8339  return false;
8340  }
8341 
8342  nassertr((unsigned char *)p == &image[idx] + page_size, false);
8343  return true;
8344 }
8345 
8346 /**
8347  * Called by read_dds for a DDS file in BGR8 format.
8348  */
8349 PTA_uchar Texture::
8350 read_dds_level_bgr8(Texture *tex, CData *cdata, const DDSHeader &header, int n, istream &in) {
8351  // This is in order B, G, R.
8352  int x_size = tex->do_get_expected_mipmap_x_size(cdata, n);
8353  int y_size = tex->do_get_expected_mipmap_y_size(cdata, n);
8354 
8355  size_t size = tex->do_get_expected_ram_mipmap_page_size(cdata, n);
8356  size_t row_bytes = x_size * 3;
8357  PTA_uchar image = PTA_uchar::empty_array(size);
8358  for (int y = y_size - 1; y >= 0; --y) {
8359  unsigned char *p = image.p() + y * row_bytes;
8360  nassertr(p + row_bytes <= image.p() + size, PTA_uchar());
8361  in.read((char *)p, row_bytes);
8362  }
8363 
8364  return image;
8365 }
8366 
8367 /**
8368  * Called by read_dds for a DDS file in RGB8 format.
8369  */
8370 PTA_uchar Texture::
8371 read_dds_level_rgb8(Texture *tex, CData *cdata, const DDSHeader &header, int n, istream &in) {
8372  // This is in order R, G, B.
8373  int x_size = tex->do_get_expected_mipmap_x_size(cdata, n);
8374  int y_size = tex->do_get_expected_mipmap_y_size(cdata, n);
8375 
8376  size_t size = tex->do_get_expected_ram_mipmap_page_size(cdata, n);
8377  size_t row_bytes = x_size * 3;
8378  PTA_uchar image = PTA_uchar::empty_array(size);
8379  for (int y = y_size - 1; y >= 0; --y) {
8380  unsigned char *p = image.p() + y * row_bytes;
8381  nassertr(p + row_bytes <= image.p() + size, PTA_uchar());
8382  in.read((char *)p, row_bytes);
8383 
8384  // Now reverse the r, g, b triples.
8385  for (int x = 0; x < x_size; ++x) {
8386  unsigned char r = p[0];
8387  p[0] = p[2];
8388  p[2] = r;
8389  p += 3;
8390  }
8391  nassertr(p <= image.p() + size, PTA_uchar());
8392  }
8393 
8394  return image;
8395 }
8396 
8397 /**
8398  * Called by read_dds for a DDS file in ABGR8 format.
8399  */
8400 PTA_uchar Texture::
8401 read_dds_level_abgr8(Texture *tex, CData *cdata, const DDSHeader &header, int n, istream &in) {
8402  // This is laid out in order R, G, B, A.
8403  int x_size = tex->do_get_expected_mipmap_x_size(cdata, n);
8404  int y_size = tex->do_get_expected_mipmap_y_size(cdata, n);
8405 
8406  size_t size = tex->do_get_expected_ram_mipmap_page_size(cdata, n);
8407  size_t row_bytes = x_size * 4;
8408  PTA_uchar image = PTA_uchar::empty_array(size);
8409  for (int y = y_size - 1; y >= 0; --y) {
8410  unsigned char *p = image.p() + y * row_bytes;
8411  in.read((char *)p, row_bytes);
8412 
8413  uint32_t *pw = (uint32_t *)p;
8414  for (int x = 0; x < x_size; ++x) {
8415  uint32_t w = *pw;
8416 #ifdef WORDS_BIGENDIAN
8417  // bigendian: convert R, G, B, A to B, G, R, A.
8418  w = ((w & 0xff00) << 16) | ((w & 0xff000000U) >> 16) | (w & 0xff00ff);
8419 #else
8420  // littendian: convert A, B, G, R to to A, R, G, B.
8421  w = ((w & 0xff) << 16) | ((w & 0xff0000) >> 16) | (w & 0xff00ff00U);
8422 #endif
8423  *pw = w;
8424  ++pw;
8425  }
8426  nassertr((unsigned char *)pw <= image.p() + size, PTA_uchar());
8427  }
8428 
8429  return image;
8430 }
8431 
8432 /**
8433  * Called by read_dds for a DDS file in RGBA8 format.
8434  */
8435 PTA_uchar Texture::
8436 read_dds_level_rgba8(Texture *tex, CData *cdata, const DDSHeader &header, int n, istream &in) {
8437  // This is actually laid out in order B, G, R, A.
8438  int x_size = tex->do_get_expected_mipmap_x_size(cdata, n);
8439  int y_size = tex->do_get_expected_mipmap_y_size(cdata, n);
8440 
8441  size_t size = tex->do_get_expected_ram_mipmap_page_size(cdata, n);
8442  size_t row_bytes = x_size * 4;
8443  PTA_uchar image = PTA_uchar::empty_array(size);
8444  for (int y = y_size - 1; y >= 0; --y) {
8445  unsigned char *p = image.p() + y * row_bytes;
8446  nassertr(p + row_bytes <= image.p() + size, PTA_uchar());
8447  in.read((char *)p, row_bytes);
8448  }
8449 
8450  return image;
8451 }
8452 
8453 /**
8454  * Called by read_dds for a DDS file in ABGR16 format.
8455  */
8456 PTA_uchar Texture::
8457 read_dds_level_abgr16(Texture *tex, CData *cdata, const DDSHeader &header, int n, istream &in) {
8458  // This is laid out in order R, G, B, A.
8459  int x_size = tex->do_get_expected_mipmap_x_size(cdata, n);
8460  int y_size = tex->do_get_expected_mipmap_y_size(cdata, n);
8461 
8462  size_t size = tex->do_get_expected_ram_mipmap_page_size(cdata, n);
8463  size_t row_bytes = x_size * 8;
8464  PTA_uchar image = PTA_uchar::empty_array(size);
8465  for (int y = y_size - 1; y >= 0; --y) {
8466  unsigned char *p = image.p() + y * row_bytes;
8467  in.read((char *)p, row_bytes);
8468 
8469  uint16_t *pw = (uint16_t *)p;
8470  for (int x = 0; x < x_size; ++x) {
8471  swap(pw[0], pw[2]);
8472  pw += 4;
8473  }
8474  nassertr((unsigned char *)pw <= image.p() + size, PTA_uchar());
8475  }
8476 
8477  return image;
8478 }
8479 
8480 /**
8481  * Called by read_dds for a DDS file in ABGR32 format.
8482  */
8483 PTA_uchar Texture::
8484 read_dds_level_abgr32(Texture *tex, CData *cdata, const DDSHeader &header, int n, istream &in) {
8485  // This is laid out in order R, G, B, A.
8486  int x_size = tex->do_get_expected_mipmap_x_size(cdata, n);
8487  int y_size = tex->do_get_expected_mipmap_y_size(cdata, n);
8488 
8489  size_t size = tex->do_get_expected_ram_mipmap_page_size(cdata, n);
8490  size_t row_bytes = x_size * 16;
8491  nassertr(row_bytes * y_size == size, PTA_uchar());
8492  PTA_uchar image = PTA_uchar::empty_array(size);
8493  for (int y = y_size - 1; y >= 0; --y) {
8494  unsigned char *p = image.p() + y * row_bytes;
8495  in.read((char *)p, row_bytes);
8496 
8497  uint32_t *pw = (uint32_t *)p;
8498  for (int x = 0; x < x_size; ++x) {
8499  swap(pw[0], pw[2]);
8500  pw += 4;
8501  }
8502  nassertr((unsigned char *)pw <= image.p() + size, PTA_uchar());
8503  }
8504 
8505  return image;
8506 }
8507 
8508 /**
8509  * Called by read_dds for a DDS file that needs no transformations applied.
8510  */
8511 PTA_uchar Texture::
8512 read_dds_level_raw(Texture *tex, CData *cdata, const DDSHeader &header, int n, istream &in) {
8513  int x_size = tex->do_get_expected_mipmap_x_size(cdata, n);
8514  int y_size = tex->do_get_expected_mipmap_y_size(cdata, n);
8515 
8516  size_t size = tex->do_get_expected_ram_mipmap_page_size(cdata, n);
8517  size_t row_bytes = x_size * cdata->_num_components * cdata->_component_width;
8518  nassertr(row_bytes * y_size == size, PTA_uchar());
8519  PTA_uchar image = PTA_uchar::empty_array(size);
8520  for (int y = y_size - 1; y >= 0; --y) {
8521  unsigned char *p = image.p() + y * row_bytes;
8522  in.read((char *)p, row_bytes);
8523  }
8524 
8525  return image;
8526 }
8527 
/**
 * Called by read_dds for a DDS file whose format isn't one we've specifically
 * optimized.
 *
 * The pixel format is described by the four channel bitmasks in the DDS
 * header.  Each pixel is read as a little-endian value of rgb_bitcount bits
 * and decomposed into 8-bit B, G, R (and optionally A) components.
 */
PTA_uchar Texture::
read_dds_level_generic_uncompressed(Texture *tex, CData *cdata, const DDSHeader &header,
                                    int n, istream &in) {
  int x_size = tex->do_get_expected_mipmap_x_size(cdata, n);
  int y_size = tex->do_get_expected_mipmap_y_size(cdata, n);

  int pitch = (x_size * header.pf.rgb_bitcount) / 8;

  // MS says the pitch can be supplied in the header file and must be DWORD
  // aligned, but this appears to apply to level 0 mipmaps only (where it
  // almost always will be anyway). Other mipmap levels seem to be tightly
  // packed, but there isn't a separate pitch for each mipmap level. Weird.
  if (n == 0) {
    pitch = ((pitch + 3) / 4) * 4;
    if (header.dds_flags & DDSD_PITCH) {
      pitch = header.pitch;
    }
  }

  // Any padding bytes at the end of each row beyond the actual pixel data
  // are consumed and discarded after the row is decoded.
  int bpp = header.pf.rgb_bitcount / 8;
  int skip_bytes = pitch - (bpp * x_size);
  nassertr(skip_bytes >= 0, PTA_uchar());

  unsigned int r_mask = header.pf.r_mask;
  unsigned int g_mask = header.pf.g_mask;
  unsigned int b_mask = header.pf.b_mask;
  unsigned int a_mask = header.pf.a_mask;

  // Determine the number of bits to shift each mask to the right so that the
  // lowest on bit is at bit 0.
  int r_shift = get_lowest_on_bit(r_mask);
  int g_shift = get_lowest_on_bit(g_mask);
  int b_shift = get_lowest_on_bit(b_mask);
  int a_shift = get_lowest_on_bit(a_mask);

  // Then determine the scale factor required to raise the highest color value
  // to 0xff000000.
  unsigned int r_scale = 0;
  if (r_mask != 0) {
    r_scale = 0xff000000 / (r_mask >> r_shift);
  }
  unsigned int g_scale = 0;
  if (g_mask != 0) {
    g_scale = 0xff000000 / (g_mask >> g_shift);
  }
  unsigned int b_scale = 0;
  if (b_mask != 0) {
    b_scale = 0xff000000 / (b_mask >> b_shift);
  }
  unsigned int a_scale = 0;
  if (a_mask != 0) {
    a_scale = 0xff000000 / (a_mask >> a_shift);
  }

  bool add_alpha = has_alpha(cdata->_format);

  size_t size = tex->do_get_expected_ram_mipmap_page_size(cdata, n);
  size_t row_bytes = x_size * cdata->_num_components;
  PTA_uchar image = PTA_uchar::empty_array(size);
  // Rows are written bottom-to-top to flip the image vertically.
  for (int y = y_size - 1; y >= 0; --y) {
    unsigned char *p = image.p() + y * row_bytes;
    for (int x = 0; x < x_size; ++x) {

      // Read a little-endian numeric value of bpp bytes.
      unsigned int pixel = 0;
      int shift = 0;
      for (int bi = 0; bi < bpp; ++bi) {
        unsigned int ch = (unsigned char)in.get();
        pixel |= (ch << shift);
        shift += 8;
      }

      // Then break apart that value into its R, G, B, and maybe A components.
      // Each (mask, shift, scale) triple isolates a channel and expands it to
      // 8 bits; the multiply leaves the result in the top byte, hence >> 24.
      unsigned int r = (((pixel & r_mask) >> r_shift) * r_scale) >> 24;
      unsigned int g = (((pixel & g_mask) >> g_shift) * g_scale) >> 24;
      unsigned int b = (((pixel & b_mask) >> b_shift) * b_scale) >> 24;

      // Store the components in the Texture's image data.
      store_unscaled_byte(p, b);
      store_unscaled_byte(p, g);
      store_unscaled_byte(p, r);
      if (add_alpha) {
        unsigned int a = (((pixel & a_mask) >> a_shift) * a_scale) >> 24;
        store_unscaled_byte(p, a);
      }
    }
    nassertr(p <= image.p() + size, PTA_uchar());
    // Discard the row padding, if any.
    for (int bi = 0; bi < skip_bytes; ++bi) {
      in.get();
    }
  }

  return image;
}
8626 
/**
 * Called by read_dds for a DDS file in uncompressed luminance or luminance-
 * alpha format.
 *
 * Works like read_dds_level_generic_uncompressed(), except that only the red
 * (luminance) and alpha masks from the DDS header are considered.
 */
PTA_uchar Texture::
read_dds_level_luminance_uncompressed(Texture *tex, CData *cdata, const DDSHeader &header,
                                      int n, istream &in) {
  int x_size = tex->do_get_expected_mipmap_x_size(cdata, n);
  int y_size = tex->do_get_expected_mipmap_y_size(cdata, n);

  int pitch = (x_size * header.pf.rgb_bitcount) / 8;

  // MS says the pitch can be supplied in the header file and must be DWORD
  // aligned, but this appears to apply to level 0 mipmaps only (where it
  // almost always will be anyway). Other mipmap levels seem to be tightly
  // packed, but there isn't a separate pitch for each mipmap level. Weird.
  if (n == 0) {
    pitch = ((pitch + 3) / 4) * 4;
    if (header.dds_flags & DDSD_PITCH) {
      pitch = header.pitch;
    }
  }

  // Any padding bytes at the end of each row are consumed and discarded
  // after the row is decoded.
  int bpp = header.pf.rgb_bitcount / 8;
  int skip_bytes = pitch - (bpp * x_size);
  nassertr(skip_bytes >= 0, PTA_uchar());

  unsigned int r_mask = header.pf.r_mask;
  unsigned int a_mask = header.pf.a_mask;

  // Determine the number of bits to shift each mask to the right so that the
  // lowest on bit is at bit 0.
  int r_shift = get_lowest_on_bit(r_mask);
  int a_shift = get_lowest_on_bit(a_mask);

  // Then determine the scale factor required to raise the highest color value
  // to 0xff000000.
  unsigned int r_scale = 0;
  if (r_mask != 0) {
    r_scale = 0xff000000 / (r_mask >> r_shift);
  }
  unsigned int a_scale = 0;
  if (a_mask != 0) {
    a_scale = 0xff000000 / (a_mask >> a_shift);
  }

  bool add_alpha = has_alpha(cdata->_format);

  size_t size = tex->do_get_expected_ram_mipmap_page_size(cdata, n);
  size_t row_bytes = x_size * cdata->_num_components;
  PTA_uchar image = PTA_uchar::empty_array(size);
  // Rows are written bottom-to-top to flip the image vertically.
  for (int y = y_size - 1; y >= 0; --y) {
    unsigned char *p = image.p() + y * row_bytes;
    for (int x = 0; x < x_size; ++x) {

      // Read a little-endian numeric value of bpp bytes.
      unsigned int pixel = 0;
      int shift = 0;
      for (int bi = 0; bi < bpp; ++bi) {
        unsigned int ch = (unsigned char)in.get();
        pixel |= (ch << shift);
        shift += 8;
      }

      // Isolate the luminance channel and expand it to 8 bits; the multiply
      // leaves the result in the top byte, hence >> 24.
      unsigned int r = (((pixel & r_mask) >> r_shift) * r_scale) >> 24;

      // Store the components in the Texture's image data.
      store_unscaled_byte(p, r);
      if (add_alpha) {
        unsigned int a = (((pixel & a_mask) >> a_shift) * a_scale) >> 24;
        store_unscaled_byte(p, a);
      }
    }
    nassertr(p <= image.p() + size, PTA_uchar());
    // Discard the row padding, if any.
    for (int bi = 0; bi < skip_bytes; ++bi) {
      in.get();
    }
  }

  return image;
}
8708 
/**
 * Called by read_dds for DXT1 file format.
 *
 * Each 8-byte block encodes a 4x4 pixel cell: the first 32-bit word holds
 * the block's reference colors (unaffected by a vertical flip), and the
 * second word holds one byte of pixel indices per row of the cell.
 */
PTA_uchar Texture::
read_dds_level_bc1(Texture *tex, CData *cdata, const DDSHeader &header, int n, istream &in) {
  int x_size = tex->do_get_expected_mipmap_x_size(cdata, n);
  int y_size = tex->do_get_expected_mipmap_y_size(cdata, n);

  static const int div = 4;
  static const int block_bytes = 8;

  // The DXT1 image is divided into num_rows x num_cols blocks, where each
  // block represents 4x4 pixels.
  int num_cols = max(div, x_size) / div;
  int num_rows = max(div, y_size) / div;
  int row_length = num_cols * block_bytes;
  int linear_size = row_length * num_rows;

  // For the top-level mipmap, the header may declare the expected linear
  // size; sanity-check our computation against it.
  if (n == 0) {
    if (header.dds_flags & DDSD_LINEARSIZE) {
      nassertr(linear_size == (int)header.pitch, PTA_uchar());
    }
  }

  PTA_uchar image = PTA_uchar::empty_array(linear_size);

  if (y_size >= 4) {
    // We have to flip the image as we read it, because of DirectX's inverted
    // sense of up. That means we (a) reverse the order of the rows of blocks
    // . . .
    for (int ri = num_rows - 1; ri >= 0; --ri) {
      unsigned char *p = image.p() + row_length * ri;
      in.read((char *)p, row_length);

      for (int ci = 0; ci < num_cols; ++ci) {
        // . . . and (b) within each block, we reverse the 4 individual rows
        // of 4 pixels, by reversing the byte order of the index word.
        uint32_t *cells = (uint32_t *)p;
        uint32_t w = cells[1];
        w = ((w & 0xff) << 24) | ((w & 0xff00) << 8) | ((w & 0xff0000) >> 8) | ((w & 0xff000000U) >> 24);
        cells[1] = w;

        p += block_bytes;
      }
    }

  } else if (y_size >= 2) {
    // To invert a two-pixel high image, we just flip two rows within a cell.
    unsigned char *p = image.p();
    in.read((char *)p, row_length);

    for (int ci = 0; ci < num_cols; ++ci) {
      uint32_t *cells = (uint32_t *)p;
      uint32_t w = cells[1];
      // Swap the two low index bytes; the upper bytes, whose rows fall
      // outside the image, end up cleared.
      w = ((w & 0xff) << 8) | ((w & 0xff00) >> 8);
      cells[1] = w;

      p += block_bytes;
    }

  } else if (y_size >= 1) {
    // No need to invert a one-pixel-high image.
    unsigned char *p = image.p();
    in.read((char *)p, row_length);
  }

  return image;
}
8777 
/**
 * Called by read_dds for DXT2 or DXT3 file format.
 *
 * Each 16-byte block is an 8-byte alpha chunk (four 16-bit rows of 4-bit
 * alpha values) followed by an 8-byte color chunk in DXT1 layout.
 */
PTA_uchar Texture::
read_dds_level_bc2(Texture *tex, CData *cdata, const DDSHeader &header, int n, istream &in) {
  int x_size = tex->do_get_expected_mipmap_x_size(cdata, n);
  int y_size = tex->do_get_expected_mipmap_y_size(cdata, n);

  static const int div = 4;
  static const int block_bytes = 16;

  // The DXT3 image is divided into num_rows x num_cols blocks, where each
  // block represents 4x4 pixels. Unlike DXT1, each block consists of two
  // 8-byte chunks, representing the alpha and color separately.
  int num_cols = max(div, x_size) / div;
  int num_rows = max(div, y_size) / div;
  int row_length = num_cols * block_bytes;
  int linear_size = row_length * num_rows;

  // For the top-level mipmap, the header may declare the expected linear
  // size; sanity-check our computation against it.
  if (n == 0) {
    if (header.dds_flags & DDSD_LINEARSIZE) {
      nassertr(linear_size == (int)header.pitch, PTA_uchar());
    }
  }

  PTA_uchar image = PTA_uchar::empty_array(linear_size);

  if (y_size >= 4) {
    // We have to flip the image as we read it, because of DirectX's inverted
    // sense of up. That means we (a) reverse the order of the rows of blocks
    // . . .
    for (int ri = num_rows - 1; ri >= 0; --ri) {
      unsigned char *p = image.p() + row_length * ri;
      in.read((char *)p, row_length);

      for (int ci = 0; ci < num_cols; ++ci) {
        // . . . and (b) within each block, we reverse the 4 individual rows
        // of 4 pixels.
        uint32_t *cells = (uint32_t *)p;

        // Alpha. The block is four 16-bit words of pixel data.  Swapping
        // the halves of each word and exchanging the two words reverses
        // the order of the four rows.
        uint32_t w0 = cells[0];
        uint32_t w1 = cells[1];
        w0 = ((w0 & 0xffff) << 16) | ((w0 & 0xffff0000U) >> 16);
        w1 = ((w1 & 0xffff) << 16) | ((w1 & 0xffff0000U) >> 16);
        cells[0] = w1;
        cells[1] = w0;

        // Color. Only the second 32-bit dword of the color block represents
        // the pixel data; reversing its bytes reverses the four index rows.
        uint32_t w = cells[3];
        w = ((w & 0xff) << 24) | ((w & 0xff00) << 8) | ((w & 0xff0000) >> 8) | ((w & 0xff000000U) >> 24);
        cells[3] = w;

        p += block_bytes;
      }
    }

  } else if (y_size >= 2) {
    // To invert a two-pixel high image, we just flip two rows within a cell.
    unsigned char *p = image.p();
    in.read((char *)p, row_length);

    for (int ci = 0; ci < num_cols; ++ci) {
      uint32_t *cells = (uint32_t *)p;

      // Alpha: swap the two 16-bit rows held in the first word.
      uint32_t w0 = cells[0];
      w0 = ((w0 & 0xffff) << 16) | ((w0 & 0xffff0000U) >> 16);
      cells[0] = w0;

      // Color: swap the two low index bytes.
      uint32_t w = cells[3];
      w = ((w & 0xff) << 8) | ((w & 0xff00) >> 8);
      cells[3] = w;

      p += block_bytes;
    }

  } else if (y_size >= 1) {
    // No need to invert a one-pixel-high image.
    unsigned char *p = image.p();
    in.read((char *)p, row_length);
  }

  return image;
}
8863 
/**
 * Called by read_dds for DXT4 or DXT5 file format.
 *
 * Like DXT3, each 16-byte block is an 8-byte alpha chunk followed by an
 * 8-byte color chunk, but the alpha chunk packs its pixel indices into
 * 12-bit rows, which requires a byte-level shuffle to flip.
 */
PTA_uchar Texture::
read_dds_level_bc3(Texture *tex, CData *cdata, const DDSHeader &header, int n, istream &in) {
  int x_size = tex->do_get_expected_mipmap_x_size(cdata, n);
  int y_size = tex->do_get_expected_mipmap_y_size(cdata, n);

  static const int div = 4;
  static const int block_bytes = 16;

  // The DXT5 image is similar to DXT3, in that there each 4x4 block of pixels
  // consists of an alpha block and a color block, but the layout of the alpha
  // block is different.
  int num_cols = max(div, x_size) / div;
  int num_rows = max(div, y_size) / div;
  int row_length = num_cols * block_bytes;
  int linear_size = row_length * num_rows;

  // For the top-level mipmap, the header may declare the expected linear
  // size; sanity-check our computation against it.
  if (n == 0) {
    if (header.dds_flags & DDSD_LINEARSIZE) {
      nassertr(linear_size == (int)header.pitch, PTA_uchar());
    }
  }

  PTA_uchar image = PTA_uchar::empty_array(linear_size);

  if (y_size >= 4) {
    // We have to flip the image as we read it, because of DirectX's inverted
    // sense of up. That means we (a) reverse the order of the rows of blocks
    // . . .
    for (int ri = num_rows - 1; ri >= 0; --ri) {
      unsigned char *p = image.p() + row_length * ri;
      in.read((char *)p, row_length);

      for (int ci = 0; ci < num_cols; ++ci) {
        // . . . and (b) within each block, we reverse the 4 individual rows
        // of 4 pixels.
        uint32_t *cells = (uint32_t *)p;

        // Alpha. The block is one 16-bit word of reference values, followed
        // by six words of pixel values, in 12-bit rows. Tricky to invert.
        // The byte shuffle below reorders the four 12-bit rows in reverse,
        // leaving the two reference bytes p[0], p[1] untouched.
        unsigned char p2 = p[2];
        unsigned char p3 = p[3];
        unsigned char p4 = p[4];
        unsigned char p5 = p[5];
        unsigned char p6 = p[6];
        unsigned char p7 = p[7];

        p[2] = ((p7 & 0xf) << 4) | ((p6 & 0xf0) >> 4);
        p[3] = ((p5 & 0xf) << 4) | ((p7 & 0xf0) >> 4);
        p[4] = ((p6 & 0xf) << 4) | ((p5 & 0xf0) >> 4);
        p[5] = ((p4 & 0xf) << 4) | ((p3 & 0xf0) >> 4);
        p[6] = ((p2 & 0xf) << 4) | ((p4 & 0xf0) >> 4);
        p[7] = ((p3 & 0xf) << 4) | ((p2 & 0xf0) >> 4);

        // Color. Only the second 32-bit dword of the color block represents
        // the pixel data; reversing its bytes reverses the four index rows.
        uint32_t w = cells[3];
        w = ((w & 0xff) << 24) | ((w & 0xff00) << 8) | ((w & 0xff0000) >> 8) | ((w & 0xff000000U) >> 24);
        cells[3] = w;

        p += block_bytes;
      }
    }

  } else if (y_size >= 2) {
    // To invert a two-pixel high image, we just flip two rows within a cell.
    unsigned char *p = image.p();
    in.read((char *)p, row_length);

    for (int ci = 0; ci < num_cols; ++ci) {
      uint32_t *cells = (uint32_t *)p;

      // Alpha: exchange the first two 12-bit index rows in place.
      unsigned char p2 = p[2];
      unsigned char p3 = p[3];
      unsigned char p4 = p[4];

      p[2] = ((p4 & 0xf) << 4) | ((p3 & 0xf0) >> 4);
      p[3] = ((p2 & 0xf) << 4) | ((p4 & 0xf0) >> 4);
      p[4] = ((p3 & 0xf) << 4) | ((p2 & 0xf0) >> 4);

      uint32_t w0 = cells[0];
      w0 = ((w0 & 0xffff) << 16) | ((w0 & 0xffff0000U) >> 16);
      cells[0] = w0;

      // Color: swap the two low index bytes.
      uint32_t w = cells[3];
      w = ((w & 0xff) << 8) | ((w & 0xff00) >> 8);
      cells[3] = w;

      p += block_bytes;
    }

  } else if (y_size >= 1) {
    // No need to invert a one-pixel-high image.
    unsigned char *p = image.p();
    in.read((char *)p, row_length);
  }

  return image;
}
8965 
/**
 * Called by read_dds for ATI1 compression.
 *
 * Each 8-byte block compresses a 4x4 single-channel cell using the same
 * layout as the alpha chunk of DXT5: two reference bytes followed by six
 * bytes of pixel indices packed in 12-bit rows.
 */
PTA_uchar Texture::
read_dds_level_bc4(Texture *tex, CData *cdata, const DDSHeader &header, int n, istream &in) {
  int x_size = tex->do_get_expected_mipmap_x_size(cdata, n);
  int y_size = tex->do_get_expected_mipmap_y_size(cdata, n);

  static const int div = 4;
  static const int block_bytes = 8;

  // The ATI1 (BC4) format uses the same compression mechanism as the alpha
  // channel of DXT5.
  int num_cols = max(div, x_size) / div;
  int num_rows = max(div, y_size) / div;
  int row_length = num_cols * block_bytes;
  int linear_size = row_length * num_rows;

  // For the top-level mipmap, the header may declare the expected linear
  // size; sanity-check our computation against it.
  if (n == 0) {
    if (header.dds_flags & DDSD_LINEARSIZE) {
      nassertr(linear_size == (int)header.pitch, PTA_uchar());
    }
  }

  PTA_uchar image = PTA_uchar::empty_array(linear_size);

  if (y_size >= 4) {
    // We have to flip the image as we read it, because of DirectX's inverted
    // sense of up. That means we (a) reverse the order of the rows of blocks
    // . . .
    for (int ri = num_rows - 1; ri >= 0; --ri) {
      unsigned char *p = image.p() + row_length * ri;
      in.read((char *)p, row_length);

      for (int ci = 0; ci < num_cols; ++ci) {
        // . . . and (b) within each block, we reverse the 4 individual rows
        // of 4 pixels. The block is one 16-bit word of reference values,
        // followed by six words of pixel values, in 12-bit rows. Tricky to
        // invert.  The byte shuffle reorders the four 12-bit rows in reverse,
        // leaving the reference bytes p[0], p[1] untouched.
        unsigned char p2 = p[2];
        unsigned char p3 = p[3];
        unsigned char p4 = p[4];
        unsigned char p5 = p[5];
        unsigned char p6 = p[6];
        unsigned char p7 = p[7];

        p[2] = ((p7 & 0xf) << 4) | ((p6 & 0xf0) >> 4);
        p[3] = ((p5 & 0xf) << 4) | ((p7 & 0xf0) >> 4);
        p[4] = ((p6 & 0xf) << 4) | ((p5 & 0xf0) >> 4);
        p[5] = ((p4 & 0xf) << 4) | ((p3 & 0xf0) >> 4);
        p[6] = ((p2 & 0xf) << 4) | ((p4 & 0xf0) >> 4);
        p[7] = ((p3 & 0xf) << 4) | ((p2 & 0xf0) >> 4);

        p += block_bytes;
      }
    }

  } else if (y_size >= 2) {
    // To invert a two-pixel high image, we just flip two rows within a cell.
    unsigned char *p = image.p();
    in.read((char *)p, row_length);

    for (int ci = 0; ci < num_cols; ++ci) {
      // Exchange the first two 12-bit index rows in place.
      unsigned char p2 = p[2];
      unsigned char p3 = p[3];
      unsigned char p4 = p[4];

      p[2] = ((p4 & 0xf) << 4) | ((p3 & 0xf0) >> 4);
      p[3] = ((p2 & 0xf) << 4) | ((p4 & 0xf0) >> 4);
      p[4] = ((p3 & 0xf) << 4) | ((p2 & 0xf0) >> 4);

      p += block_bytes;
    }

  } else if (y_size >= 1) {
    // No need to invert a one-pixel-high image.
    unsigned char *p = image.p();
    in.read((char *)p, row_length);
  }

  return image;
}
9048 
/**
 * Called by read_dds for ATI2 compression.
 *
 * Each 16-byte BC5 block is two back-to-back 8-byte BC4-style channel
 * blocks (one per channel), so the BC4 row flip can be applied to each
 * 8-byte unit independently.
 */
PTA_uchar Texture::
read_dds_level_bc5(Texture *tex, CData *cdata, const DDSHeader &header, int n, istream &in) {
  int x_size = tex->do_get_expected_mipmap_x_size(cdata, n);
  int y_size = tex->do_get_expected_mipmap_y_size(cdata, n);

  // The ATI2 (BC5) format uses the same compression mechanism as the ATI1
  // (BC4) format, but doubles the channels.  By counting twice as many
  // 8-byte "columns" per block row (max(4, x) / 2 == 2 * (max(4, x) / 4)
  // for the power-of-two sizes handled here), the per-unit flip below is
  // applied to each channel block in turn.
  int num_cols = max(4, x_size) / 2;
  int num_rows = max(4, y_size) / 4;
  int row_length = num_cols * 8;
  int linear_size = row_length * num_rows;

  // For the top-level mipmap, the header may declare the expected linear
  // size; sanity-check our computation against it.
  if (n == 0) {
    if (header.dds_flags & DDSD_LINEARSIZE) {
      nassertr(linear_size == (int)header.pitch, PTA_uchar());
    }
  }

  PTA_uchar image = PTA_uchar::empty_array(linear_size);

  if (y_size >= 4) {
    // We have to flip the image as we read it, because of DirectX's inverted
    // sense of up. That means we (a) reverse the order of the rows of blocks
    // . . .
    for (int ri = num_rows - 1; ri >= 0; --ri) {
      unsigned char *p = image.p() + row_length * ri;
      in.read((char *)p, row_length);

      for (int ci = 0; ci < num_cols; ++ci) {
        // . . . and (b) within each block, we reverse the 4 individual rows
        // of 4 pixels. The block is one 16-bit word of reference values,
        // followed by six words of pixel values, in 12-bit rows. Tricky to
        // invert.  The byte shuffle reorders the four 12-bit rows in
        // reverse, leaving the reference bytes p[0], p[1] untouched.
        unsigned char p2 = p[2];
        unsigned char p3 = p[3];
        unsigned char p4 = p[4];
        unsigned char p5 = p[5];
        unsigned char p6 = p[6];
        unsigned char p7 = p[7];

        p[2] = ((p7 & 0xf) << 4) | ((p6 & 0xf0) >> 4);
        p[3] = ((p5 & 0xf) << 4) | ((p7 & 0xf0) >> 4);
        p[4] = ((p6 & 0xf) << 4) | ((p5 & 0xf0) >> 4);
        p[5] = ((p4 & 0xf) << 4) | ((p3 & 0xf0) >> 4);
        p[6] = ((p2 & 0xf) << 4) | ((p4 & 0xf0) >> 4);
        p[7] = ((p3 & 0xf) << 4) | ((p2 & 0xf0) >> 4);

        p += 8;
      }
    }

  } else if (y_size >= 2) {
    // To invert a two-pixel high image, we just flip two rows within a cell.
    unsigned char *p = image.p();
    in.read((char *)p, row_length);

    for (int ci = 0; ci < num_cols; ++ci) {
      // Exchange the first two 12-bit index rows in place.
      unsigned char p2 = p[2];
      unsigned char p3 = p[3];
      unsigned char p4 = p[4];

      p[2] = ((p4 & 0xf) << 4) | ((p3 & 0xf0) >> 4);
      p[3] = ((p2 & 0xf) << 4) | ((p4 & 0xf0) >> 4);
      p[4] = ((p3 & 0xf) << 4) | ((p2 & 0xf0) >> 4);

      p += 8;
    }

  } else if (y_size >= 1) {
    // No need to invert a one-pixel-high image.
    unsigned char *p = image.p();
    in.read((char *)p, row_length);
  }

  return image;
}
9128 
9129 /**
9130  * Removes the indicated PreparedGraphicsObjects table from the Texture's
9131  * table, without actually releasing the texture. This is intended to be
9132  * called only from PreparedGraphicsObjects::release_texture(); it should
9133  * never be called by user code.
9134  */
9135 void Texture::
9136 clear_prepared(int view, PreparedGraphicsObjects *prepared_objects) {
9137  PreparedViews::iterator pvi;
9138  pvi = _prepared_views.find(prepared_objects);
9139  if (pvi != _prepared_views.end()) {
9140  Contexts &contexts = (*pvi).second;
9141  Contexts::iterator ci;
9142  ci = contexts.find(view);
9143  if (ci != contexts.end()) {
9144  contexts.erase(ci);
9145  }
9146 
9147  if (contexts.empty()) {
9148  _prepared_views.erase(pvi);
9149  }
9150  }
9151 }
9152 
9153 /**
9154  * Reduces the number of channels in the texture, if necessary, according to
9155  * num_channels.
9156  */
9157 void Texture::
9158 consider_downgrade(PNMImage &pnmimage, int num_channels, const string &name) {
9159  if (num_channels != 0 && num_channels < pnmimage.get_num_channels()) {
9160  // One special case: we can't reduce from 3 to 2 components, since that
9161  // would require adding an alpha channel.
9162  if (pnmimage.get_num_channels() == 3 && num_channels == 2) {
9163  return;
9164  }
9165 
9166  gobj_cat.info()
9167  << "Downgrading " << name << " from "
9168  << pnmimage.get_num_channels() << " components to "
9169  << num_channels << ".\n";
9170  pnmimage.set_num_channels(num_channels);
9171  }
9172 }
9173 
9174 /**
9175  * Called by generate_simple_ram_image(), this compares the two PNMImages
9176  * pixel-by-pixel. If they're similar enough (within a given threshold),
9177  * returns true.
9178  */
9179 bool Texture::
9180 compare_images(const PNMImage &a, const PNMImage &b) {
9181  nassertr(a.get_maxval() == 255 && b.get_maxval() == 255, false);
9182  nassertr(a.get_num_channels() == 4 && b.get_num_channels() == 4, false);
9183  nassertr(a.get_x_size() == b.get_x_size() &&
9184  a.get_y_size() == b.get_y_size(), false);
9185 
9186  const xel *a_array = a.get_array();
9187  const xel *b_array = b.get_array();
9188  const xelval *a_alpha = a.get_alpha_array();
9189  const xelval *b_alpha = b.get_alpha_array();
9190 
9191  int x_size = a.get_x_size();
9192 
9193  int delta = 0;
9194  for (int yi = 0; yi < a.get_y_size(); ++yi) {
9195  const xel *a_row = a_array + yi * x_size;
9196  const xel *b_row = b_array + yi * x_size;
9197  const xelval *a_alpha_row = a_alpha + yi * x_size;
9198  const xelval *b_alpha_row = b_alpha + yi * x_size;
9199  for (int xi = 0; xi < x_size; ++xi) {
9200  delta += abs(PPM_GETR(a_row[xi]) - PPM_GETR(b_row[xi]));
9201  delta += abs(PPM_GETG(a_row[xi]) - PPM_GETG(b_row[xi]));
9202  delta += abs(PPM_GETB(a_row[xi]) - PPM_GETB(b_row[xi]));
9203  delta += abs(a_alpha_row[xi] - b_alpha_row[xi]);
9204  }
9205  }
9206 
9207  double average_delta = (double)delta / ((double)a.get_x_size() * (double)b.get_y_size() * (double)a.get_maxval());
9208  return (average_delta <= simple_image_threshold);
9209 }
9210 
9211 /**
9212  * Generates the next mipmap level from the previous one. If there are
9213  * multiple pages (e.g. a cube map), generates each page independently.
9214  *
9215  * x_size and y_size are the size of the previous level. They need not be a
9216  * power of 2, or even a multiple of 2.
9217  *
9218  * Assumes the lock is already held.
9219  */
9220 void Texture::
9221 do_filter_2d_mipmap_pages(const CData *cdata,
9222  Texture::RamImage &to, const Texture::RamImage &from,
9223  int x_size, int y_size) const {
9224  Filter2DComponent *filter_component;
9225  Filter2DComponent *filter_alpha;
9226 
9227  if (is_srgb(cdata->_format)) {
9228  // We currently only support sRGB mipmap generation for unsigned byte
9229  // textures, due to our use of a lookup table.
9230  nassertv(cdata->_component_type == T_unsigned_byte);
9231 
9232  if (has_sse2_sRGB_encode()) {
9233  filter_component = &filter_2d_unsigned_byte_srgb_sse2;
9234  } else {
9235  filter_component = &filter_2d_unsigned_byte_srgb;
9236  }
9237 
9238  // Alpha is always linear.
9239  filter_alpha = &filter_2d_unsigned_byte;
9240 
9241  } else {
9242  switch (cdata->_component_type) {
9243  case T_unsigned_byte:
9244  filter_component = &filter_2d_unsigned_byte;
9245  break;
9246 
9247  case T_unsigned_short:
9248  filter_component = &filter_2d_unsigned_short;
9249  break;
9250 
9251  case T_float:
9252  filter_component = &filter_2d_float;
9253  break;
9254 
9255  default:
9256  gobj_cat.error()
9257  << "Unable to generate mipmaps for 2D texture with component type "
9258  << cdata->_component_type << "!";
9259  return;
9260  }
9261  filter_alpha = filter_component;
9262  }
9263 
9264  size_t pixel_size = cdata->_num_components * cdata->_component_width;
9265  size_t row_size = (size_t)x_size * pixel_size;
9266 
9267  int to_x_size = max(x_size >> 1, 1);
9268  int to_y_size = max(y_size >> 1, 1);
9269 
9270  size_t to_row_size = (size_t)to_x_size * pixel_size;
9271  to._page_size = (size_t)to_y_size * to_row_size;
9272  to._image = PTA_uchar::empty_array(to._page_size * cdata->_z_size * cdata->_num_views, get_class_type());
9273 
9274  bool alpha = has_alpha(cdata->_format);
9275  int num_color_components = cdata->_num_components;
9276  if (alpha) {
9277  --num_color_components;
9278  }
9279 
9280  int num_pages = cdata->_z_size * cdata->_num_views;
9281  for (int z = 0; z < num_pages; ++z) {
9282  // For each level.
9283  unsigned char *p = to._image.p() + z * to._page_size;
9284  nassertv(p <= to._image.p() + to._image.size() + to._page_size);
9285  const unsigned char *q = from._image.p() + z * from._page_size;
9286  nassertv(q <= from._image.p() + from._image.size() + from._page_size);
9287  if (y_size != 1) {
9288  int y;
9289  for (y = 0; y < y_size - 1; y += 2) {
9290  // For each row.
9291  nassertv(p == to._image.p() + z * to._page_size + (y / 2) * to_row_size);
9292  nassertv(q == from._image.p() + z * from._page_size + y * row_size);
9293  if (x_size != 1) {
9294  int x;
9295  for (x = 0; x < x_size - 1; x += 2) {
9296  // For each pixel.
9297  for (int c = 0; c < num_color_components; ++c) {
9298  // For each component.
9299  filter_component(p, q, pixel_size, row_size);
9300  }
9301  if (alpha) {
9302  filter_alpha(p, q, pixel_size, row_size);
9303  }
9304  q += pixel_size;
9305  }
9306  if (x < x_size) {
9307  // Skip the last odd pixel.
9308  q += pixel_size;
9309  }
9310  } else {
9311  // Just one pixel.
9312  for (int c = 0; c < num_color_components; ++c) {
9313  // For each component.
9314  filter_component(p, q, 0, row_size);
9315  }
9316  if (alpha) {
9317  filter_alpha(p, q, 0, row_size);
9318  }
9319  }
9320  q += row_size;
9322  }
9323  if (y < y_size) {
9324  // Skip the last odd row.
9325  q += row_size;
9326  }
9327  } else {
9328  // Just one row.
9329  if (x_size != 1) {
9330  int x;
9331  for (x = 0; x < x_size - 1; x += 2) {
9332  // For each pixel.
9333  for (int c = 0; c < num_color_components; ++c) {
9334  // For each component.
9335  filter_component(p, q, pixel_size, 0);
9336  }
9337  if (alpha) {
9338  filter_alpha(p, q, pixel_size, 0);
9339  }
9340  q += pixel_size;
9341  }
9342  if (x < x_size) {
9343  // Skip the last odd pixel.
9344  q += pixel_size;
9345  }
9346  } else {
9347  // Just one pixel.
9348  for (int c = 0; c < num_color_components; ++c) {
9349  // For each component.
9350  filter_component(p, q, 0, 0);
9351  }
9352  if (alpha) {
9353  filter_alpha(p, q, pixel_size, 0);
9354  }
9355  }
9356  }
9357 
9358  nassertv(p == to._image.p() + (z + 1) * to._page_size);
9359  nassertv(q == from._image.p() + (z + 1) * from._page_size);
9360  }
9361 }
9362 
/**
 * Generates the next mipmap level from the previous one, treating all the
 * pages of the level as a single 3-d block of pixels.
 *
 * x_size, y_size, and z_size are the size of the previous level.  They need
 * not be a power of 2, or even a multiple of 2.
 *
 * Assumes the lock is already held.
 */
void Texture::
do_filter_3d_mipmap_level(const CData *cdata,
                          Texture::RamImage &to, const Texture::RamImage &from,
                          int x_size, int y_size, int z_size) const {
  Filter3DComponent *filter_component;
  Filter3DComponent *filter_alpha;

  if (is_srgb(cdata->_format)) {
    // We currently only support sRGB mipmap generation for unsigned byte
    // textures, due to our use of a lookup table.
    nassertv(cdata->_component_type == T_unsigned_byte);

    if (has_sse2_sRGB_encode()) {
      filter_component = &filter_3d_unsigned_byte_srgb_sse2;
    } else {
      filter_component = &filter_3d_unsigned_byte_srgb;
    }

    // Alpha is always linear.
    filter_alpha = &filter_3d_unsigned_byte;

  } else {
    switch (cdata->_component_type) {
    case T_unsigned_byte:
      filter_component = &filter_3d_unsigned_byte;
      break;

    case T_unsigned_short:
      filter_component = &filter_3d_unsigned_short;
      break;

    case T_float:
      filter_component = &filter_3d_float;
      break;

    default:
      gobj_cat.error()
        << "Unable to generate mipmaps for 3D texture with component type "
        << cdata->_component_type << "!";
      return;
    }
    filter_alpha = filter_component;
  }

  // Byte strides through the source level.
  size_t pixel_size = cdata->_num_components * cdata->_component_width;
  size_t row_size = (size_t)x_size * pixel_size;
  size_t page_size = (size_t)y_size * row_size;
  size_t view_size = (size_t)z_size * page_size;

  // The destination level halves every dimension (rounded down), clamped to
  // a minimum of one.
  int to_x_size = max(x_size >> 1, 1);
  int to_y_size = max(y_size >> 1, 1);
  int to_z_size = max(z_size >> 1, 1);

  size_t to_row_size = (size_t)to_x_size * pixel_size;
  size_t to_page_size = (size_t)to_y_size * to_row_size;
  size_t to_view_size = (size_t)to_z_size * to_page_size;
  to._page_size = to_page_size;
  to._image = PTA_uchar::empty_array(to_page_size * to_z_size * cdata->_num_views, get_class_type());

  bool alpha = has_alpha(cdata->_format);
  int num_color_components = cdata->_num_components;
  if (alpha) {
    // Alpha is filtered separately; exclude it from the color loop below.
    --num_color_components;
  }

  // The filter functions advance p (destination) one component at a time; q
  // (source) is advanced explicitly by the loops below.  Each branch passes
  // a 0 stride for any dimension that has collapsed to a single element.
  for (int view = 0; view < cdata->_num_views; ++view) {
    unsigned char *start_to = to._image.p() + view * to_view_size;
    const unsigned char *start_from = from._image.p() + view * view_size;
    nassertv(start_to + to_view_size <= to._image.p() + to._image.size());
    nassertv(start_from + view_size <= from._image.p() + from._image.size());
    unsigned char *p = start_to;
    const unsigned char *q = start_from;
    if (z_size != 1) {
      int z;
      for (z = 0; z < z_size - 1; z += 2) {
        // For each level.
        nassertv(p == start_to + (z / 2) * to_page_size);
        nassertv(q == start_from + z * page_size);
        if (y_size != 1) {
          int y;
          for (y = 0; y < y_size - 1; y += 2) {
            // For each row.
            nassertv(p == start_to + (z / 2) * to_page_size + (y / 2) * to_row_size);
            nassertv(q == start_from + z * page_size + y * row_size);
            if (x_size != 1) {
              int x;
              for (x = 0; x < x_size - 1; x += 2) {
                // For each pixel.
                for (int c = 0; c < num_color_components; ++c) {
                  // For each component.
                  filter_component(p, q, pixel_size, row_size, page_size);
                }
                if (alpha) {
                  filter_alpha(p, q, pixel_size, row_size, page_size);
                }
                q += pixel_size;
              }
              if (x < x_size) {
                // Skip the last odd pixel.
                q += pixel_size;
              }
            } else {
              // Just one pixel.
              for (int c = 0; c < num_color_components; ++c) {
                // For each component.
                filter_component(p, q, 0, row_size, page_size);
              }
              if (alpha) {
                filter_alpha(p, q, 0, row_size, page_size);
              }
            }
            q += row_size;
          }
          if (y < y_size) {
            // Skip the last odd row.
            q += row_size;
          }
        } else {
          // Just one row.
          if (x_size != 1) {
            int x;
            for (x = 0; x < x_size - 1; x += 2) {
              // For each pixel.
              for (int c = 0; c < num_color_components; ++c) {
                // For each component.
                filter_component(p, q, pixel_size, 0, page_size);
              }
              if (alpha) {
                filter_alpha(p, q, pixel_size, 0, page_size);
              }
              q += pixel_size;
            }
            if (x < x_size) {
              // Skip the last odd pixel.
              q += pixel_size;
            }
          } else {
            // Just one pixel.
            for (int c = 0; c < num_color_components; ++c) {
              // For each component.
              filter_component(p, q, 0, 0, page_size);
            }
            if (alpha) {
              filter_alpha(p, q, 0, 0, page_size);
            }
          }
        }
        q += page_size;
      }
      if (z < z_size) {
        // Skip the last odd page.
        q += page_size;
      }
    } else {
      // Just one page.
      if (y_size != 1) {
        int y;
        for (y = 0; y < y_size - 1; y += 2) {
          // For each row.
          nassertv(p == start_to + (y / 2) * to_row_size);
          nassertv(q == start_from + y * row_size);
          if (x_size != 1) {
            int x;
            for (x = 0; x < x_size - 1; x += 2) {
              // For each pixel.
              for (int c = 0; c < num_color_components; ++c) {
                // For each component.
                filter_component(p, q, pixel_size, row_size, 0);
              }
              if (alpha) {
                filter_alpha(p, q, pixel_size, row_size, 0);
              }
              q += pixel_size;
            }
            if (x < x_size) {
              // Skip the last odd pixel.
              q += pixel_size;
            }
          } else {
            // Just one pixel.
            for (int c = 0; c < num_color_components; ++c) {
              // For each component.
              filter_component(p, q, 0, row_size, 0);
            }
            if (alpha) {
              filter_alpha(p, q, 0, row_size, 0);
            }
          }
          q += row_size;
        }
        if (y < y_size) {
          // Skip the last odd row.
          q += row_size;
        }
      } else {
        // Just one row.
        if (x_size != 1) {
          int x;
          for (x = 0; x < x_size - 1; x += 2) {
            // For each pixel.
            for (int c = 0; c < num_color_components; ++c) {
              // For each component.
              filter_component(p, q, pixel_size, 0, 0);
            }
            if (alpha) {
              filter_alpha(p, q, pixel_size, 0, 0);
            }
            q += pixel_size;
          }
          if (x < x_size) {
            // Skip the last odd pixel.
            q += pixel_size;
          }
        } else {
          // Just one pixel.
          for (int c = 0; c < num_color_components; ++c) {
            // For each component.
            filter_component(p, q, 0, 0, 0);
          }
          if (alpha) {
            filter_alpha(p, q, 0, 0, 0);
          }
        }
      }
    }

    nassertv(p == start_to + to_z_size * to_page_size);
    nassertv(q == start_from + z_size * page_size);
  }
}
9604 
9605 /**
9606  * Averages a 2x2 block of pixel components into a single pixel component, for
9607  * producing the next mipmap level. Increments p and q to the next component.
9608  */
9609 void Texture::
9610 filter_2d_unsigned_byte(unsigned char *&p, const unsigned char *&q,
9611  size_t pixel_size, size_t row_size) {
9612  unsigned int result = ((unsigned int)q[0] +
9613  (unsigned int)q[pixel_size] +
9614  (unsigned int)q[row_size] +
9615  (unsigned int)q[pixel_size + row_size]) >> 2;
9616  *p = (unsigned char)result;
9617  ++p;
9618  ++q;
9619 }
9620 
9621 /**
9622  * Averages a 2x2 block of pixel components into a single pixel component, for
9623  * producing the next mipmap level. Increments p and q to the next component.
9624  */
9625 void Texture::
9626 filter_2d_unsigned_byte_srgb(unsigned char *&p, const unsigned char *&q,
9627  size_t pixel_size, size_t row_size) {
9628  float result = (decode_sRGB_float(q[0]) +
9629  decode_sRGB_float(q[pixel_size]) +
9630  decode_sRGB_float(q[row_size]) +
9631  decode_sRGB_float(q[pixel_size + row_size]));
9632 
9633  *p = encode_sRGB_uchar(result * 0.25f);
9634  ++p;
9635  ++q;
9636 }
9637 
9638 /**
9639  * Averages a 2x2 block of pixel components into a single pixel component, for
9640  * producing the next mipmap level. Increments p and q to the next component.
9641  */
9642 void Texture::
9643 filter_2d_unsigned_byte_srgb_sse2(unsigned char *&p, const unsigned char *&q,
9644  size_t pixel_size, size_t row_size) {
9645  float result = (decode_sRGB_float(q[0]) +
9646  decode_sRGB_float(q[pixel_size]) +
9647  decode_sRGB_float(q[row_size]) +
9648  decode_sRGB_float(q[pixel_size + row_size]));
9649 
9650  *p = encode_sRGB_uchar_sse2(result * 0.25f);
9651  ++p;
9652  ++q;
9653 }
9654 
9655 /**
9656  * Averages a 2x2 block of pixel components into a single pixel component, for
9657  * producing the next mipmap level. Increments p and q to the next component.
9658  */
9659 void Texture::
9660 filter_2d_unsigned_short(unsigned char *&p, const unsigned char *&q,
9661  size_t pixel_size, size_t row_size) {
9662  unsigned int result = ((unsigned int)*(unsigned short *)&q[0] +
9663  (unsigned int)*(unsigned short *)&q[pixel_size] +
9664  (unsigned int)*(unsigned short *)&q[row_size] +
9665  (unsigned int)*(unsigned short *)&q[pixel_size + row_size]) >> 2;
9666  store_unscaled_short(p, result);
9667  q += 2;
9668 }
9669 
9670 /**
9671  * Averages a 2x2 block of pixel components into a single pixel component, for
9672  * producing the next mipmap level. Increments p and q to the next component.
9673  */
9674 void Texture::
9675 filter_2d_float(unsigned char *&p, const unsigned char *&q,
9676  size_t pixel_size, size_t row_size) {
9677  *(float *)p = (*(float *)&q[0] +
9678  *(float *)&q[pixel_size] +
9679  *(float *)&q[row_size] +
9680  *(float *)&q[pixel_size + row_size]) / 4.0f;
9681  p += 4;
9682  q += 4;
9683 }
9684 
9685 /**
9686  * Averages a 2x2x2 block of pixel components into a single pixel component,
9687  * for producing the next mipmap level. Increments p and q to the next
9688  * component.
9689  */
9690 void Texture::
9691 filter_3d_unsigned_byte(unsigned char *&p, const unsigned char *&q,
9692  size_t pixel_size, size_t row_size, size_t page_size) {
9693  unsigned int result = ((unsigned int)q[0] +
9694  (unsigned int)q[pixel_size] +
9695  (unsigned int)q[row_size] +
9696  (unsigned int)q[pixel_size + row_size] +
9697  (unsigned int)q[page_size] +
9698  (unsigned int)q[pixel_size + page_size] +
9699  (unsigned int)q[row_size + page_size] +
9700  (unsigned int)q[pixel_size + row_size + page_size]) >> 3;
9701  *p = (unsigned char)result;
9702  ++p;
9703  ++q;
9704 }
9705 
9706 /**
9707  * Averages a 2x2x2 block of pixel components into a single pixel component,
9708  * for producing the next mipmap level. Increments p and q to the next
9709  * component.
9710  */
9711 void Texture::
9712 filter_3d_unsigned_byte_srgb(unsigned char *&p, const unsigned char *&q,
9713  size_t pixel_size, size_t row_size, size_t page_size) {
9714  float result = (decode_sRGB_float(q[0]) +
9715  decode_sRGB_float(q[pixel_size]) +
9716  decode_sRGB_float(q[row_size]) +
9717  decode_sRGB_float(q[pixel_size + row_size]) +
9718  decode_sRGB_float(q[page_size]) +
9719  decode_sRGB_float(q[pixel_size + page_size]) +
9720  decode_sRGB_float(q[row_size + page_size]) +
9721  decode_sRGB_float(q[pixel_size + row_size + page_size]));
9722 
9723  *p = encode_sRGB_uchar(result * 0.125f);
9724  ++p;
9725  ++q;
9726 }
9727 
9728 /**
9729  * Averages a 2x2x2 block of pixel components into a single pixel component,
9730  * for producing the next mipmap level. Increments p and q to the next
9731  * component.
9732  */
9733 void Texture::
9734 filter_3d_unsigned_byte_srgb_sse2(unsigned char *&p, const unsigned char *&q,
9735  size_t pixel_size, size_t row_size, size_t page_size) {
9736  float result = (decode_sRGB_float(q[0]) +
9737  decode_sRGB_float(q[pixel_size]) +
9738  decode_sRGB_float(q[row_size]) +
9739  decode_sRGB_float(q[pixel_size + row_size]) +
9740  decode_sRGB_float(q[page_size]) +
9741  decode_sRGB_float(q[pixel_size + page_size]) +
9742  decode_sRGB_float(q[row_size + page_size]) +
9743  decode_sRGB_float(q[pixel_size + row_size + page_size]));
9744 
9745  *p = encode_sRGB_uchar_sse2(result * 0.125f);
9746  ++p;
9747  ++q;
9748 }
9749 
9750 /**
9751  * Averages a 2x2x2 block of pixel components into a single pixel component,
9752  * for producing the next mipmap level. Increments p and q to the next
9753  * component.
9754  */
9755 void Texture::
9756 filter_3d_unsigned_short(unsigned char *&p, const unsigned char *&q,
9757  size_t pixel_size, size_t row_size,
9758  size_t page_size) {
9759  unsigned int result = ((unsigned int)*(unsigned short *)&q[0] +
9760  (unsigned int)*(unsigned short *)&q[pixel_size] +
9761  (unsigned int)*(unsigned short *)&q[row_size] +
9762  (unsigned int)*(unsigned short *)&q[pixel_size + row_size] +
9763  (unsigned int)*(unsigned short *)&q[page_size] +
9764  (unsigned int)*(unsigned short *)&q[pixel_size + page_size] +
9765  (unsigned int)*(unsigned short *)&q[row_size + page_size] +
9766  (unsigned int)*(unsigned short *)&q[pixel_size + row_size + page_size]) >> 3;
9767  store_unscaled_short(p, result);
9768  q += 2;
9769 }
9770 
9771 /**
9772  * Averages a 2x2x2 block of pixel components into a single pixel component,
9773  * for producing the next mipmap level. Increments p and q to the next
9774  * component.
9775  */
9776 void Texture::
9777 filter_3d_float(unsigned char *&p, const unsigned char *&q,
9778  size_t pixel_size, size_t row_size, size_t page_size) {
9779  *(float *)p = (*(float *)&q[0] +
9780  *(float *)&q[pixel_size] +
9781  *(float *)&q[row_size] +
9782  *(float *)&q[pixel_size + row_size] +
9783  *(float *)&q[page_size] +
9784  *(float *)&q[pixel_size + page_size] +
9785  *(float *)&q[row_size + page_size] +
9786  *(float *)&q[pixel_size + row_size + page_size]) / 8.0f;
9787  p += 4;
9788  q += 4;
9789 }
9790 
9791 /**
9792  * Invokes the squish library to compress the RAM image(s).
9793  */
9794 bool Texture::
9795 do_squish(CData *cdata, Texture::CompressionMode compression, int squish_flags) {
9796 #ifdef HAVE_SQUISH
9797  if (!do_has_all_ram_mipmap_images(cdata)) {
9798  // If we're about to compress the RAM image, we should ensure that we have
9799  // all of the mipmap levels first.
9800  do_generate_ram_mipmap_images(cdata, false);
9801  }
9802 
9803  RamImages compressed_ram_images;
9804  compressed_ram_images.reserve(cdata->_ram_images.size());
9805  for (size_t n = 0; n < cdata->_ram_images.size(); ++n) {
9806  RamImage compressed_image;
9807  int x_size = do_get_expected_mipmap_x_size(cdata, n);
9808  int y_size = do_get_expected_mipmap_y_size(cdata, n);
9809  int num_pages = do_get_expected_mipmap_num_pages(cdata, n);
9810  int page_size = squish::GetStorageRequirements(x_size, y_size, squish_flags);
9811  int cell_size = squish::GetStorageRequirements(4, 4, squish_flags);
9812 
9813  compressed_image._page_size = page_size;
9814  compressed_image._image = PTA_uchar::empty_array(page_size * num_pages);
9815  for (int z = 0; z < num_pages; ++z) {
9816  unsigned char *dest_page = compressed_image._image.p() + z * page_size;
9817  unsigned const char *source_page = cdata->_ram_images[n]._image.p() + z * cdata->_ram_images[n]._page_size;
9818  unsigned const char *source_page_end = source_page + cdata->_ram_images[n]._page_size;
9819  // Convert one 4 x 4 cell at a time.
9820  unsigned char *d = dest_page;
9821  for (int y = 0; y < y_size; y += 4) {
9822  for (int x = 0; x < x_size; x += 4) {
9823  unsigned char tb[16 * 4];
9824  int mask = 0;
9825  unsigned char *t = tb;
9826  for (int i = 0; i < 16; ++i) {
9827  int xi = x + i % 4;
9828  int yi = y + i / 4;
9829  unsigned const char *s = source_page + (yi * x_size + xi) * cdata->_num_components;
9830  if (s < source_page_end) {
9831  switch (cdata->_num_components) {
9832  case 1:
9833  t[0] = s[0]; // r
9834  t[1] = s[0]; // g
9835  t[2] = s[0]; // b
9836  t[3] = 255; // a
9837  break;
9838 
9839  case 2:
9840  t[0] = s[0]; // r
9841  t[1] = s[0]; // g
9842  t[2] = s[0]; // b
9843  t[3] = s[1]; // a
9844  break;
9845 
9846  case 3:
9847  t[0] = s[2]; // r
9848  t[1] = s[1]; // g
9849  t[2] = s[0]; // b
9850  t[3] = 255; // a
9851  break;
9852 
9853  case 4:
9854  t[0] = s[2]; // r
9855  t[1] = s[1]; // g
9856  t[2] = s[0]; // b
9857  t[3] = s[3]; // a
9858  break;
9859  }
9860  mask |= (1 << i);
9861  }
9862  t += 4;
9863  }
9864  squish::CompressMasked(tb, mask, d, squish_flags);
9865  d += cell_size;
9867  }
9868  }
9869  }
9870  compressed_ram_images.push_back(compressed_image);
9871  }
9872  cdata->_ram_images.swap(compressed_ram_images);
9873  cdata->_ram_image_compression = compression;
9874  return true;
9875 
9876 #else // HAVE_SQUISH
9877  return false;
9878 
9879 #endif // HAVE_SQUISH
9880 }
9881 
/**
 * Invokes the squish library to uncompress the RAM image(s).
 *
 * The inverse of do_squish(): decodes each mipmap level one 4x4 cell at a
 * time back into the texture's uncompressed component layout, then marks the
 * RAM images as uncompressed.  Returns false if squish support was not
 * compiled in.
 */
bool Texture::
do_unsquish(CData *cdata, int squish_flags) {
#ifdef HAVE_SQUISH
  RamImages uncompressed_ram_images;
  uncompressed_ram_images.reserve(cdata->_ram_images.size());
  for (size_t n = 0; n < cdata->_ram_images.size(); ++n) {
    RamImage uncompressed_image;
    int x_size = do_get_expected_mipmap_x_size(cdata, n);
    int y_size = do_get_expected_mipmap_y_size(cdata, n);
    int num_pages = do_get_expected_mipmap_num_pages(cdata, n);
    // Compressed bytes per level page, and bytes per 4x4 cell.
    int page_size = squish::GetStorageRequirements(x_size, y_size, squish_flags);
    int cell_size = squish::GetStorageRequirements(4, 4, squish_flags);

    uncompressed_image._page_size = do_get_expected_ram_mipmap_page_size(cdata, n);
    uncompressed_image._image = PTA_uchar::empty_array(uncompressed_image._page_size * num_pages);
    for (int z = 0; z < num_pages; ++z) {
      unsigned char *dest_page = uncompressed_image._image.p() + z * uncompressed_image._page_size;
      unsigned char *dest_page_end = dest_page + uncompressed_image._page_size;
      unsigned const char *source_page = cdata->_ram_images[n]._image.p() + z * page_size;
      // Unconvert one 4 x 4 cell at a time.
      unsigned const char *s = source_page;
      for (int y = 0; y < y_size; y += 4) {
        for (int x = 0; x < x_size; x += 4) {
          // Decompress the 4x4 cell into tb as 16 RGBA pixels.
          unsigned char tb[16 * 4];
          squish::Decompress(tb, s, squish_flags);
          s += cell_size;

          // Scatter the decoded pixels back to their image positions,
          // skipping positions that fall outside a partial edge cell.
          unsigned char *t = tb;
          for (int i = 0; i < 16; ++i) {
            int xi = x + i % 4;
            int yi = y + i / 4;
            unsigned char *d = dest_page + (yi * x_size + xi) * cdata->_num_components;
            if (d < dest_page_end) {
              // Repack RGBA into the texture's component order (mirror of the
              // expansion performed in do_squish).
              switch (cdata->_num_components) {
              case 1:
                d[0] = t[1];   // g
                break;

              case 2:
                d[0] = t[1];   // g
                d[1] = t[3];   // a
                break;

              case 3:
                d[2] = t[0];   // r
                d[1] = t[1];   // g
                d[0] = t[2];   // b
                break;

              case 4:
                d[2] = t[0];   // r
                d[1] = t[1];   // g
                d[0] = t[2];   // b
                d[3] = t[3];   // a
                break;
              }
            }
            t += 4;
          }
        }
      }
    }
    uncompressed_ram_images.push_back(uncompressed_image);
  }
  cdata->_ram_images.swap(uncompressed_ram_images);
  cdata->_ram_image_compression = CM_off;
  return true;

#else // HAVE_SQUISH
  return false;

#endif // HAVE_SQUISH
}
9959 
9960 /**
9961  * Factory method to generate a Texture object
9962  */
9965  BamReader::get_factory()->register_factory(get_class_type(), make_from_bam);
9966 }
9967 
/**
 * Function to write the important information in the particular object to a
 * Datagram.  Writes the header, the body, and (when the header decides so)
 * the raw texel data of this Texture.
 */
write_datagram(BamWriter *manager, Datagram &me) {
  // Open this texture's pipelined data for writing.
  CDWriter cdata(_cycler, false);

  // do_write_datagram_header() decides whether the raw image data should
  // follow the body, and records that decision in has_rawdata.
  bool has_rawdata = false;
  do_write_datagram_header(cdata, manager, me, has_rawdata);
  do_write_datagram_body(cdata, manager, me);

  // If we are also including the texture's image data, then stuff it in here.
  if (has_rawdata) {
    do_write_datagram_rawdata(cdata, manager, me);
  }
}
9985 
/**
 * Called by the BamReader to perform any final actions needed for setting up
 * the object after all objects have been read and all pointers have been
 * completed.
 */
finalize(BamReader *) {
  // Unref the pointer that we explicitly reffed in make_from_bam().
  unref();

  // We should never get back to zero after unreffing our own count, because
  // we expect to have been stored in a pointer somewhere.  If we do get to
  // zero, it's a memory leak; the way to avoid this is to call unref_delete()
  // above instead of unref(), but this is dangerous to do from within a
  // virtual function.
  nassertv(get_ref_count() != 0);
}
10003 
10004 
10005 /**
10006  * Writes the header part of the texture to the Datagram. This is the common
10007  * part that is shared by all Texture subclasses, and contains the filename
10008  * and rawdata flags. This method is not virtual because all Texture
10009  * subclasses must write the same data at this step.
10010  *
10011  * This part must be read first before calling do_fillin_body() to determine
10012  * whether to load the Texture from the TexturePool or directly from the bam
10013  * stream.
10014  *
10015  * After this call, has_rawdata will be filled with either true or false,
10016  * according to whether we expect to write the texture rawdata to the bam
10017  * stream following the texture body.
10018  */
10019 void Texture::
10020 do_write_datagram_header(CData *cdata, BamWriter *manager, Datagram &me, bool &has_rawdata) {
10021  // Write out the texture's raw pixel data if (a) the current Bam Texture
10022  // Mode requires that, or (b) there's no filename, so the file can't be
10023  // loaded up from disk, but the raw pixel data is currently available in
10024  // RAM.
10025 
10026  // Otherwise, we just write out the filename, and assume whoever loads the
10027  // bam file later will have access to the image file on disk.
10028  BamWriter::BamTextureMode file_texture_mode = manager->get_file_texture_mode();
10029  has_rawdata = (file_texture_mode == BamWriter::BTM_rawdata ||
10030  (cdata->_filename.empty() && do_has_bam_rawdata(cdata)));
10031  if (has_rawdata && !do_has_bam_rawdata(cdata)) {
10032  do_get_bam_rawdata(cdata);
10033  if (!do_has_bam_rawdata(cdata)) {
10034  // No image data after all.
10035  has_rawdata = false;
10036  }
10037  }
10038 
10039  bool has_bam_dir = !manager->get_filename().empty();
10040  Filename bam_dir = manager->get_filename().get_dirname();
10041  Filename filename = cdata->_filename;
10042  Filename alpha_filename = cdata->_alpha_filename;
10043 
10045 
10046  switch (file_texture_mode) {
10047  case BamWriter::BTM_unchanged:
10048  case BamWriter::BTM_rawdata:
10049  break;
10050 
10051  case BamWriter::BTM_fullpath:
10052  filename = cdata->_fullpath;
10053  alpha_filename = cdata->_alpha_fullpath;
10054  break;
10055 
10056  case BamWriter::BTM_relative:
10057  filename = cdata->_fullpath;
10058  alpha_filename = cdata->_alpha_fullpath;
10059  bam_dir.make_absolute(vfs->get_cwd());
10060  if (!has_bam_dir || !filename.make_relative_to(bam_dir, true)) {
10061  filename.find_on_searchpath(get_model_path());
10062  }
10063  if (gobj_cat.is_debug()) {
10064  gobj_cat.debug()
10065  << "Texture file " << cdata->_fullpath
10066  << " found as " << filename << "\n";
10067  }
10068  if (!has_bam_dir || !alpha_filename.make_relative_to(bam_dir, true)) {
10069  alpha_filename.find_on_searchpath(get_model_path());
10070  }
10071  if (gobj_cat.is_debug()) {
10072  gobj_cat.debug()
10073  << "Alpha image " << cdata->_alpha_fullpath
10074  << " found as " << alpha_filename << "\n";
10075  }
10076  break;
10077 
10078  case BamWriter::BTM_basename:
10079  filename = cdata->_fullpath.get_basename();
10080  alpha_filename = cdata->_alpha_fullpath.get_basename();
10081  break;
10082 
10083  default:
10084  gobj_cat.error()
10085  << "Unsupported bam-texture-mode: " << (int)file_texture_mode << "\n";
10086  }
10087 
10088  if (filename.empty()) {
10089  if (do_has_bam_rawdata(cdata) || cdata->_has_clear_color) {
10090  // If we don't have a filename, we have to store rawdata anyway.
10091  has_rawdata = true;
10092  }
10093  }
10094 
10095  me.add_string(get_name());
10096  me.add_string(filename);
10097  me.add_string(alpha_filename);
10098  me.add_uint8(cdata->_primary_file_num_channels);
10099  me.add_uint8(cdata->_alpha_file_channel);
10100  me.add_bool(has_rawdata);
10101 
10102  if (manager->get_file_minor_ver() < 25 &&
10103  cdata->_texture_type == TT_cube_map) {
10104  // Between Panda3D releases 1.7.2 and 1.8.0 (bam versions 6.24 and 6.25),
10105  // we added TT_2d_texture_array, shifting the definition for TT_cube_map.
10106  me.add_uint8(TT_2d_texture_array);
10107  } else {
10108  me.add_uint8(cdata->_texture_type);
10109  }
10110 
10111  if (manager->get_file_minor_ver() >= 32) {
10112  me.add_bool(cdata->_has_read_mipmaps);
10113  }
10114 }
10115 
10116 /**
10117  * Writes the body part of the texture to the Datagram. This is generally all
10118  * of the texture parameters except for the header and the rawdata.
10119  */
10120 void Texture::
10121 do_write_datagram_body(CData *cdata, BamWriter *manager, Datagram &me) {
10122  if (manager->get_file_minor_ver() >= 36) {
10123  cdata->_default_sampler.write_datagram(me);
10124  } else {
10125  const SamplerState &s = cdata->_default_sampler;
10126  me.add_uint8(s.get_wrap_u());
10127  me.add_uint8(s.get_wrap_v());
10128  me.add_uint8(s.get_wrap_w());
10129  me.add_uint8(s.get_minfilter());
10130  me.add_uint8(s.get_magfilter());
10132  s.get_border_color().write_datagram(me);
10133  }
10134 
10135  me.add_uint8(cdata->_compression);
10136  me.add_uint8(cdata->_quality_level);
10137 
10138  me.add_uint8(cdata->_format);
10139  me.add_uint8(cdata->_num_components);
10140 
10141  if (cdata->_texture_type == TT_buffer_texture) {
10142  me.add_uint8(cdata->_usage_hint);
10143  }
10144 
10145  if (manager->get_file_minor_ver() >= 28) {
10146  me.add_uint8(cdata->_auto_texture_scale);
10147  }
10148  me.add_uint32(cdata->_orig_file_x_size);
10149  me.add_uint32(cdata->_orig_file_y_size);
10150 
10151  bool has_simple_ram_image = !cdata->_simple_ram_image._image.empty();
10153 
10154  // Write out the simple image too, so it will be available later.
10155  if (has_simple_ram_image) {
10156  me.add_uint32(cdata->_simple_x_size);
10157  me.add_uint32(cdata->_simple_y_size);
10158  me.add_int32(cdata->_simple_image_date_generated);
10159  me.add_uint32(cdata->_simple_ram_image._image.size());
10160  me.append_data(cdata->_simple_ram_image._image, cdata->_simple_ram_image._image.size());
10161  }
10162 
10163  if (manager->get_file_minor_ver() >= 45) {
10164  me.add_bool(cdata->_has_clear_color);
10165  if (cdata->_has_clear_color) {
10166  cdata->_clear_color.write_datagram(me);
10167  }
10168  }
10169 }
10170 
10171 /**
10172  * Writes the rawdata part of the texture to the Datagram.
10173  */
void Texture::
do_write_datagram_rawdata(CData *cdata, BamWriter *manager, Datagram &me) {
  // Overall texture dimensions, in texels.
  me.add_uint32(cdata->_x_size);
  me.add_uint32(cdata->_y_size);
  me.add_uint32(cdata->_z_size);

  // Pad sizes were added in bam 6.30.
  if (manager->get_file_minor_ver() >= 30) {
    me.add_uint32(cdata->_pad_x_size);
    me.add_uint32(cdata->_pad_y_size);
    me.add_uint32(cdata->_pad_z_size);
  }

  // Multiview textures were added in bam 6.26.
  if (manager->get_file_minor_ver() >= 26) {
    me.add_uint32(cdata->_num_views);
  }
  me.add_uint8(cdata->_component_type);
  me.add_uint8(cdata->_component_width);
  me.add_uint8(cdata->_ram_image_compression);

  if (cdata->_ram_images.empty() && cdata->_has_clear_color &&
      manager->get_file_minor_ver() < 45) {
    // For older .bam versions that don't support clear colors, make up a RAM
    // image.
    int image_size = do_get_expected_ram_image_size(cdata);
    me.add_uint8(1);
    me.add_uint32(do_get_expected_ram_page_size(cdata));
    me.add_uint32(image_size);

    // Fill the image with the clear color.
    unsigned char pixel[16];
    const int pixel_size = do_get_clear_data(cdata, pixel);
    nassertv(pixel_size > 0);

    for (int i = 0; i < image_size; i += pixel_size) {
      me.append_data(pixel, pixel_size);
    }
  } else {
    // Write each mipmap level: count, then per-level page size, byte count,
    // and the raw bytes.  do_fillin_rawdata() reads this layout back.
    me.add_uint8(cdata->_ram_images.size());
    for (size_t n = 0; n < cdata->_ram_images.size(); ++n) {
      me.add_uint32(cdata->_ram_images[n]._page_size);
      me.add_uint32(cdata->_ram_images[n]._image.size());
      me.append_data(cdata->_ram_images[n]._image, cdata->_ram_images[n]._image.size());
    }
  }
}
10219 
10220 /**
10221  * Factory method to generate a Texture object
10222  */
10223 TypedWritable *Texture::
10224 make_from_bam(const FactoryParams &params) {
10225  PT(Texture) dummy = new Texture;
10226  return dummy->make_this_from_bam(params);
10227 }
10228 
10229 /**
10230  * Called by make_from_bam() once the particular subclass of Texture is known.
10231  * This is called on a newly-constructed Texture object of the appropriate
10232  * subclass. It will return either the same Texture object (e.g. this), or a
10233  * different Texture object loaded via the TexturePool, as appropriate.
10234  */
10235 TypedWritable *Texture::
10236 make_this_from_bam(const FactoryParams &params) {
10237  // The process of making a texture is slightly different than making other
10238  // TypedWritable objects. That is because all creation of Textures should
10239  // be done through calls to TexturePool, which ensures that any loads of the
10240  // same filename refer to the same memory.
10241 
10242  DatagramIterator scan;
10243  BamReader *manager;
10244 
10245  parse_params(params, scan, manager);
10246 
10247  // Get the header information--the filenames and texture type--so we can
10248  // look up the file on disk first.
10249  string name = scan.get_string();
10250  Filename filename = scan.get_string();
10251  Filename alpha_filename = scan.get_string();
10252 
10253  int primary_file_num_channels = scan.get_uint8();
10254  int alpha_file_channel = scan.get_uint8();
10255  bool has_rawdata = scan.get_bool();
10256  TextureType texture_type = (TextureType)scan.get_uint8();
10257  if (manager->get_file_minor_ver() < 25) {
10258  // Between Panda3D releases 1.7.2 and 1.8.0 (bam versions 6.24 and 6.25),
10259  // we added TT_2d_texture_array, shifting the definition for TT_cube_map.
10260  if (texture_type == TT_2d_texture_array) {
10261  texture_type = TT_cube_map;
10262  }
10263  }
10264  bool has_read_mipmaps = false;
10265  if (manager->get_file_minor_ver() >= 32) {
10266  has_read_mipmaps = scan.get_bool();
10267  }
10268 
10269  Texture *me = nullptr;
10270  if (has_rawdata) {
10271  // If the raw image data is included, then just load the texture directly
10272  // from the stream, and return it. In this case we return the "this"
10273  // pointer, since it's a newly-created Texture object of the appropriate
10274  // type.
10275  me = this;
10276  me->set_name(name);
10277  CDWriter cdata_me(me->_cycler, true);
10278  cdata_me->_filename = filename;
10279  cdata_me->_alpha_filename = alpha_filename;
10280  cdata_me->_primary_file_num_channels = primary_file_num_channels;
10281  cdata_me->_alpha_file_channel = alpha_file_channel;
10282  cdata_me->_texture_type = texture_type;
10283  cdata_me->_has_read_mipmaps = has_read_mipmaps;
10284 
10285  // Read the texture attributes directly from the bam stream.
10286  me->do_fillin_body(cdata_me, scan, manager);
10287  me->do_fillin_rawdata(cdata_me, scan, manager);
10288 
10289  // To manage the reference count, explicitly ref it now, then unref it in
10290  // the finalize callback.
10291  me->ref();
10292  manager->register_finalize(me);
10293 
10294  } else {
10295  // The raw image data isn't included, so we'll be loading the Texture via
10296  // the TexturePool. In this case we use the "this" pointer as a temporary
10297  // object to read all of the attributes from the bam stream.
10298  Texture *dummy = this;
10299  AutoTextureScale auto_texture_scale = ATS_unspecified;
10300  {
10301  CDWriter cdata_dummy(dummy->_cycler, true);
10302  dummy->do_fillin_body(cdata_dummy, scan, manager);
10303  auto_texture_scale = cdata_dummy->_auto_texture_scale;
10304  }
10305 
10306  if (filename.empty()) {
10307  // This texture has no filename; since we don't have an image to load,
10308  // we can't actually create the texture.
10309  gobj_cat.info()
10310  << "Cannot create texture '" << name << "' with no filename.\n";
10311 
10312  } else {
10313  // This texture does have a filename, so try to load it from disk.
10315  if (!manager->get_filename().empty()) {
10316  // If texture filename was given relative to the bam filename, expand
10317  // it now.
10318  Filename bam_dir = manager->get_filename().get_dirname();
10319  vfs->resolve_filename(filename, bam_dir);
10320  if (!alpha_filename.empty()) {
10321  vfs->resolve_filename(alpha_filename, bam_dir);
10322  }
10323  }
10324 
10325  LoaderOptions options = manager->get_loader_options();
10326  if (dummy->uses_mipmaps()) {
10327  options.set_texture_flags(options.get_texture_flags() | LoaderOptions::TF_generate_mipmaps);
10328  }
10329  options.set_auto_texture_scale(auto_texture_scale);
10330 
10331  switch (texture_type) {
10332  case TT_buffer_texture:
10333  case TT_1d_texture:
10334  case TT_2d_texture:
10335  case TT_1d_texture_array:
10336  if (alpha_filename.empty()) {
10337  me = TexturePool::load_texture(filename, primary_file_num_channels,
10338  has_read_mipmaps, options);
10339  } else {
10340  me = TexturePool::load_texture(filename, alpha_filename,
10341  primary_file_num_channels,
10342  alpha_file_channel,
10343  has_read_mipmaps, options);
10344  }
10345  break;
10346 
10347  case TT_3d_texture:
10348  me = TexturePool::load_3d_texture(filename, has_read_mipmaps, options);
10349  break;
10350 
10351  case TT_2d_texture_array:
10352  case TT_cube_map_array:
10353  me = TexturePool::load_2d_texture_array(filename, has_read_mipmaps, options);
10354  break;
10355 
10356  case TT_cube_map:
10357  me = TexturePool::load_cube_map(filename, has_read_mipmaps, options);
10358  break;
10359  }
10360  }
10361 
10362  if (me != nullptr) {
10363  me->set_name(name);
10364  CDWriter cdata_me(me->_cycler, true);
10365  me->do_fillin_from(cdata_me, dummy);
10366 
10367  // Since in this case me was loaded from the TexturePool, there's no
10368  // need to explicitly manage the reference count. TexturePool will hold
10369  // it safely.
10370  }
10371  }
10372 
10373  return me;
10374 }
10375 
10376 /**
10377  * Reads in the part of the Texture that was written with
10378  * do_write_datagram_body().
10379  */
void Texture::
do_fillin_body(CData *cdata, DatagramIterator &scan, BamReader *manager) {
  // SamplerState::read_datagram() handles the per-bam-version differences in
  // the sampler encoding itself.
  cdata->_default_sampler.read_datagram(scan, manager);

  // Compression mode was added in bam 6.1, quality level in 6.16.
  if (manager->get_file_minor_ver() >= 1) {
    cdata->_compression = (CompressionMode)scan.get_uint8();
  }
  if (manager->get_file_minor_ver() >= 16) {
    cdata->_quality_level = (QualityLevel)scan.get_uint8();
  }

  cdata->_format = (Format)scan.get_uint8();
  cdata->_num_components = scan.get_uint8();

  // The usage hint is only stored for buffer textures; see
  // do_write_datagram_body().
  if (cdata->_texture_type == TT_buffer_texture) {
    cdata->_usage_hint = (GeomEnums::UsageHint)scan.get_uint8();
  }

  cdata->inc_properties_modified();

  // Auto texture scale was added in bam 6.28; default to unspecified.
  cdata->_auto_texture_scale = ATS_unspecified;
  if (manager->get_file_minor_ver() >= 28) {
    cdata->_auto_texture_scale = (AutoTextureScale)scan.get_uint8();
  }

  // Original file size and the optional "simple" preview image were added in
  // bam 6.18.
  bool has_simple_ram_image = false;
  if (manager->get_file_minor_ver() >= 18) {
    cdata->_orig_file_x_size = scan.get_uint32();
    cdata->_orig_file_y_size = scan.get_uint32();

    has_simple_ram_image = scan.get_bool();
  }

  if (has_simple_ram_image) {
    cdata->_simple_x_size = scan.get_uint32();
    cdata->_simple_y_size = scan.get_uint32();
    cdata->_simple_image_date_generated = scan.get_int32();

    size_t u_size = scan.get_uint32();

    // Protect against large allocation.
    if (u_size > scan.get_remaining_size()) {
      gobj_cat.error()
        << "simple RAM image extends past end of datagram, is texture corrupt?\n";
      return;
    }

    PTA_uchar image = PTA_uchar::empty_array(u_size, get_class_type());
    scan.extract_bytes(image.p(), u_size);

    cdata->_simple_ram_image._image = image;
    cdata->_simple_ram_image._page_size = u_size;
    cdata->inc_simple_image_modified();
  }

  // The clear color was added in bam 6.45.
  if (manager->get_file_minor_ver() >= 45) {
    cdata->_has_clear_color = scan.get_bool();
    if (cdata->_has_clear_color) {
      cdata->_clear_color.read_datagram(scan);
    }
  }
}
10442 
10443 /**
10444  * Reads in the part of the Texture that was written with
10445  * do_write_datagram_rawdata().
10446  */
void Texture::
do_fillin_rawdata(CData *cdata, DatagramIterator &scan, BamReader *manager) {
  cdata->_x_size = scan.get_uint32();
  cdata->_y_size = scan.get_uint32();
  cdata->_z_size = scan.get_uint32();

  // Pad sizes were added in bam 6.30; older bams imply no padding.
  if (manager->get_file_minor_ver() >= 30) {
    cdata->_pad_x_size = scan.get_uint32();
    cdata->_pad_y_size = scan.get_uint32();
    cdata->_pad_z_size = scan.get_uint32();
  } else {
    do_set_pad_size(cdata, 0, 0, 0);
  }

  // Multiview textures were added in bam 6.26; older bams have one view.
  cdata->_num_views = 1;
  if (manager->get_file_minor_ver() >= 26) {
    cdata->_num_views = scan.get_uint32();
  }
  cdata->_component_type = (ComponentType)scan.get_uint8();
  cdata->_component_width = scan.get_uint8();
  // RAM image compression was added in bam 6.1; older bams are uncompressed.
  cdata->_ram_image_compression = CM_off;
  if (manager->get_file_minor_ver() >= 1) {
    cdata->_ram_image_compression = (CompressionMode)scan.get_uint8();
  }

  // Multiple RAM images (mipmap levels) were added in bam 6.3.
  int num_ram_images = 1;
  if (manager->get_file_minor_ver() >= 3) {
    num_ram_images = scan.get_uint8();
  }

  cdata->_ram_images.clear();
  cdata->_ram_images.reserve(num_ram_images);
  for (int n = 0; n < num_ram_images; ++n) {
    cdata->_ram_images.push_back(RamImage());
    // Per-image page size was added in bam 6.1; otherwise compute it.
    cdata->_ram_images[n]._page_size = get_expected_ram_page_size();
    if (manager->get_file_minor_ver() >= 1) {
      cdata->_ram_images[n]._page_size = scan.get_uint32();
    }

    // fill the cdata->_image buffer with image data
    size_t u_size = scan.get_uint32();

    // Protect against large allocation.
    if (u_size > scan.get_remaining_size()) {
      gobj_cat.error()
        << "RAM image " << n << " extends past end of datagram, is texture corrupt?\n";
      return;
    }

    PTA_uchar image = PTA_uchar::empty_array(u_size, get_class_type());
    scan.extract_bytes(image.p(), u_size);

    cdata->_ram_images[n]._image = image;
  }
  cdata->_loaded_from_image = true;
  cdata->inc_image_modified();
}
10504 
10505 /**
10506  * Called in make_from_bam(), this method properly copies the attributes from
10507  * the bam stream (as stored in dummy) into this texture, updating the
10508  * modified flags appropriately.
10509  */
void Texture::
do_fillin_from(CData *cdata, const Texture *dummy) {
  // Use the setters instead of setting these directly, so we can correctly
  // avoid incrementing cdata->_properties_modified if none of these actually
  // change.  (Otherwise, we'd have to reload the texture to the GSG every
  // time we loaded a new bam file that reference the texture, since each bam
  // file reference passes through this function.)

  CDReader cdata_dummy(dummy->_cycler);

  do_set_wrap_u(cdata, cdata_dummy->_default_sampler.get_wrap_u());
  do_set_wrap_v(cdata, cdata_dummy->_default_sampler.get_wrap_v());
  do_set_wrap_w(cdata, cdata_dummy->_default_sampler.get_wrap_w());
  do_set_border_color(cdata, cdata_dummy->_default_sampler.get_border_color());

  // Filter/compression/quality settings are only copied when the bam stored
  // an explicit (non-default) value, so an existing setting is not clobbered.
  if (cdata_dummy->_default_sampler.get_minfilter() != SamplerState::FT_default) {
    do_set_minfilter(cdata, cdata_dummy->_default_sampler.get_minfilter());
  }
  if (cdata_dummy->_default_sampler.get_magfilter() != SamplerState::FT_default) {
    do_set_magfilter(cdata, cdata_dummy->_default_sampler.get_magfilter());
  }
  if (cdata_dummy->_default_sampler.get_anisotropic_degree() != 0) {
    do_set_anisotropic_degree(cdata, cdata_dummy->_default_sampler.get_anisotropic_degree());
  }
  if (cdata_dummy->_compression != CM_default) {
    do_set_compression(cdata, cdata_dummy->_compression);
  }
  if (cdata_dummy->_quality_level != QL_default) {
    do_set_quality_level(cdata, cdata_dummy->_quality_level);
  }

  Format format = cdata_dummy->_format;
  int num_components = cdata_dummy->_num_components;

  if (num_components == cdata->_num_components) {
    // Only reset the format if the number of components hasn't changed, since
    // if the number of components has changed our texture no longer matches
    // what it was when the bam was written.
    do_set_format(cdata, format);
  }

  if (!cdata_dummy->_simple_ram_image._image.empty()) {
    // Only replace the simple ram image if it was generated more recently
    // than the one we already have.
    if (cdata->_simple_ram_image._image.empty() ||
        cdata_dummy->_simple_image_date_generated > cdata->_simple_image_date_generated) {
      do_set_simple_ram_image(cdata,
                              cdata_dummy->_simple_ram_image._image,
                              cdata_dummy->_simple_x_size,
                              cdata_dummy->_simple_y_size);
      cdata->_simple_image_date_generated = cdata_dummy->_simple_image_date_generated;
    }
  }
}
10564 
10565 /**
10566  *
10567  */
10568 Texture::CData::
10569 CData() {
10570  _primary_file_num_channels = 0;
10571  _alpha_file_channel = 0;
10572  _keep_ram_image = true;
10573  _compression = CM_default;
10574  _auto_texture_scale = ATS_unspecified;
10575  _ram_image_compression = CM_off;
10576  _render_to_texture = false;
10577  _match_framebuffer_format = false;
10578  _post_load_store_cache = false;
10579  _quality_level = QL_default;
10580 
10581  _texture_type = TT_2d_texture;
10582  _x_size = 0;
10583  _y_size = 1;
10584  _z_size = 1;
10585  _num_views = 1;
10586 
10587  // We will override the format in a moment (in the Texture constructor), but
10588  // set it to something else first to avoid the check in do_set_format
10589  // depending on an uninitialized value.
10590  _format = F_rgba;
10591 
10592  // Only used for buffer textures.
10593  _usage_hint = GeomEnums::UH_unspecified;
10594 
10595  _pad_x_size = 0;
10596  _pad_y_size = 0;
10597  _pad_z_size = 0;
10598 
10599  _orig_file_x_size = 0;
10600  _orig_file_y_size = 0;
10601 
10602  _loaded_from_image = false;
10603  _loaded_from_txo = false;
10604  _has_read_pages = false;
10605  _has_read_mipmaps = false;
10606  _num_mipmap_levels_read = 0;
10607 
10608  _simple_x_size = 0;
10609  _simple_y_size = 0;
10610  _simple_ram_image._page_size = 0;
10611 
10612  _has_clear_color = false;
10613 }
10614 
10615 /**
10616  *
10617  */
10618 Texture::CData::
10619 CData(const Texture::CData &copy) {
10620  _num_mipmap_levels_read = 0;
10621 
10622  do_assign(&copy);
10623 
10624  _properties_modified = copy._properties_modified;
10625  _image_modified = copy._image_modified;
10626  _simple_image_modified = copy._simple_image_modified;
10627 }
10628 
10629 /**
10630  *
10631  */
CycleData *Texture::CData::
make_copy() const {
  // Required by the PipelineCycler machinery: produce a new pipeline-stage
  // copy via the copy constructor above.
  return new CData(*this);
}
10636 
10637 /**
10638  *
10639  */
void Texture::CData::
do_assign(const Texture::CData *copy) {
  // Member-wise copy of the texture state.  Note that the modified-state
  // counters are deliberately NOT copied here; the copy constructor handles
  // those separately.  If a new member is added to CData, it must be added
  // here too.
  _filename = copy->_filename;
  _alpha_filename = copy->_alpha_filename;
  if (!copy->_fullpath.empty()) {
    // Since the fullpath is often empty on a file loaded directly from a txo,
    // we only assign the fullpath if it is not empty.
    _fullpath = copy->_fullpath;
    _alpha_fullpath = copy->_alpha_fullpath;
  }
  _primary_file_num_channels = copy->_primary_file_num_channels;
  _alpha_file_channel = copy->_alpha_file_channel;
  _x_size = copy->_x_size;
  _y_size = copy->_y_size;
  _z_size = copy->_z_size;
  _num_views = copy->_num_views;
  _pad_x_size = copy->_pad_x_size;
  _pad_y_size = copy->_pad_y_size;
  _pad_z_size = copy->_pad_z_size;
  _orig_file_x_size = copy->_orig_file_x_size;
  _orig_file_y_size = copy->_orig_file_y_size;
  _num_components = copy->_num_components;
  _component_width = copy->_component_width;
  _texture_type = copy->_texture_type;
  _format = copy->_format;
  _component_type = copy->_component_type;
  _loaded_from_image = copy->_loaded_from_image;
  _loaded_from_txo = copy->_loaded_from_txo;
  _has_read_pages = copy->_has_read_pages;
  _has_read_mipmaps = copy->_has_read_mipmaps;
  _num_mipmap_levels_read = copy->_num_mipmap_levels_read;
  _default_sampler = copy->_default_sampler;
  _keep_ram_image = copy->_keep_ram_image;
  _compression = copy->_compression;
  _match_framebuffer_format = copy->_match_framebuffer_format;
  _quality_level = copy->_quality_level;
  _auto_texture_scale = copy->_auto_texture_scale;
  _ram_image_compression = copy->_ram_image_compression;
  _ram_images = copy->_ram_images;
  _simple_x_size = copy->_simple_x_size;
  _simple_y_size = copy->_simple_y_size;
  _simple_ram_image = copy->_simple_ram_image;
}
10683 
10684 /**
10685  * Writes the contents of this object to the datagram for shipping out to a
10686  * Bam file.
10687  */
void Texture::CData::
write_datagram(BamWriter *manager, Datagram &dg) const {
  // Intentionally empty: the Texture itself serializes its state through the
  // do_write_datagram_* methods above rather than cycling the CData through
  // the standard CycleData bam path.
}
10691 
10692 /**
10693  * Receives an array of pointers, one for each time manager->read_pointer()
10694  * was called in fillin(). Returns the number of pointers processed.
10695  */
int Texture::CData::
complete_pointers(TypedWritable **p_list, BamReader *manager) {
  // fillin() below reads no pointers, so there is never anything to complete.
  return 0;
}
10700 
10701 /**
10702  * This internal function is called by make_from_bam to read in all of the
10703  * relevant data from the BamFile for the new Geom.
10704  */
void Texture::CData::
fillin(DatagramIterator &scan, BamReader *manager) {
  // Intentionally empty: Texture reads its state through the do_fillin_*
  // methods above rather than through the standard CycleData bam path.
}
10708 
10709 /**
10710  *
10711  */
10712 ostream &
10713 operator << (ostream &out, Texture::TextureType tt) {
10714  return out << Texture::format_texture_type(tt);
10715 }
10716 
10717 /**
10718  *
10719  */
10720 ostream &
10721 operator << (ostream &out, Texture::ComponentType ct) {
10722  return out << Texture::format_component_type(ct);
10723 }
10724 
10725 /**
10726  *
10727  */
10728 ostream &
10729 operator << (ostream &out, Texture::Format f) {
10730  return out << Texture::format_format(f);
10731 }
10732 
10733 /**
10734  *
10735  */
10736 ostream &
10737 operator << (ostream &out, Texture::CompressionMode cm) {
10738  return out << Texture::format_compression_mode(cm);
10739 }
10740 
10741 /**
10742  *
10743  */
10744 ostream &
10745 operator << (ostream &out, Texture::QualityLevel tql) {
10746  return out << Texture::format_quality_level(tql);
10747 }
10748 
10749 /**
10750  *
10751  */
10752 istream &
10753 operator >> (istream &in, Texture::QualityLevel &tql) {
10754  string word;
10755  in >> word;
10756 
10757  tql = Texture::string_quality_level(word);
10758  return in;
10759 }
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
void parse_params(const FactoryParams &params, DatagramIterator &scan, BamReader *&manager)
Takes in a FactoryParams, passed from a WritableFactory into any TypedWritable's make function,...
Definition: bamReader.I:275
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
This class represents a thread-safe handle to a promised future result of an asynchronous operation,...
Definition: asyncFuture.h:61
An instance of this class is written to the front of a Bam or Txo file to make the file a cached inst...
void add_dependent_file(const Filename &pathname)
Adds the indicated file to the list of files that will be loaded to generate the data in this record.
get_data
Returns a pointer to the data stored in the record, or NULL if there is no data.
set_data
Stores a new data object on the record.
This class maintains a cache of Bam and/or Txo objects generated from model files and texture images ...
Definition: bamCache.h:42
get_cache_textures
Returns whether texture files (e.g.
Definition: bamCache.h:90
bool store(BamCacheRecord *record)
Flushes a cache entry to disk.
Definition: bamCache.cxx:194
static BamCache * get_global_ptr()
Returns a pointer to the global BamCache object, which is used automatically by the ModelPool and Tex...
Definition: bamCache.I:223
get_cache_compressed_textures
Returns whether compressed texture files will be stored in the cache, as compressed txo files.
Definition: bamCache.h:92
This is the fundamental interface for extracting binary objects from a Bam file, as generated by a Ba...
Definition: bamReader.h:110
void register_finalize(TypedWritable *whom)
Should be called by an object reading itself from the Bam file to indicate that this particular objec...
Definition: bamReader.cxx:808
bool resolve()
This may be called at any time during processing of the Bam file to resolve all the known pointers so...
Definition: bamReader.cxx:325
bool init()
Initializes the BamReader prior to reading any objects from its source.
Definition: bamReader.cxx:85
get_filename
If a BAM is a file, then the BamReader should contain the name of the file.
Definition: bamReader.h:155
TypedWritable * read_object()
Reads a single object from the Bam file.
Definition: bamReader.cxx:224
get_loader_options
Returns the LoaderOptions passed to the loader when the model was requested, if any.
Definition: bamReader.h:156
int get_file_minor_ver() const
Returns the minor version number of the Bam file currently being read.
Definition: bamReader.I:83
static WritableFactory * get_factory()
Returns the global WritableFactory for generating TypedWritable objects.
Definition: bamReader.I:177
This is the fundamental interface for writing binary objects to a Bam file, to be extracted later by ...
Definition: bamWriter.h:63
get_file_texture_mode
Returns the BamTextureMode preference indicated by the Bam file currently being written.
Definition: bamWriter.h:95
get_filename
If a BAM is a file, then the BamWriter should contain the name of the file.
Definition: bamWriter.h:92
int get_file_minor_ver() const
Returns the minor version number of the Bam file currently being written.
Definition: bamWriter.I:59
get_active
Returns the active flag associated with this object.
Definition: bufferContext.h:55
get_resident
Returns the resident flag associated with this object.
Definition: bufferContext.h:56
get_data_size_bytes
Returns the number of bytes previously reported for the data object.
Definition: bufferContext.h:53
void notify_all()
Informs all of the other threads who are currently blocked on wait() that the relevant condition has ...
void wait()
Waits on the condition.
This class specializes ConfigVariable as an enumerated type.
int get_word(size_t n) const
Returns the variable's nth value.
std::string get_unique_value(size_t n) const
Returns the nth unique value of the variable.
size_t get_num_unique_values() const
Returns the number of unique values in the variable.
PointerToArray< Element > cast_non_const() const
Casts away the constness of the CPTA(Element), and returns an equivalent PTA(Element).
This collects together the pieces of data that are accumulated for each node while walking the scene ...
This object performs a depth-first traversal of the scene graph, with optional view-frustum culling,...
Definition: cullTraverser.h:45
This template class calls PipelineCycler::read() in the constructor and PipelineCycler::release_read(...
This template class calls PipelineCycler::read_unlocked(), and then provides a transparent read-only ...
This template class calls PipelineCycler::write() in the constructor and PipelineCycler::release_writ...
A single page of data maintained by a PipelineCycler.
Definition: cycleData.h:50
This class can be used to read a binary file that consists of an arbitrary header followed by a numbe...
bool read_header(std::string &header, size_t num_bytes)
Reads a sequence of bytes from the beginning of the datagram file.
bool open(const FileReference *file)
Opens the indicated filename for reading.
A class to retrieve the individual data elements previously stored in a Datagram.
uint8_t get_uint8()
Extracts an unsigned 8-bit integer.
vector_uchar extract_bytes(size_t size)
Extracts the indicated number of bytes in the datagram and returns them as a string.
uint32_t get_uint32()
Extracts an unsigned 32-bit integer.
bool get_bool()
Extracts a boolean value.
std::string get_string()
Extracts a variable-length string.
int32_t get_int32()
Extracts a signed 32-bit integer.
size_t get_remaining_size() const
Return the bytes left in the datagram.
This class can be used to write a binary file that consists of an arbitrary header followed by a numb...
bool open(const FileReference *file)
Opens the indicated filename for writing.
bool write_header(const std::string &header)
Writes a sequence of bytes to the beginning of the datagram file.
An ordered list of data elements, formatted in memory for transmission over a socket or writing to a ...
Definition: datagram.h:38
void add_uint32(uint32_t value)
Adds an unsigned 32-bit integer to the datagram.
Definition: datagram.I:94
void add_int16(int16_t value)
Adds a signed 16-bit integer to the datagram.
Definition: datagram.I:58
void add_int32(int32_t value)
Adds a signed 32-bit integer to the datagram.
Definition: datagram.I:67
void add_uint8(uint8_t value)
Adds an unsigned 8-bit integer to the datagram.
Definition: datagram.I:50
void add_bool(bool value)
Adds a boolean value to the datagram.
Definition: datagram.I:34
void append_data(const void *data, size_t size)
Appends some more raw data to the end of the datagram.
Definition: datagram.cxx:129
void add_string(const std::string &str)
Adds a variable-length string to the datagram.
Definition: datagram.I:219
An instance of this class is passed to the Factory when requesting it to do its business and construc...
Definition: factoryParams.h:36
void register_factory(TypeHandle handle, CreateFunc *func, void *user_data=nullptr)
Registers a new kind of thing the Factory will be able to create.
Definition: factory.I:73
The name of a file, such as a texture file or an Egg file.
Definition: filename.h:39
std::string get_basename() const
Returns the basename part of the filename.
Definition: filename.I:367
Filename get_filename_index(int index) const
If the pattern flag is set for this Filename and the filename string actually includes a sequence of ...
Definition: filename.cxx:836
bool has_hash() const
Returns true if the filename is indicated to be a filename pattern (that is, set_pattern(true) was ca...
Definition: filename.I:531
void set_basename_wo_extension(const std::string &s)
Replaces the basename part of the filename, without the file extension.
Definition: filename.cxx:783
int find_on_searchpath(const DSearchPath &searchpath)
Performs the reverse of the resolve_filename() operation: assuming that the current filename is fully...
Definition: filename.cxx:1689
bool make_relative_to(Filename directory, bool allow_backups=true)
Adjusts this filename, which must be a fully-specified pathname beginning with a slash,...
Definition: filename.cxx:1640
std::string get_basename_wo_extension() const
Returns the basename part of the filename, without the file extension.
Definition: filename.I:386
void make_absolute()
Converts the filename to a fully-qualified pathname from the root (if it is a relative pathname),...
Definition: filename.cxx:968
static Filename pattern_filename(const std::string &filename)
Constructs a filename that represents a sequence of numbered files.
Definition: filename.I:160
This class can be used to test for string matches against standard Unix- shell filename globbing conv...
Definition: globPattern.h:32
bool matches(const std::string &candidate) const
Returns true if the candidate string matches the pattern, false otherwise.
Definition: globPattern.I:122
This is a base class for the GraphicsStateGuardian class, which is itself a base class for the variou...
static GraphicsStateGuardianBase * get_default_gsg()
Returns a pointer to the "default" GSG.
Encodes a string name in a hash table, mapping it to a pointer.
Definition: internalName.h:38
get_name
Returns the complete name represented by the InternalName and all of its parents.
Definition: internalName.h:61
Specifies parameters that may be passed to the loader.
Definition: loaderOptions.h:23
set_auto_texture_scale
Set this flag to ATS_none, ATS_up, ATS_down, or ATS_pad to control how a texture is scaled from disk ...
Definition: loaderOptions.h:69
get_auto_texture_scale
See set_auto_texture_scale().
Definition: loaderOptions.h:69
get_texture_num_views
See set_texture_num_views().
Definition: loaderOptions.h:64
void unlock()
Alias for release() to match C++11 semantics.
Definition: mutexDirect.I:39
void lock()
Alias for acquire() to match C++11 semantics.
Definition: mutexDirect.I:19
A lightweight C++ object whose constructor calls acquire() and whose destructor calls release() on a ...
Definition: mutexHolder.h:25
A base class for all things which can have a name.
Definition: namable.h:26
bool has_name() const
Returns true if the Namable has a nonempty name set, false if the name is empty.
Definition: namable.I:44
get_maxval
Returns the maximum channel value allowable for any pixel in this image; for instance,...
int get_x_size() const
Returns the number of pixels in the X direction.
PNMReader * make_reader(const Filename &filename, PNMFileType *type=nullptr, bool report_unknown_type=true) const
Returns a newly-allocated PNMReader of the suitable type for reading from the indicated image filenam...
static bool is_grayscale(ColorType color_type)
This static variant of is_grayscale() returns true if the indicated image type represents a grayscale...
get_num_channels
Returns the number of channels in the image.
static bool has_alpha(ColorType color_type)
This static variant of has_alpha() returns true if the indicated image type includes an alpha channel...
int get_y_size() const
Returns the number of pixels in the Y direction.
get_type
If the file type is known (e.g.
The name of this class derives from the fact that we originally implemented it as a layer on top of t...
Definition: pnmImage.h:58
void clear()
Frees all memory allocated for the image, and clears all its parameters (size, color,...
Definition: pnmImage.cxx:48
void set_read_size(int x_size, int y_size)
Specifies the size to we'd like to scale the image upon reading it.
Definition: pnmImage.I:288
xelval get_channel_val(int x, int y, int channel) const
Returns the nth component color at the indicated pixel.
Definition: pnmImage.cxx:837
void set_blue(int x, int y, float b)
Sets the blue component color only at the indicated pixel.
Definition: pnmImage.I:836
void alpha_fill(float alpha=0.0)
Sets the entire alpha channel to the given level.
Definition: pnmImage.I:272
xelval get_green_val(int x, int y) const
Returns the green component color at the indicated pixel.
Definition: pnmImage.I:462
void set_green(int x, int y, float g)
Sets the green component color only at the indicated pixel.
Definition: pnmImage.I:827
float get_alpha(int x, int y) const
Returns the alpha component color at the indicated pixel.
Definition: pnmImage.I:809
float get_gray(int x, int y) const
Returns the gray component color at the indicated pixel.
Definition: pnmImage.I:799
void quick_filter_from(const PNMImage &copy, int xborder=0, int yborder=0)
Resizes from the given image, with a fixed radius of 0.5.
void fill(float red, float green, float blue)
Sets the entire image (except the alpha channel) to the given color.
Definition: pnmImage.I:246
void set_num_channels(int num_channels)
Changes the number of channels associated with the image.
Definition: pnmImage.I:353
xelval get_alpha_val(int x, int y) const
Returns the alpha component color at the indicated pixel.
Definition: pnmImage.I:494
void set_red(int x, int y, float r)
Sets the red component color only at the indicated pixel.
Definition: pnmImage.I:818
void copy_header_from(const PNMImageHeader &header)
Copies just the header information into this image.
Definition: pnmImage.cxx:200
void take_from(PNMImage &orig)
Move the contents of the other image into this one, and empty the other image.
Definition: pnmImage.cxx:224
bool is_valid() const
Returns true if the image has been read in or correctly initialized with a height and width.
Definition: pnmImage.I:342
xelval get_blue_val(int x, int y) const
Returns the blue component color at the indicated pixel.
Definition: pnmImage.I:472
bool read(const Filename &filename, PNMFileType *type=nullptr, bool report_unknown_type=true)
Reads the indicated image filename.
Definition: pnmImage.cxx:278
xel * get_array()
Directly access the underlying PNMImage array.
Definition: pnmImage.I:1098
xelval get_red_val(int x, int y) const
Returns the red component color at the indicated pixel.
Definition: pnmImage.I:452
int get_read_y_size() const
Returns the requested y_size of the image if set_read_size() has been called, or the image y_size oth...
Definition: pnmImage.I:324
xelval get_gray_val(int x, int y) const
Returns the gray component color at the indicated pixel.
Definition: pnmImage.I:484
void set_alpha(int x, int y, float a)
Sets the alpha component color only at the indicated pixel.
Definition: pnmImage.I:859
ColorSpace get_color_space() const
Returns the color space in which the image is encoded.
Definition: pnmImage.I:332
void add_alpha()
Adds an alpha channel to the image, if it does not already have one.
Definition: pnmImage.I:363
xelval * get_alpha_array()
Directly access the underlying PNMImage array of alpha values.
Definition: pnmImage.I:1115
bool write(const Filename &filename, PNMFileType *type=nullptr) const
Writes the image to the indicated filename.
Definition: pnmImage.cxx:385
int get_read_x_size() const
Returns the requested x_size of the image if set_read_size() has been called, or the image x_size oth...
Definition: pnmImage.I:315
This is an abstract base class that defines the interface for reading image files of various types.
Definition: pnmReader.h:27
virtual bool is_floating_point()
Returns true if this PNMFileType represents a floating-point image type, false if it is a normal,...
Definition: pnmReader.cxx:71
A lightweight class that represents a single element that may be timed and/or counted via stats.
A lightweight class that can be used to automatically start and stop a PStatCollector around a sectio...
Definition: pStatTimer.h:30
Defines a pfm file, a 2-d table of floating-point numbers, either 3-component or 1-component,...
Definition: pfmFile.h:31
bool read(const Filename &fullpath)
Reads the PFM data from the indicated file, returning true on success, false on failure.
Definition: pfmFile.cxx:121
bool write(const Filename &fullpath)
Writes the PFM data to the indicated file, returning true on success, false on failure.
Definition: pfmFile.cxx:204
bool store(PNMImage &pnmimage) const
Copies the data to the indicated PNMImage, converting to RGB values.
Definition: pfmFile.cxx:360
void set_channel(int x, int y, int c, PN_float32 value)
Replaces the cth channel of the point value at the indicated point.
Definition: pfmFile.I:63
bool load(const PNMImage &pnmimage)
Fills the PfmFile with the data from the indicated PNMImage, converted to floating-point values.
Definition: pfmFile.cxx:287
PN_float32 get_channel(int x, int y, int c) const
Returns the cth channel of the point value at the indicated point.
Definition: pfmFile.I:52
void clear()
Eliminates all data in the file.
Definition: pfmFile.cxx:77
A table of objects that are saved within the graphics context for reference by handle later.
bool is_texture_queued(const Texture *tex) const
Returns true if the texture has been queued on this GSG, false otherwise.
TextureContext * prepare_texture_now(Texture *tex, int view, GraphicsStateGuardianBase *gsg)
Immediately creates a new TextureContext for the indicated texture and returns it.
bool dequeue_texture(Texture *tex)
Removes a texture from the queued list of textures to be prepared.
void release_texture(TextureContext *tc)
Indicates that a texture context, created by a previous call to prepare_texture(),...
void ref() const
Explicitly increments the reference count.
void local_object()
This function should be called, once, immediately after creating a new instance of some ReferenceCoun...
get_ref_count
Returns the current reference count.
virtual bool unref() const
Explicitly decrements the reference count.
Represents a set of settings that indicate how a texture is sampled.
Definition: samplerState.h:36
get_minfilter
Returns the filter mode of the texture for minification.
Definition: samplerState.h:115
get_wrap_v
Returns the wrap mode of the texture in the V direction.
Definition: samplerState.h:113
get_anisotropic_degree
Returns the degree of anisotropic filtering that should be applied to the texture.
Definition: samplerState.h:119
get_magfilter
Returns the filter mode of the texture for magnification.
Definition: samplerState.h:116
get_wrap_w
Returns the wrap mode of the texture in the W direction.
Definition: samplerState.h:114
get_wrap_u
Returns the wrap mode of the texture in the U direction.
Definition: samplerState.h:112
get_border_color
Returns the solid color of the texture's border.
Definition: samplerState.h:121
A class to read sequential binary data directly from an istream.
Definition: streamReader.h:28
This is a special class object that holds all the information returned by a particular GSG to indicat...
bool was_image_modified() const
Returns true if the texture image has been modified since the last time mark_loaded() was called.
An instance of this object is returned by Texture::peek().
Definition: texturePeeker.h:27
static Texture * load_texture(const Filename &filename, int primary_file_num_channels=0, bool read_mipmaps=false, const LoaderOptions &options=LoaderOptions())
Loads the given filename up into a texture, if it has not already been loaded, and returns the new te...
Definition: texturePool.I:47
static Texture * load_2d_texture_array(const Filename &filename_pattern, bool read_mipmaps=false, const LoaderOptions &options=LoaderOptions())
Loads a 2-D texture array that is specified with a series of n pages, all numbered in sequence,...
Definition: texturePool.I:101
static Texture * load_cube_map(const Filename &filename_pattern, bool read_mipmaps=false, const LoaderOptions &options=LoaderOptions())
Loads a cube map texture that is specified with a series of 6 pages, numbered 0 through 5.
Definition: texturePool.I:118
static Texture * load_3d_texture(const Filename &filename_pattern, bool read_mipmaps=false, const LoaderOptions &options=LoaderOptions())
Loads a 3-D texture that is specified with a series of n pages, all numbered in sequence,...
Definition: texturePool.I:84
Represents a texture object, which is typically a single 2-d image but may also represent a 1-d or 3-...
Definition: texture.h:71
CPTA_uchar get_ram_image_as(const std::string &requested_format)
Returns the uncompressed system-RAM image data associated with the texture.
Definition: texture.cxx:7362
static TextureType string_texture_type(const std::string &str)
Returns the TextureType corresponding to the indicated string word.
Definition: texture.cxx:2103
virtual void ensure_loader_type(const Filename &filename)
May be called prior to calling read_txo() or any bam-related Texture- creating callback,...
Definition: texture.cxx:2836
TextureContext * prepare_now(int view, PreparedGraphicsObjects *prepared_objects, GraphicsStateGuardianBase *gsg)
Creates a context for the texture on the particular GSG, if it does not already exist.
Definition: texture.cxx:1981
static std::string format_component_type(ComponentType ct)
Returns the indicated ComponentType converted to a string word.
Definition: texture.cxx:2129
Texture(const std::string &name=std::string())
Constructs an empty texture.
Definition: texture.cxx:374
bool get_resident(PreparedGraphicsObjects *prepared_objects) const
Returns true if this Texture is reported to be resident within graphics memory for the indicated GSG.
Definition: texture.cxx:1546
Texture * load_related(const InternalName *suffix) const
Loads a texture whose filename is derived by concatenating a suffix to the filename of this texture.
Definition: texture.cxx:973
static CompressionMode string_compression_mode(const std::string &str)
Returns the CompressionMode value associated with the given string representation.
Definition: texture.cxx:2462
PTA_uchar new_simple_ram_image(int x_size, int y_size)
Creates an empty array for the simple ram image of the indicated size, and returns a modifiable point...
Definition: texture.cxx:1303
static bool is_specific(CompressionMode compression)
Returns true if the indicated compression mode is one of the specific compression types,...
Definition: texture.cxx:2616
bool has_ram_image() const
Returns true if the Texture has its image contents available in main RAM, false if it exists only in ...
Definition: texture.I:1242
static std::string format_quality_level(QualityLevel tql)
Returns the indicated QualityLevel converted to a string word.
Definition: texture.cxx:2505
size_t estimate_texture_memory() const
Estimates the amount of texture memory that will be consumed by loading this texture.
Definition: texture.cxx:675
bool read(const Filename &fullpath, const LoaderOptions &options=LoaderOptions())
Reads the named filename into the texture.
Definition: texture.cxx:551
void consider_rescale(PNMImage &pnmimage)
Asks the PNMImage to change its scale when it reads the image, according to the whims of the Config....
Definition: texture.cxx:2039
bool write(const Filename &fullpath)
Writes the texture to the named filename.
Definition: texture.I:298
static bool has_binary_alpha(Format format)
Returns true if the indicated format includes a binary alpha only, false otherwise.
Definition: texture.cxx:2663
void * get_ram_mipmap_pointer(int n) const
Similar to get_ram_mipmap_image(), however, in this case the void pointer for the given ram image is...
Definition: texture.cxx:1228
static std::string format_compression_mode(CompressionMode cm)
Returns the indicated CompressionMode converted to a string word.
Definition: texture.cxx:2420
get_aux_data
Returns a record previously recorded via set_aux_data().
Definition: texture.h:552
static bool is_srgb(Format format)
Returns true if the indicated format is in the sRGB color space, false otherwise.
Definition: texture.cxx:2678
void set_orig_file_size(int x, int y, int z=1)
Specifies the size of the texture as it exists in its original disk file, before any Panda scaling.
Definition: texture.cxx:1961
bool get_active(PreparedGraphicsObjects *prepared_objects) const
Returns true if this Texture was rendered in the most recent frame within the indicated GSG.
Definition: texture.cxx:1519
get_keep_ram_image
Returns the flag that indicates whether this Texture is eligible to have its main RAM copy of the tex...
Definition: texture.h:472
bool read_dds(std::istream &in, const std::string &filename="", bool header_only=false)
Reads the texture from a DDS file object.
Definition: texture.cxx:943
void generate_normalization_cube_map(int size)
Generates a special cube map image in the texture that can be used to apply bump mapping effects: for...
Definition: texture.cxx:424
bool has_compression() const
Returns true if the texture indicates it wants to be compressed, either with CM_on or higher,...
Definition: texture.I:1102
static QualityLevel string_quality_level(const std::string &str)
Returns the QualityLevel value associated with the given string representation.
Definition: texture.cxx:2525
void generate_alpha_scale_map()
Generates a special 256x1 1-d texture that can be used to apply an arbitrary alpha scale to objects b...
Definition: texture.cxx:526
bool read_txo(std::istream &in, const std::string &filename="")
Reads the texture from a Panda texture object.
Definition: texture.cxx:845
static ComponentType string_component_type(const std::string &str)
Returns the ComponentType corresponding to the indicated string word.
Definition: texture.cxx:2158
static void register_with_read_factory()
Factory method to generate a Texture object.
Definition: texture.cxx:9964
static bool adjust_size(int &x_size, int &y_size, const std::string &name, bool for_padding, AutoTextureScale auto_texture_scale=ATS_unspecified)
Computes the proper size of the texture, based on the original size, the filename,...
Definition: texture.cxx:2726
virtual void finalize(BamReader *manager)
Called by the BamReader to perform any final actions needed for setting up the object after all objec...
Definition: texture.cxx:9992
static int up_to_power_2(int value)
Returns the smallest power of 2 greater than or equal to value.
Definition: texture.cxx:2008
static AutoTextureScale get_textures_power_2()
This flag returns ATS_none, ATS_up, or ATS_down and controls the scaling of textures in general.
Definition: texture.I:1863
get_auto_texture_scale
Returns the power-of-2 texture-scaling mode that will be applied to this particular texture when it i...
Definition: texture.h:532
void set_ram_mipmap_pointer_from_int(long long pointer, int n, int page_size)
Accepts a raw pointer cast as an int, which is then passed to set_ram_mipmap_pointer(); see the docum...
Definition: texture.cxx:1269
virtual void write_datagram(BamWriter *manager, Datagram &me)
Function to write the important information in the particular object to a Datagram.
Definition: texture.cxx:9973
static int down_to_power_2(int value)
Returns the largest power of 2 less than or equal to value.
Definition: texture.cxx:2020
bool release(PreparedGraphicsObjects *prepared_objects)
Frees the texture context only on the indicated object, if it exists there.
Definition: texture.cxx:1573
virtual bool has_cull_callback() const
Should be overridden by derived classes to return true if cull_callback() has been defined.
Definition: texture.cxx:2574
bool uses_mipmaps() const
Returns true if the minfilter settings on this texture indicate the use of mipmapping,...
Definition: texture.I:1127
static std::string format_texture_type(TextureType tt)
Returns the indicated TextureType converted to a string word.
Definition: texture.cxx:2077
has_simple_ram_image
Returns true if the Texture has a "simple" image available in main RAM.
Definition: texture.h:517
static bool is_integer(Format format)
Returns true if the indicated format is an integer format, false otherwise.
Definition: texture.cxx:2695
PTA_uchar modify_simple_ram_image()
Returns a modifiable pointer to the internal "simple" texture image.
Definition: texture.cxx:1292
void clear_ram_mipmap_image(int n)
Discards the current system-RAM image for the nth mipmap level.
Definition: texture.cxx:1277
bool was_image_modified(PreparedGraphicsObjects *prepared_objects) const
Returns true if the texture needs to be re-loaded onto the indicated GSG, either because its image da...
Definition: texture.cxx:1460
bool read_ktx(std::istream &in, const std::string &filename="", bool header_only=false)
Reads the texture from a KTX file object.
Definition: texture.cxx:960
size_t get_data_size_bytes(PreparedGraphicsObjects *prepared_objects) const
Returns the number of bytes which the texture is reported to consume within graphics memory,...
Definition: texture.cxx:1492
get_expected_ram_page_size
Returns the number of bytes that should be used per each Z page of the 3-d texture.
Definition: texture.h:449
virtual bool cull_callback(CullTraverser *trav, const CullTraverserData &data) const
If has_cull_callback() returns true, this function will be called during the cull traversal to perfor...
Definition: texture.cxx:2588
bool is_prepared(PreparedGraphicsObjects *prepared_objects) const
Returns true if the texture has already been prepared or enqueued for preparation on the indicated GS...
Definition: texture.cxx:1444
void set_ram_image_as(CPTA_uchar image, const std::string &provided_format)
Replaces the current system-RAM image with the new data, converting it first if necessary from the in...
Definition: texture.cxx:1025
void set_ram_mipmap_pointer(int n, void *image, size_t page_size=0)
Sets an explicit void pointer as the texture's mipmap image for the indicated level.
Definition: texture.cxx:1247
set_aux_data
Records an arbitrary object in the Texture, associated with a specified key.
Definition: texture.h:552
void texture_uploaded()
This method is called by the GraphicsEngine at the beginning of the frame *after* a texture has been ...
Definition: texture.cxx:2551
void set_size_padded(int x=1, int y=1, int z=1)
Changes the size of the texture, padding if necessary, and setting the pad region as well.
Definition: texture.cxx:1932
static bool has_alpha(Format format)
Returns true if the indicated format includes alpha, false otherwise.
Definition: texture.cxx:2632
get_num_loadable_ram_mipmap_images
Returns the number of contiguous mipmap levels that exist in RAM, up until the first gap in the seque...
Definition: texture.h:502
void generate_simple_ram_image()
Computes the "simple" ram image by loading the main RAM image, if it is not already available,...
Definition: texture.cxx:1324
static Format string_format(const std::string &str)
Returns the Format corresponding to the indicated string word.
Definition: texture.cxx:2302
clear_aux_data
Removes a record previously recorded via set_aux_data().
Definition: texture.h:552
int release_all()
Frees the context allocated on all objects for which the texture has been declared.
Definition: texture.cxx:1599
CPTA_uchar get_ram_mipmap_image(int n) const
Returns the system-RAM image data associated with the nth mipmap level, if present.
Definition: texture.cxx:1214
static std::string format_format(Format f)
Returns the indicated Format converted to a string word.
Definition: texture.cxx:2188
is_cacheable
Returns true if there is enough information in this Texture object to write it to the bam cache succe...
Definition: texture.h:473
static bool is_unsigned(ComponentType ctype)
Returns true if the indicated component type is unsigned, false otherwise.
Definition: texture.cxx:2604
A thread; that is, a lightweight process.
Definition: thread.h:46
static void consider_yield()
Possibly suspends the current thread for the rest of the current epoch, if it has run for enough this...
Definition: thread.I:212
get_current_thread
Returns a pointer to the currently-executing Thread object.
Definition: thread.h:109
TypeHandle is the identifier used to differentiate C++ class types.
Definition: typeHandle.h:81
bool is_exact_type(TypeHandle handle) const
Returns true if the current object is the indicated type exactly.
Definition: typedObject.I:38
bool is_of_type(TypeHandle handle) const
Returns true if the current object is or derives from the indicated type.
Definition: typedObject.I:28
A base class for things which need to inherit from both TypedObject and from ReferenceCount.
Base class for objects that can be written to and read from Bam files.
Definition: typedWritable.h:35
A hierarchy of directories and files that appears to be one continuous file system,...
static void close_write_file(std::ostream *stream)
Closes a file opened by a previous call to open_write_file().
Filename get_cwd() const
Returns the current directory name.
bool exists(const Filename &filename) const
Convenience function; returns true if the named file exists.
bool resolve_filename(Filename &filename, const DSearchPath &searchpath, const std::string &default_extension=std::string()) const
Searches the given search path for the filename.
std::ostream * open_write_file(const Filename &filename, bool auto_wrap, bool truncate)
Convenience function; returns a newly allocated ostream if the file exists and can be written,...
static void close_read_file(std::istream *stream)
Closes a file opened by a previous call to open_read_file().
PointerTo< VirtualFile > get_file(const Filename &filename, bool status_only=false) const
Looks up the file by the indicated name in the file system.
static VirtualFileSystem * get_global_ptr()
Returns the default global VirtualFileSystem.
The abstract base class for a file or directory within the VirtualFileSystem.
Definition: virtualFile.h:35
This is our own Panda specialization on the default STL map.
Definition: pmap.h:49
This is our own Panda specialization on the default STL vector.
Definition: pvector.h:42
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
BEGIN_PUBLISH EXPCL_PANDA_PNMIMAGE float decode_sRGB_float(unsigned char val)
Decodes the sRGB-encoded unsigned char value to a linearized float in the range 0-1.
Definition: convert_srgb.I:18
EXPCL_PANDA_PNMIMAGE unsigned char encode_sRGB_uchar(unsigned char val)
Encodes the linearized unsigned char value to an sRGB-encoded unsigned char value.
Definition: convert_srgb.I:80
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
std::ostream & indent(std::ostream &out, int indent_level)
A handy function for doing text formatting.
Definition: indent.cxx:20
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
int get_lowest_on_bit(unsigned short x)
Returns the index of the lowest 1 bit in the word.
Definition: pbitops.I:175
int get_next_higher_bit(unsigned short x)
Returns the smallest power of 2 greater than x.
Definition: pbitops.I:328
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
string upcase(const string &s)
Returns the input string with all lowercase letters converted to uppercase.
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
void release_read(const CycleData *pointer) const
Releases a pointer previously obtained via a call to read().
CycleDataType * write_upstream(bool force_to_0, Thread *current_thread)
See PipelineCyclerBase::write_upstream().
CycleDataType * elevate_read_upstream(const CycleDataType *pointer, bool force_to_0, Thread *current_thread)
See PipelineCyclerBase::elevate_read_upstream().
const CycleDataType * read(Thread *current_thread) const
See PipelineCyclerBase::read().
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
PT(Texture) Texture
Constructs a new Texture object from the txo file.
Definition: texture.cxx:859
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.
PANDA 3D SOFTWARE Copyright (c) Carnegie Mellon University.