	case VK_FORMAT_D32_SFLOAT_S8_UINT:
	case VK_FORMAT_D24_UNORM_S8_UINT:
	case VK_FORMAT_D16_UNORM_S8_UINT:
		return VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
	default:
		return VK_IMAGE_ASPECT_DEPTH_BIT;
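
// transitionImageLayout records a layout-transition barrier for texture->image.
// Besides choosing access masks and pipeline stages per layout pair, it also handles
// queue family ownership: an image created with VK_SHARING_MODE_EXCLUSIVE that is
// written on a dedicated transfer queue and sampled on the graphics queue needs a
// release barrier on the transfer queue and a matching acquire barrier on the
// graphics queue, both carrying the source and destination family indices.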
	VkPipelineStageFlags srcStage = 0;
	VkPipelineStageFlags dstStage = 0;

	VkImageMemoryBarrier imgBarrier = {
		.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
		.pNext = NULL,
		.oldLayout = oldLayout,
		.newLayout = newLayout,
		.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
		.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
		.image = texture->image,
		.subresourceRange.baseMipLevel = 0,
		.subresourceRange.baseArrayLayer = 0,
		.subresourceRange.layerCount = 1,
		.subresourceRange.levelCount = texture->mipLevels
	};

	if (newLayout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL)
	{
		imgBarrier.subresourceRange.aspectMask = getDepthStencilAspect(texture->format);
	}
	else
	{
		imgBarrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
	}

	if (oldLayout == VK_IMAGE_LAYOUT_UNDEFINED && newLayout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL)
	{
		imgBarrier.srcAccessMask = 0;
		imgBarrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
		srcStage = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
		dstStage = VK_PIPELINE_STAGE_TRANSFER_BIT;
	}
	else if (oldLayout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL && newLayout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL)
	{
		imgBarrier.srcAccessMask = 0;
		imgBarrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
		srcStage = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
		dstStage = VK_PIPELINE_STAGE_TRANSFER_BIT;
	}
	else if (oldLayout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL && newLayout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL)
	{
		// same queue for transfer and graphics: a plain barrier is enough
		if (vk_device.transferQueue == vk_device.gfxQueue)
		{
			imgBarrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
			imgBarrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
			srcStage = VK_PIPELINE_STAGE_TRANSFER_BIT;
			dstStage = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
		}
		else if (*queue == vk_device.transferQueue)
		{
			// release the image on the transfer queue: ownership moves to the graphics queue
			imgBarrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
			imgBarrier.dstAccessMask = 0;
			imgBarrier.srcQueueFamilyIndex = vk_device.transferFamilyIndex;
			imgBarrier.dstQueueFamilyIndex = vk_device.gfxFamilyIndex;
			srcStage = VK_PIPELINE_STAGE_TRANSFER_BIT;
			dstStage = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
		}
		else
		{
			// acquire the image on the graphics queue; only exclusively-owned images
			// need the explicit queue family indices
			if (texture->sharingMode == VK_SHARING_MODE_EXCLUSIVE)
			{
				imgBarrier.srcAccessMask = 0;
				imgBarrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
				imgBarrier.srcQueueFamilyIndex = vk_device.transferFamilyIndex;
				imgBarrier.dstQueueFamilyIndex = vk_device.gfxFamilyIndex;
				srcStage = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
				dstStage = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
			}
			else
			{
				imgBarrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
				imgBarrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
				srcStage = VK_PIPELINE_STAGE_TRANSFER_BIT;
				dstStage = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
			}
		}
	}
	else if (oldLayout == VK_IMAGE_LAYOUT_UNDEFINED && newLayout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL)
	{
		imgBarrier.srcAccessMask = 0;
		imgBarrier.dstAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
		srcStage = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
		dstStage = VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT;
	}
	else if (oldLayout == VK_IMAGE_LAYOUT_UNDEFINED && newLayout == VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL)
	{
		imgBarrier.srcAccessMask = 0;
		imgBarrier.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
		srcStage = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
		dstStage = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
	}
	else
	{
		assert(0 && "Invalid image stage!");
	}

	vkCmdPipelineBarrier(*cmdBuffer, srcStage, dstStage, 0, 0, NULL, 0, NULL, 1, &imgBarrier);
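
// GPU-side mipmap generation: level i is blitted from level i-1 at half the size.
// Each source level is moved to TRANSFER_SRC before being read and to
// SHADER_READ_ONLY once consumed, so blits and per-level barriers interleave.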
	int32_t mipWidth = width;
	int32_t mipHeight = height;

	VkImageMemoryBarrier imgBarrier = {
		.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
		.pNext = NULL,
		.image = texture->image,
		.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
		.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
		.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
		.subresourceRange.levelCount = 1,
		.subresourceRange.baseArrayLayer = 0,
		.subresourceRange.layerCount = 1
	};

	for (uint32_t i = 1; i < texture->mipLevels; ++i)
	{
		// make the previous level blittable as a transfer source
		imgBarrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
		imgBarrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
		imgBarrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
		imgBarrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
		imgBarrier.subresourceRange.baseMipLevel = i - 1;

		vkCmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, NULL, 0, NULL, 1, &imgBarrier);

		VkImageBlit blit = {
			.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
			.srcSubresource.mipLevel = i - 1,
			.srcSubresource.baseArrayLayer = 0,
			.srcSubresource.layerCount = 1,
			.srcOffsets[0] = { 0, 0, 0 },
			.srcOffsets[1] = { mipWidth, mipHeight, 1 },
			.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
			.dstSubresource.mipLevel = i,
			.dstSubresource.baseArrayLayer = 0,
			.dstSubresource.layerCount = 1,
			.dstOffsets[0] = { 0, 0, 0 },
			.dstOffsets[1] = { mipWidth > 1 ? mipWidth >> 1 : 1,
							   mipHeight > 1 ? mipHeight >> 1 : 1, 1 }
		};

		vkCmdBlitImage(*cmdBuffer, texture->image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
					   texture->image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &blit, mipFilter);

		// the blitted source level is done: hand it over to the fragment shader
		imgBarrier.srcAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
		imgBarrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
		imgBarrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
		imgBarrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;

		vkCmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, 0, NULL, 0, NULL, 1, &imgBarrier);

		if (mipWidth > 1)  mipWidth >>= 1;
		if (mipHeight > 1) mipHeight >>= 1;
	}

	// transition the last mip level, which was only ever written, to shader read
	imgBarrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
	imgBarrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
	imgBarrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
	imgBarrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
	imgBarrier.subresourceRange.baseMipLevel = texture->mipLevels - 1;

	vkCmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, 0, NULL, 0, NULL, 1, &imgBarrier);
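
// Texture upload: pixels are memcpy'd into a host-visible staging buffer, then
// copied into the device-local image with vkCmdCopyBufferToImage while it sits in
// TRANSFER_DST layout. TRANSFER_SRC usage is only added for mipmapped images, since
// generating mips blits from the image back into itself.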
	VkBuffer staging_buffer;
	VkCommandBuffer command_buffer;
	uint32_t staging_offset;
	void *imgData = QVk_GetStagingBuffer(imageSize, 4, &command_buffer, &staging_buffer, &staging_offset);
	memcpy(imgData, data, (size_t)imageSize);

	VkImageUsageFlags imageUsage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
	if (dstTex->mipLevels > 1)
		imageUsage |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;

	VkBufferImageCopy region = {
		.bufferOffset = staging_offset,
		.bufferRowLength = 0,
		.bufferImageHeight = 0,
		.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
		.imageSubresource.mipLevel = 0,
		.imageSubresource.baseArrayLayer = 0,
		.imageSubresource.layerCount = 1,
		.imageOffset = { 0, 0, 0 },
		.imageExtent = { width, height, 1 }
	};

	vkCmdCopyBufferToImage(command_buffer, staging_buffer, dstTex->image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);

	// release/acquire for sampling: the queue that wrote the image transitions it away
	// from TRANSFER_DST, with a separate acquire for the graphics queue when transfer
	// and graphics are serviced by different queues
	if (unifiedTransferAndGfx || dstTex->sharingMode == VK_SHARING_MODE_EXCLUSIVE)
		transitionImageLayout(&command_buffer, &vk_device.transferQueue, dstTex, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);

	if (!unifiedTransferAndGfx)
		transitionImageLayout(&command_buffer, &vk_device.gfxQueue, dstTex, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
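
// QVk_CreateImageView: one 2D view with identity swizzles spanning all mip levels;
// the aspect flags distinguish color views from depth/stencil views.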
VkResult QVk_CreateImageView(const VkImage *image, VkImageAspectFlags aspectFlags, VkImageView *imageView, VkFormat format, uint32_t mipLevels)
{
	VkImageViewCreateInfo ivCreateInfo = {
		.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
		.pNext = NULL,
		.flags = 0,
		.image = *image,
		.viewType = VK_IMAGE_VIEW_TYPE_2D,
		.format = format,
		.components.r = VK_COMPONENT_SWIZZLE_IDENTITY,
		.components.g = VK_COMPONENT_SWIZZLE_IDENTITY,
		.components.b = VK_COMPONENT_SWIZZLE_IDENTITY,
		.components.a = VK_COMPONENT_SWIZZLE_IDENTITY,
		.subresourceRange.aspectMask = aspectFlags,
		.subresourceRange.baseArrayLayer = 0,
		.subresourceRange.baseMipLevel = 0,
		.subresourceRange.layerCount = 1,
		.subresourceRange.levelCount = mipLevels
	};

	return vkCreateImageView(vk_device.logical, &ivCreateInfo, NULL, imageView);
}
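
// Image creation: EXCLUSIVE sharing is the default, but if the graphics and transfer
// queue families differ the image is created CONCURRENT so both families can use it
// without explicit ownership transfers.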
	VkImageCreateInfo imageInfo = {
		.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
		.imageType = VK_IMAGE_TYPE_2D,
		.extent.width = width,
		.extent.height = height,
		.extent.depth = 1,
		.mipLevels = texture->mipLevels,
		.arrayLayers = 1,
		.format = format,
		.tiling = VK_IMAGE_TILING_OPTIMAL,
		.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
		.usage = usage,
		.sharingMode = VK_SHARING_MODE_EXCLUSIVE,
		.samples = texture->sampleCount,
		.flags = 0
	};

	uint32_t queueFamilies[] = { (uint32_t)vk_device.gfxFamilyIndex, (uint32_t)vk_device.transferFamilyIndex };
	if (vk_device.gfxFamilyIndex != vk_device.transferFamilyIndex)
	{
		imageInfo.sharingMode = VK_SHARING_MODE_CONCURRENT;
		imageInfo.queueFamilyIndexCount = 2;
		imageInfo.pQueueFamilyIndices = queueFamilies;
	}

	texture->sharingMode = imageInfo.sharingMode;
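
// Each texture gets a single sampler descriptor set from the renderer's descriptor
// pool (the pool and set-layout handles in the initializer below follow the
// renderer's global naming).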
	VkDescriptorSetAllocateInfo dsAllocInfo = {
		.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
		.pNext = NULL,
		.descriptorPool = vk_descriptorPool,
		.descriptorSetCount = 1,
		.pSetLayouts = &vk_samplerDescSetLayout
	};
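
// Updating a subregion of an existing texture reuses the staging-buffer upload path;
// only imageOffset differs, pointing at the destination texel block.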
	VkBuffer staging_buffer;
	VkCommandBuffer command_buffer;
	uint32_t staging_offset;
	void *imgData = QVk_GetStagingBuffer(imageSize, 4, &command_buffer, &staging_buffer, &staging_offset);
	memcpy(imgData, data, (size_t)imageSize);

	VkBufferImageCopy region = {
		.bufferOffset = staging_offset,
		.bufferRowLength = 0,
		.bufferImageHeight = 0,
		.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
		.imageSubresource.mipLevel = 0,
		.imageSubresource.baseArrayLayer = 0,
		.imageSubresource.layerCount = 1,
		.imageOffset = { offset_x, offset_y, 0 },
		.imageExtent = { width, height, 1 }
	};

	vkCmdCopyBufferToImage(command_buffer, staging_buffer, texture->image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);

	if (unifiedTransferAndGfx || texture->sharingMode == VK_SHARING_MODE_EXCLUSIVE)
		transitionImageLayout(&command_buffer, &vk_device.transferQueue, texture, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);

	if (!unifiedTransferAndGfx)
		transitionImageLayout(&command_buffer, &vk_device.gfxQueue, texture, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
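
// QVk_ReleaseTexture destroys the image, view and descriptor set if present, then
// clears the handles so the slot can be reused. (The VMA allocator handle and the
// texture's allocation member below are assumed from the renderer's texture struct.)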
	if (texture->image != VK_NULL_HANDLE)
		vmaDestroyImage(vk_malloc, texture->image, texture->allocation);
	if (texture->imageView != VK_NULL_HANDLE)
		vkDestroyImageView(vk_device.logical, texture->imageView, NULL);
	if (texture->descriptorSet != VK_NULL_HANDLE)
		vkFreeDescriptorSets(vk_device.logical, vk_descriptorPool, 1, &texture->descriptorSet);

	texture->image = VK_NULL_HANDLE;
	texture->imageView = VK_NULL_HANDLE;
	texture->descriptorSet = VK_NULL_HANDLE;
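
// Screenshot readback: the swapchain image is copied into a host-visible buffer.
// HOST_CACHED memory is preferred because the CPU reads the entire buffer afterwards,
// which is far faster from cached memory. (The buffer-options struct name below
// follows the renderer's own buffer API.)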
	VkCommandBuffer cmdBuffer;
	qvkbufferopts_t bufferOpts = {
		.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT,
		.reqMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
		.prefMemFlags = VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
	};
	// move the presented swapchain image into a readable transfer-source layout
	// (the barrier's image member is set to the swapchain image being read back)
	VkImageMemoryBarrier imgBarrier = {
		.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
		.pNext = NULL,
		.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
		.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT,
		.oldLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
		.newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
		.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
		.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
		.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
		.subresourceRange.baseMipLevel = 0,
		.subresourceRange.baseArrayLayer = 0,
		.subresourceRange.layerCount = 1,
		.subresourceRange.levelCount = 1
	};

	vkCmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, NULL, 0, NULL, 1, &imgBarrier);

	VkBufferImageCopy region = {
		.bufferOffset = 0,
		.bufferRowLength = width,
		.bufferImageHeight = height,
		.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
		.imageSubresource.mipLevel = 0,
		.imageSubresource.baseArrayLayer = 0,
		.imageSubresource.layerCount = 1,
		.imageOffset = { 0, 0, 0 },
		.imageExtent = { width, height, 1 }
	};
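
// The "scrap" is the classic Quake II texture atlas: small 2D pics are packed into a
// shared 256x256 block so they can be drawn without a texture switch per pic.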
#define BLOCK_WIDTH 256
#define BLOCK_HEIGHT 256
		for (j = 0; j < w; j++)
		{
			if (scrap_allocated[texnum][i + j] >= best)
				break;
			if (scrap_allocated[texnum][i + j] > best2)
				best2 = scrap_allocated[texnum][i + j];
		}

	for (i = 0; i < w; i++)
		scrap_allocated[texnum][*x + i] = best + h;
#define NUM_VK_MODES (sizeof(modes) / sizeof(vkmode_t))
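
// Vk_TextureMode / Vk_LmapTextureMode map the id filter names (VK_NEAREST, VK_LINEAR,
// VK_MIPMAP_NEAREST, VK_MIPMAP_LINEAR) onto sampler filters. The previous valid name
// is remembered so a bad cvar value can be rolled back.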
	static char prev_mode[32] = "VK_MIPMAP_LINEAR";

	if (i == NUM_VK_MODES)
	{
		ri.Con_Printf(PRINT_ALL, "bad filter name (valid values: VK_NEAREST, VK_LINEAR, VK_MIPMAP_NEAREST, VK_MIPMAP_LINEAR)\n");
		return;
	}

	memcpy(prev_mode, string, strlen(string));
	prev_mode[strlen(string)] = '\0';
	static char prev_mode[32] = "VK_MIPMAP_LINEAR";

	if (i == NUM_VK_MODES)
	{
		ri.Con_Printf(PRINT_ALL, "bad filter name (valid values: VK_NEAREST, VK_LINEAR, VK_MIPMAP_NEAREST, VK_MIPMAP_LINEAR)\n");
		return;
	}

	memcpy(prev_mode, string, strlen(string));
	prev_mode[strlen(string)] = '\0';
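
// LoadPCX decodes 8-bit paletted PCX data: a byte with the top two bits set is an
// RLE marker whose low six bits give the run length of the following byte; anything
// else is a literal pixel. The 768-byte palette sits at the end of the file.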
	int		dataByte, runLength;

	out = malloc((pcx->ymax + 1) * (pcx->xmax + 1));

	*palette = malloc(768);
	memcpy(*palette, (byte *)pcx + len - 768, 768);

		for (x = 0; x <= pcx->xmax; )
		{
			dataByte = *raw++;

			if ((dataByte & 0xC0) == 0xC0)
			{
				runLength = dataByte & 0x3F;
				dataByte = *raw++;
			}
			else
				runLength = 1;

			while (runLength-- > 0)
				pix[x++] = dataByte;
		}

	if (raw - (byte *)pcx > len)
	{
		ri.Con_Printf(PRINT_DEVELOPER, "PCX file %s was malformed", filename);
		free(*pic);
		*pic = NULL;
	}
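
// LoadTGA handles type 2 (uncompressed) and type 10 (run-length encoded) true-color
// targas. Rows are stored bottom-up and pixels as BGR(A), so decoding walks the
// output from the last row and swizzles each pixel into RGBA. The reads below are
// the 32-bit BGRA case; 24-bit pixels omit the alpha read and use 255 instead.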
	int columns, rows, numPixels;

	columns = targa_header.width;
	rows = targa_header.height;
	numPixels = columns * rows;

	targa_rgba = malloc(numPixels*4);

	// uncompressed (type 2)
	for (row = rows - 1; row >= 0; row--) {
		pixbuf = targa_rgba + row*columns*4;
		for (column = 0; column < columns; column++) {
			unsigned char red, green, blue, alphabyte;
			blue = *buf_p++;
			green = *buf_p++;
			red = *buf_p++;
			alphabyte = *buf_p++;
			*pixbuf++ = red; *pixbuf++ = green; *pixbuf++ = blue; *pixbuf++ = alphabyte;
		}
	}

	unsigned char red = 0, green = 0, blue = 0, alphabyte = 0, packetHeader, packetSize, j;
	for (row = rows - 1; row >= 0; row--) {
		pixbuf = targa_rgba + row*columns*4;
		for (column = 0; column < columns; ) {
			packetHeader = *buf_p++;
			packetSize = 1 + (packetHeader & 0x7f);
			if (packetHeader & 0x80) {	// run-length packet: one pixel value repeated packetSize times
				blue = *buf_p++;
				green = *buf_p++;
				red = *buf_p++;
				alphabyte = *buf_p++;
				for (j = 0; j < packetSize; j++) {
					*pixbuf++ = red; *pixbuf++ = green; *pixbuf++ = blue; *pixbuf++ = alphabyte;
					column++;
					if (column == columns) {	// run spans across rows
						column = 0;
						if (row > 0) row--;
						else goto breakOut;
						pixbuf = targa_rgba + row*columns*4;
					}
				}
			}
			else {	// non run-length packet: packetSize literal pixels
				for (j = 0; j < packetSize; j++) {
					blue = *buf_p++;
					green = *buf_p++;
					red = *buf_p++;
					alphabyte = *buf_p++;
					*pixbuf++ = red; *pixbuf++ = green; *pixbuf++ = blue; *pixbuf++ = alphabyte;
					column++;
					if (column == columns) {	// pixel packet run spans across rows
						column = 0;
						if (row > 0) row--;
						else goto breakOut;
						pixbuf = targa_rgba + row*columns*4;
					}
				}
			}
		}
	}
	breakOut:;
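
// Model skins can contain stray texels of the transparent palette index (255) inside
// their outlines; R_FloodFillSkin flood-fills them from the top-left pixel's color so
// mipmapping doesn't produce fringes. The FIFO ring buffer keeps the fill iterative.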
#define FLOODFILL_FIFO_SIZE 0x1000
#define FLOODFILL_FIFO_MASK (FLOODFILL_FIFO_SIZE - 1)

#define FLOODFILL_STEP( off, dx, dy ) \
{ \
	if (pos[off] == fillcolor) \
	{ \
		pos[off] = 255; \
		fifo[inpt].x = x + (dx), fifo[inpt].y = y + (dy); \
		inpt = (inpt + 1) & FLOODFILL_FIFO_MASK; \
	} \
	else if (pos[off] != 255) fdc = pos[off]; \
}
	byte			fillcolor = *skin; // assume this is the pixel to fill
	floodfill_t		fifo[FLOODFILL_FIFO_SIZE];
	int				inpt = 0, outpt = 0;
	int				filledcolor = -1;
	int				i;

	if (filledcolor == -1)
	{
		filledcolor = 0;
		// attempt to find opaque black
		for (i = 0; i < 256; ++i)
			if (d_8to24table[i] == (255 << 0)) // alpha 1.0
			{
				filledcolor = i;
				break;
			}
	}

	// can't fill to the fill color or to the transparent color (used as a visited marker)
	if ((fillcolor == filledcolor) || (fillcolor == 255))
		return;

	fifo[inpt].x = 0, fifo[inpt].y = 0;
	inpt = (inpt + 1) & FLOODFILL_FIFO_MASK;

	while (outpt != inpt)
	{
		int		x = fifo[outpt].x, y = fifo[outpt].y;
		int		fdc = filledcolor;
		byte	*pos = &skin[x + skinwidth * y];

		outpt = (outpt + 1) & FLOODFILL_FIFO_MASK;

		if (x > 0)				FLOODFILL_STEP(-1, -1, 0);
		if (x < skinwidth - 1)	FLOODFILL_STEP(1, 1, 0);
		if (y > 0)				FLOODFILL_STEP(-skinwidth, 0, -1);
		if (y < skinheight - 1)	FLOODFILL_STEP(skinwidth, 0, 1);
		skin[x + skinwidth * y] = fdc;
	}
void Vk_ResampleTexture (unsigned *in, int inwidth, int inheight, unsigned *out, int outwidth, int outheight)
{
	int			i, j;
	unsigned	*inrow, *inrow2;
	unsigned	frac, fracstep;
	unsigned	p1[1024], p2[1024];
	byte		*pix1, *pix2, *pix3, *pix4;

	fracstep = inwidth*0x10000/outwidth;

	frac = fracstep>>2;
	for (i = 0; i < outwidth; i++)
	{
		p1[i] = 4*(frac>>16);
		frac += fracstep;
	}
	frac = 3*(fracstep>>2);
	for (i = 0; i < outwidth; i++)
	{
		p2[i] = 4*(frac>>16);
		frac += fracstep;
	}

	for (i = 0; i < outheight; i++, out += outwidth)
	{
		inrow = in + inwidth*(int)((i+0.25)*inheight/outheight);
		inrow2 = in + inwidth*(int)((i+0.75)*inheight/outheight);
		for (j = 0; j < outwidth; j++)
		{
			pix1 = (byte *)inrow + p1[j];
			pix2 = (byte *)inrow + p2[j];
			pix3 = (byte *)inrow2 + p1[j];
			pix4 = (byte *)inrow2 + p2[j];
			((byte *)(out+j))[0] = (pix1[0] + pix2[0] + pix3[0] + pix4[0])>>2;
			((byte *)(out+j))[1] = (pix1[1] + pix2[1] + pix3[1] + pix4[1])>>2;
			((byte *)(out+j))[2] = (pix1[2] + pix2[2] + pix3[2] + pix4[2])>>2;
			((byte *)(out+j))[3] = (pix1[3] + pix2[3] + pix3[3] + pix4[3])>>2;
		}
	}
}
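
// Vk_LightScaleTexture bakes the gamma (and optionally intensity) lookup tables into
// the texel values themselves, since the palette tricks of the software renderer are
// unavailable; alpha is left untouched.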
	c = inwidth*inheight;
	for (i = 0; i < c; i++, p += 4)
	{
		p[0] = gammatable[p[0]];
		p[1] = gammatable[p[1]];
		p[2] = gammatable[p[2]];
	}

	c = inwidth*inheight;
	for (i = 0; i < c; i++, p += 4)
	{
		p[0] = gammatable[intensitytable[p[0]]];
		p[1] = gammatable[intensitytable[p[1]]];
		p[2] = gammatable[intensitytable[p[2]]];
	}
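
// Vk_Upload32 rounds dimensions to powers of two, clamps them to the 256x256 limit
// imposed by the scaled[] scratch buffer, and resamples whenever the rounded size
// differs from the input. (The mip-count loop at the end assumes a mip-level counter
// declared earlier in the function.)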
	unsigned	scaled[256 * 256];
	int			scaled_width, scaled_height;

	for (scaled_width = 1; scaled_width < width; scaled_width <<= 1)
		;
	if (vk_round_down->value && scaled_width > width && mipmap)
		scaled_width >>= 1;
	for (scaled_height = 1; scaled_height < height; scaled_height <<= 1)
		;
	if (vk_round_down->value && scaled_height > height && mipmap)
		scaled_height >>= 1;

	// don't ever bother with >256 textures
	if (scaled_width > 256)
		scaled_width = 256;
	if (scaled_height > 256)
		scaled_height = 256;

	if (scaled_width < 1)
		scaled_width = 1;
	if (scaled_height < 1)
		scaled_height = 1;

	if (scaled_width * scaled_height > sizeof(scaled) / 4)
		ri.Sys_Error(ERR_DROP, "Vk_Upload32: too big");

	if (scaled_width == width && scaled_height == height)
	{
		memcpy(texBuffer, data, scaled_width * scaled_height * 4);
	}

	memcpy(texBuffer, scaled, sizeof(scaled));

	// count the mip levels: one per halving until the image reaches 1x1
	while (scaled_width > 1 || scaled_height > 1)
	{
		scaled_width >>= 1;
		scaled_height >>= 1;
		if (scaled_width < 1)
			scaled_width = 1;
		if (scaled_height < 1)
			scaled_height = 1;
		miplevel++;
	}
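
// Vk_Upload8 expands 8-bit paletted pixels to RGBA through d_8to24table. Index 255
// is transparent; its color channels are borrowed from a neighboring opaque pixel so
// bilinear filtering doesn't bleed dark fringes around cutouts.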
	static unsigned	trans[512 * 256];

	if (s > sizeof(trans) / 4)
		ri.Sys_Error(ERR_DROP, "Vk_Upload8: too large");

	for (i = 0; i < s; i++)
	{
		p = data[i];
		trans[i] = d_8to24table[p];

		if (p == 255)
		{	// transparent, so scan around for another color to avoid alpha fringes
			if (i > width && data[i - width] != 255)
				p = data[i - width];
			else if (i < s - width && data[i + width] != 255)
				p = data[i + width];
			else if (i > 0 && data[i - 1] != 255)
				p = data[i - 1];
			else if (i < s - 1 && data[i + 1] != 255)
				p = data[i + 1];
			else
				p = 0;

			// copy rgb components
			((byte *)&trans[i])[0] = ((byte *)&d_8to24table[p])[0];
			((byte *)&trans[i])[1] = ((byte *)&d_8to24table[p])[1];
			((byte *)&trans[i])[2] = ((byte *)&d_8to24table[p])[2];
		}
	}
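
// Vk_LoadPic: small pics that fit the scrap are copied into the shared atlas and
// marked image->scrap = true; everything else gets its own texture and is marked
// false so it can be released individually.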
	if (strlen(name) >= sizeof(image->name))
		ri.Sys_Error(ERR_DROP, "Draw_LoadPic: \"%s\" is too long", name);

		k = 0;
		for (i = 0; i < image->height; i++)
			for (j = 0; j < image->width; j++, k++)
				scrap_texels[texnum][(y + i) * BLOCK_WIDTH + x + j] = pic[k];

		image->scrap = true;

		image->scrap = false;
	byte	*pic, *palette;

	if (!strcmp(name+len-4, ".pcx"))
	{
		LoadPCX(name, &pic, &palette, &width, &height);
	}
	else if (!strcmp(name+len-4, ".wal"))
	{
		// .wal mip textures are loaded directly from their embedded data
	}
	else if (!strcmp(name+len-4, ".tga"))
	{
		LoadTGA(name, &pic, &width, &height);
	}
	memset(image, 0, sizeof(*image));

	for (i = 0; i < 256; i++)
	{
		r = pal[i*3+0];
		g = pal[i*3+1];
		b = pal[i*3+2];

		v = (255<<24) + (r<<0) + (g<<8) + (b<<16);
		d_8to24table[i] = LittleLong(v);
	}
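
// The gamma table applies the gamma cvar's power curve to each 8-bit channel; the
// intensity table is a saturating multiply. Both are applied at texture upload time.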
	for (i = 0; i < 256; i++)
	{
		if (g == 1)
			gammatable[i] = i;
		else
		{
			inf = 255 * pow((i + 0.5) / 255.5, g) + 0.5;
			if (inf < 0) inf = 0;
			if (inf > 255) inf = 255;
			gammatable[i] = inf;
		}
	}

	for (i = 0; i < 256; i++)
	{
		j = i * intensity->value;
		if (j > 255) j = 255;
		intensitytable[i] = j;
	}
	memset(image, 0, sizeof(*image));