                     const uint8_t *val_table, int nb_codes,
                     int use_static, int is_ac)
    uint16_t huff_code[256];
    uint16_t huff_sym[256];
    for (i = 0; i < 256; i++)
        huff_sym[i] = i + 16 * is_ac;
    huff_sym[0] = 16 * 256;
                              huff_code, 2, 2, huff_sym, 2, 2, use_static);
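/* For context: a JPEG DHT table is only a count of codes per bit length
 * (bits_table[1..16]) plus the symbol values in code order (val_table).  The
 * hedged sketch below shows how canonical Huffman codes are derived from those
 * counts before being handed to ff_init_vlc_sparse() as above; the function
 * name and locals are illustrative (the real helper in FFmpeg is
 * ff_mjpeg_build_huffman_codes()). */
static int jpeg_build_codes_sketch(uint8_t huff_size[256], uint16_t huff_code[256],
                                   const uint8_t *bits_table)
{
    int i, j, k = 0;
    uint16_t code = 0;

    for (i = 1; i <= 16; i++) {                /* code lengths 1..16 */
        for (j = 0; j < bits_table[i]; j++) {  /* bits_table[i] codes of this length */
            huff_size[k] = i;
            huff_code[k] = code++;
            k++;
        }
        code <<= 1;                            /* canonical rule: next length doubles the code */
    }
    return k;                                  /* number of codes generated */
}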
                         ht[i].bits, ht[i].values, ht[i].codes,
                         0, ht[i].class == 1);
        if (ht[i].class < 2) {
                   ht[i].values, ht[i].length);
    if (len > 14 && buf[12] == 1)
    if (len > 14 && buf[12] == 2)
           "error using external huffman table, switching back to internal\n");
        for (i = 0; i < 64; i++) {
        len -= 1 + 64 * (1 + pr);
    int len, index, i, class, n, v, code_max;
        for (i = 1; i <= 16; i++) {
        if (len < n || n > 256)
        for (i = 0; i < n; i++) {
               class, index, code_max + 1);
        if ((ret = build_vlc(&s->vlcs[class][index], bits_table, val_table,
                             code_max + 1, 0, class > 0)) < 0)
        if ((ret = build_vlc(&s->vlcs[2][index], bits_table, val_table,
                             code_max + 1, 0, 0)) < 0)
        for (i = 0; i < 16; i++)
        for (i = 0; i < 256; i++)
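/* Reminder of what the loops above walk (ITU-T T.81, B.2.4.2): each table in a
 * DHT segment is
 *     1 byte   Tc/Th   table class (0 = DC, 1 = AC) and table id,
 *     16 bytes L1..L16 number of codes of each length,
 *     sum(Li)  bytes   the symbol values in code order,
 * which is why code_max is tracked: the VLC only needs max(value)+1 symbols.
 * A hypothetical stand-alone reader for one table (names illustrative only): */
static int read_one_dht_sketch(const uint8_t *p, const uint8_t *end,
                               uint8_t bits[17], uint8_t values[256])
{
    int i, n = 0;

    if (end - p < 17)
        return -1;
    int tc_th = *p++;                  /* class in high nibble, table id in low nibble */
    (void)tc_th;
    bits[0] = 0;
    for (i = 1; i <= 16; i++) {
        bits[i] = *p++;
        n += bits[i];
    }
    if (n > 256 || end - p < n)
        return -1;
    for (i = 0; i < n; i++)
        values[i] = *p++;
    return n;                          /* number of symbols in this table */
}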
    if (bits > 16 || bits < 1) {
    if (s->buf_size && (width + 7) / 8 * ((height + 7) / 8) > s->buf_size * 4LL)
    if (nb_components <= 0 ||
               "nb_components changing in interlaced picture\n");
    if (s->ls && !(bits <= 8 || nb_components == 1)) {
                                "JPEG-LS that is not <= 8 bits/component or 16-bit gray");
    if (len != 8 + 3 * nb_components) {
    for (i = 0; i < nb_components; i++) {
        if (h_count[i] > s->h_max)
        if (v_count[i] > s->v_max)
        if (!h_count[i] || !v_count[i]) {
                   "Invalid sampling factor in component %d %d:%d\n",
                   i, h_count[i], v_count[i]);
                i, h_count[i], v_count[i],
    if (   nb_components == 4
    if (nb_components == 2) {
        memcmp(s->h_count, h_count, sizeof(h_count)) ||
        memcmp(s->v_count, v_count, sizeof(v_count))) {
        memcpy(s->h_count, h_count, sizeof(h_count));
        memcpy(s->v_count, v_count, sizeof(v_count));
    if (s->v_max == 1 && s->h_max == 1 && s->lossless == 1 &&
        (nb_components == 3 || nb_components == 4))
    pix_fmt_id = ((unsigned)s->h_count[0] << 28) | (s->v_count[0] << 24) |
    if (!(pix_fmt_id & 0xD0D0D0D0))
        pix_fmt_id -= (pix_fmt_id & 0xF0F0F0F0) >> 1;
    if (!(pix_fmt_id & 0x0D0D0D0D))
        pix_fmt_id -= (pix_fmt_id & 0x0F0F0F0F) >> 1;
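    /* How to read pix_fmt_id: each nibble holds one SOF sampling factor, packed
     * as h0 v0 h1 v1 h2 v2 h3 v3 from the most significant nibble down.  A
     * plain 4:2:0 stream (2x2 luma, 1x1 for both chroma planes) therefore
     * yields 0x22111100, which the switch below maps to a pixel format.  The
     * two adjustments above appear to halve every horizontal (or vertical)
     * factor when all of them are 0 or 2, since only the ratio between
     * components matters and this keeps the number of switch cases down. */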
    for (i = 0; i < 8; i++) {
        int j  = 6 + (i & 1) - (i & 6);
        int is = (pix_fmt_id >> (4 * i)) & 0xF;
        int js = (pix_fmt_id >> (4 * j)) & 0xF;
        if (is == 1 && js != 2 && (i < 2 || i > 5))
            js = (pix_fmt_id >> ( 8 + 4 * (i & 1))) & 0xF;
        if (is == 1 && js != 2 && (i < 2 || i > 5))
            js = (pix_fmt_id >> (16 + 4 * (i & 1))) & 0xF;
        if (is == 1 && js == 2) {
    if (pix_fmt_id != 0x11110000 && pix_fmt_id != 0x11000000)
    switch (pix_fmt_id) {
        if (pix_fmt_id == 0x14111100)
    if (pix_fmt_id == 0x42111100) {
    } else if (pix_fmt_id == 0x24111100) {
    } else if (pix_fmt_id == 0x23111100) {
    else if (s->bits <= 8)
#if CONFIG_MJPEG_NVDEC_HWACCEL
#if CONFIG_MJPEG_VAAPI_HWACCEL
    for (i = 0; i < 4; i++)
        int bw = (width  + s->h_max * 8 - 1) / (s->h_max * 8);
        int bh = (height + s->v_max * 8 - 1) / (s->v_max * 8);
    if (code < 0 || code > 16) {
               "mjpeg_decode_dc: bad vlc: %d:%d (%p)\n",
               0, dc_index, &s->vlcs[0][dc_index]);
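/* What the DC path computes (hedged note): the VLC above returns the JPEG
 * "category" SSSS, i.e. how many magnitude bits follow; those bits are read
 * and sign-extended per the EXTEND() rule of T.81 (get_xbits() in FFmpeg), and
 * the result is a difference that gets added to the previous DC value of the
 * same component, stored in last_dc[] as seen below. */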
                        int dc_index, int ac_index, uint16_t *quant_matrix)
    if (val == 0xfffff) {
    val = val * (unsigned)quant_matrix[0] + s->last_dc[component];
    val = av_clip_int16(val);
            i += ((unsigned)code) >> 4;
                int sign = (~cache) >> 31;
                level    = (NEG_USR32(sign ^ cache, code) ^ sign) - sign;
                block[j] = level * quant_matrix[i];
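/* Shape of the baseline AC decoding used above (T.81 F.2.2.2), as a hedged
 * sketch.  read_ac_symbol() and read_signed_bits() stand in for the
 * GET_VLC/GET_CACHE machinery and are assumptions, not real FFmpeg calls.
 * Each symbol packs RRRR|SSSS: a zero run in the high nibble and the
 * magnitude-bit count in the low nibble; 0x00 ends the block (EOB) and
 * 0xF0 (ZRL) skips 16 zero coefficients. */
static void decode_ac_sketch(int16_t block[64], const uint16_t qmat[64],
                             const uint8_t zigzag[64],
                             int (*read_ac_symbol)(void *),
                             int (*read_signed_bits)(void *, int n), void *gb)
{
    int i = 1;                             /* coefficient 0 is the DC term */

    while (i < 64) {
        int rs   = read_ac_symbol(gb);
        int run  = rs >> 4;
        int size = rs & 15;
        if (size == 0) {
            if (run != 15)
                break;                     /* EOB: rest of the block stays zero */
            i += 16;                       /* ZRL */
        } else {
            i += run;
            if (i > 63)
                break;                     /* guard against corrupt streams */
            block[zigzag[i]] = read_signed_bits(gb, size) * qmat[i];
            i++;
        }
    }
}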
                                 int component, int dc_index,
                                 uint16_t *quant_matrix, int Al)
    if (val == 0xfffff) {
    val = (val * (quant_matrix[0] << Al)) + s->last_dc[component];
                                    uint8_t *last_nnz, int ac_index,
                                    uint16_t *quant_matrix,
                                    int ss, int se, int Al, int *EOBRUN)
        for (i = ss; ; i++) {
            run = ((unsigned) code) >> 4;
                    int sign = (~cache) >> 31;
                    level    = (NEG_USR32(sign ^ cache, code) ^ sign) - sign;
                    block[j] = level * (quant_matrix[se] << Al);
                    block[j] = level * (quant_matrix[i] << Al);
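/* Note on EOBRUN in the progressive AC pass above: a symbol whose size nibble
 * is zero and whose run nibble r is below 15 starts an end-of-band run of
 * 2^r .. 2^(r+1)-1 blocks (T.81 G.1.2.2), the exact count coming from r extra
 * bits.  The decoder then leaves the ss..se band of that many following blocks
 * untouched instead of reading further symbols for them. */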
#define REFINE_BIT(j) {                                      \
    UPDATE_CACHE(re, &s->gb);                                \
    sign = block[j] >> 15;                                   \
    block[j] += SHOW_UBITS(re, &s->gb, 1) *                  \
                ((quant_matrix[i] ^ sign) - sign) << Al;     \
    LAST_SKIP_BITS(re, &s->gb, 1);                           \

        av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i); \
        j = s->scantable.permutated[i];                      \
        else if (run-- == 0)                                 \

                                   int ac_index, uint16_t *quant_matrix,
                                   int ss, int se, int Al, int *EOBRUN)
    int last = FFMIN(se, *last_nnz);
        run = ((unsigned) code) >> 4;
            block[j] = ((quant_matrix[i] << Al) ^ val) - val;
        run = ((unsigned) code) >> 4;
    for (; i <= last; i++) {
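/* Refinement passes (Ah != 0): coefficients that are already nonzero receive a
 * single correction bit of weight 1 << Al (the REFINE_BIT macro above), while a
 * coefficient that first becomes nonzero in this pass is coded with magnitude
 * exactly +/-(1 << Al), which is what the ((quant_matrix[i] << Al) ^ val) - val
 * form produces from the sign in val. */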
        for (i = 0; i < nb_components; i++)
        for (i = 0; i < nb_components; i++)
    int left[4], top[4], topleft[4];
    const int linesize = s->linesize[0];
    const int mask = ((1 << s->bits) - 1) << point_transform;
    int resync_mb_y = 0;
    int resync_mb_x = 0;
        width = s->mb_width / nb_components;
    for (i = 0; i < 4; i++)
    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
            ptr += linesize >> 1;
        for (i = 0; i < 4; i++)
            top[i] = left[i] = topleft[i] = buffer[0][i];
        for (i = 0; i < 6; i++)
            vpred[i] = 1 << (s->bits - 1);
        for (mb_x = 0; mb_x < width; mb_x++) {
            int modified_predictor = predictor;
                    top[i] = left[i] = topleft[i] = 1 << (s->bits - 1);
            if (mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || !mb_x)
                modified_predictor = 1;
            for (i = 0; i < nb_components; i++) {
                topleft[i] = top[i];
                if (!s->bayer || mb_x) {
                    pred = vpred[i] - dc;
                PREDICT(pred, topleft[i], top[i], pred, modified_predictor);
                    mask & (pred + (unsigned)(dc * (1 << point_transform)));
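/* The PREDICT() macro above selects one of the seven lossless-JPEG predictors
 * (T.81 H.1.2.1) from the left (a), above (b) and above-left (c) neighbours.
 * A hedged, stand-alone equivalent: */
static int ljpeg_predict_sketch(int a, int b, int c, int predictor)
{
    switch (predictor) {
    case 1:  return a;
    case 2:  return b;
    case 3:  return c;
    case 4:  return a + b - c;
    case 5:  return a + ((b - c) >> 1);
    case 6:  return b + ((a - c) >> 1);
    case 7:  return (a + b) >> 1;
    default: return a;            /* predictor 0 is only meaningful in DC scans */
    }
}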
            for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                ptr[4*mb_x + 2] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
                ptr[4*mb_x + 1] = buffer[mb_x][1] + ptr[4*mb_x + 2];
                ptr[4*mb_x + 3] = buffer[mb_x][2] + ptr[4*mb_x + 2];
                ptr[4*mb_x + 0] = buffer[mb_x][3];
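            /* The writes above appear to undo a reversible lossless colour
             * transform (the Pegasus-style RCT this decoder flags elsewhere): a
             * green-like base value is recovered first, red and blue are rebuilt
             * by adding it back, and the fourth component is copied through
             * unchanged. */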
            for (i = 0; i < nb_components; i++) {
                for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
            } else if (s->bits == 9) {
                for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                    ((uint16_t*)ptr)[4*mb_x + c] = buffer[mb_x][i];
        } else if (s->rct) {
            for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                ptr[3*mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
                ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
                ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
            for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
                ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
        } else if (s->bayer) {
            if (nb_components == 1) {
                for (mb_x = 0; mb_x < width; mb_x++)
                    ((uint16_t*)ptr)[mb_x] = buffer[mb_x][0];
            } else if (nb_components == 2) {
                for (mb_x = 0; mb_x < width; mb_x++) {
                    ((uint16_t*)ptr)[2*mb_x + 0] = buffer[mb_x][0];
                    ((uint16_t*)ptr)[2*mb_x + 1] = buffer[mb_x][1];
            for (i = 0; i < nb_components; i++) {
                for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
            } else if (s->bits == 9) {
                for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                    ((uint16_t*)ptr)[3*mb_x + 2 - c] = buffer[mb_x][i];
                                 int point_transform, int nb_components)
    int i, mb_x, mb_y, mask;
    int resync_mb_y = 0;
    int resync_mb_x = 0;
    point_transform += bits - s->bits;
    mask = ((1 << s->bits) - 1) << point_transform;
    av_assert0(nb_components >= 1 && nb_components <= 4);
    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
        for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
            if (!mb_x || mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || s->interlaced) {
                int toprow  = mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x;
                int leftcol = !mb_x || mb_y == resync_mb_y && mb_x == resync_mb_x;
                for (i = 0; i < nb_components; i++) {
                    int n, h, v, x, y, c, j, linesize;
                    if (bits > 8) linesize /= 2;
                    for (j = 0; j < n; j++) {
                        if (   h * mb_x + x >= s->width
                            || v * mb_y + y >= s->height) {
                        } else if (bits <= 8) {
                            ptr = s->picture_ptr->data[c] + (linesize * (v * mb_y + y)) + (h * mb_x + x);
                            if (x == 0 && leftcol) {
                                pred = 1 << (bits - 1);
                                if (x == 0 && leftcol) {
                                    pred = ptr[-linesize];
                                    PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
                                ptr += linesize >> 1;
                            *ptr = pred + ((unsigned)dc << point_transform);
                            ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x));
                            if (x == 0 && leftcol) {
                                pred = 1 << (bits - 1);
                                if (x == 0 && leftcol) {
                                    pred = ptr16[-linesize];
                                    PREDICT(pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);
                                ptr16 += linesize >> 1;
                            *ptr16 = pred + ((unsigned)dc << point_transform);
                for (i = 0; i < nb_components; i++) {
                    int n, h, v, x, y, c, j, linesize, dc;
                    if (bits > 8) linesize /= 2;
                    for (j = 0; j < n; j++) {
                        if (   h * mb_x + x >= s->width
                            || v * mb_y + y >= s->height) {
                        } else if (bits <= 8) {
                                  (linesize * (v * mb_y + y)) +
                            PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
                            *ptr = pred + ((unsigned)dc << point_transform);
                            ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x));
                            PREDICT(pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);
                            *ptr16 = pred + ((unsigned)dc << point_transform);
                                            int linesize, int lowres)
    case 1: copy_block4(dst, src, linesize, linesize, 4);
    case 2: copy_block2(dst, src, linesize, linesize, 2);
    case 3: *dst = *src;
    int block_x, block_y;
        for (block_y = 0; block_y < size; block_y++)
            for (block_x = 0; block_x < size; block_x++)
                *(uint16_t*)(ptr + 2*block_x + block_y*linesize) <<= 16 - s->bits;
        for (block_y = 0; block_y < size; block_y++)
            for (block_x = 0; block_x < size; block_x++)
                *(ptr + block_x + block_y*linesize) <<= 8 - s->bits;
                             int Al, const uint8_t *mb_bitmask,
                             int mb_bitmask_size,
    int i, mb_x, mb_y, chroma_h_shift, chroma_v_shift, chroma_width, chroma_height;
    int bytes_per_pixel = 1 + (s->bits > 8);
    for (i = 0; i < nb_components; i++) {
        reference_data[c] = reference ? reference->data[c] : NULL;
    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
        for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
            for (i = 0; i < nb_components; i++) {
                int n, h, v, x, y, c, j;
                for (j = 0; j < n; j++) {
                    block_offset = (((linesize[c] * (v * mb_y + y) * 8) +
                                     (h * mb_x + x) * 8 * bytes_per_pixel) >> s->avctx->lowres);
                        block_offset += linesize[c] >> 1;
                    if (   8*(h * mb_x + x) < ((c == 1) || (c == 2) ? chroma_width  : s->width)
                        && 8*(v * mb_y + y) < ((c == 1) || (c == 2) ? chroma_height : s->height)) {
                        ptr = data[c] + block_offset;
                                   "error y=%d x=%d\n", mb_y, mb_x);
                                   "error y=%d x=%d\n", mb_y, mb_x);
                    ff_dlog(s->avctx, "mb: %d %d processed\n", mb_y, mb_x);
                            (v * mb_y + y) * 8, (h * mb_x + x) * 8);
                                            int se, int Ah, int Al)
    if (se < ss || se > 63) {
    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
        for (mb_x = 0; mb_x < s->mb_width; mb_x++, block++, last_nnz++) {
                                            quant_matrix, ss, se, Al, &EOBRUN);
                                            quant_matrix, ss, se, Al, &EOBRUN);
                       "error y=%d x=%d\n", mb_y, mb_x);
    const int bytes_per_pixel = 1 + (s->bits > 8);
    const int block_size = s->lossless ? 1 : 8;
        int mb_width  = (s->width  + h * block_size - 1) / (h * block_size);
        int mb_height = (s->height + v * block_size - 1) / (v * block_size);
            data += linesize >> 1;
        for (mb_y = 0; mb_y < mb_height; mb_y++) {
            for (mb_x = 0; mb_x < mb_width; mb_x++, block++) {
                        int mb_bitmask_size, const AVFrame *reference)
    int len, nb_components, i, h, v, predictor, point_transform;
    const int block_size = s->lossless ? 1 : 8;
    int ilv, prev_shift;
               "Can not process SOS before SOF, skipping\n");
               "decode_sos: nb_components (%d)",
    if (len != 6 + 2 * nb_components) {
    for (i = 0; i < nb_components; i++) {
                   "decode_sos: index(%d) out of components\n", index);
            index = (index + 2) % 3;
    prev_shift = point_transform = 0;
    if (nb_components > 1) {
    } else if (!s->ls) {
        s->mb_width  = (s->width + h * block_size - 1) / (h * block_size);
           s->lossless ? "lossless" : "sequential DCT", s->rgb ? "RGB" : "",
    for (i = 0; i < nb_components; i++)
                                           point_transform, ilv)) < 0)
                                           nb_components)) < 0)
                                           point_transform)) < 0)
                                  prev_shift, point_transform,
                                  mb_bitmask, mb_bitmask_size, reference)) < 0)
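/* SOS payload being parsed above (T.81 B.2.3): after the length come Ns
 * component selectors, each with its DC/AC table ids, then Ss, Se and Ah/Al.
 * Baseline DCT always uses Ss=0, Se=63, Ah=Al=0; progressive scans use them to
 * pick the spectral band and successive-approximation shift; in lossless JPEG
 * the Ss field carries the predictor number and Al the point transform, which
 * is why the same parser feeds both the DCT and the ljpeg scan paths. */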
    int t_w, t_h, v1, v2;
               "mjpeg: JFIF header found (version: %x.%x) SAR=%d/%d\n",
        if (len - 10 - (t_w * t_h * 3) > 0)
            len -= t_w * t_h * 3;
               "Pegasus lossless jpeg header found\n");
    if (id == AV_RL32("colr") && len > 0) {
    if (id == AV_RL32("xfrm") && len > 0) {
        } else if (type == 1) {
            if (!(flags & 0x04)) {
        int ret, le, ifd_offset, bytes_read;
        unsigned nummarkers;
        if (nummarkers == 0) {
        } else if (s->iccnum != 0 && nummarkers != s->iccnum) {
        } else if (seqno > nummarkers) {
2148 for (i = 0; i < len - 2; i++)
2150 if (i > 0 && cbuf[i - 1] ==
'\n')
2159 if (!strncmp(cbuf,
"AVID", 4)) {
2161 }
else if (!strcmp(cbuf,
"CS=ITU601"))
2163 else if ((!strncmp(cbuf,
"Intel(R) JPEG Library, version 1", 32) && s->
avctx->
codec_tag) ||
2164 (!strncmp(cbuf,
"Metasoft MJPEG Codec", 20)))
2166 else if (!strcmp(cbuf,
"MULTISCOPE II")) {
    buf_ptr = *pbuf_ptr;
    while (buf_end - buf_ptr > 1) {
        if ((v == 0xff) && (v2 >= SOF0) && (v2 <= COM) && buf_ptr < buf_end) {
    ff_dlog(NULL, "find_marker skipped %d bytes\n", skipped);
    *pbuf_ptr = buf_ptr;
                          const uint8_t **unescaped_buf_ptr,
                          int *unescaped_buf_size)
    if (start_code == SOS && !s->ls) {
#define copy_data_segment(skip) do {               \
        ptrdiff_t length = (ptr - src) - (skip);   \
            memcpy(dst, src, length);              \
        while (ptr < buf_end) {
            while (ptr < buf_end && x == 0xff) {
                    if (x < RST0 || x > RST7) {
#undef copy_data_segment
        *unescaped_buf_ptr  = s->buffer;
        *unescaped_buf_size = dst - s->buffer;
        memset(s->buffer + *unescaped_buf_size, 0,
               (buf_end - *buf_ptr) - (dst - s->buffer));
    } else if (start_code == SOS && s->ls) {
        while (src + t < buf_end) {
            while ((src + t < buf_end) && x == 0xff)
            if (x == 0xFF && b < t) {
        *unescaped_buf_ptr  = dst;
        *unescaped_buf_size = (bit_count + 7) >> 3;
        memset(s->buffer + *unescaped_buf_size, 0,
        *unescaped_buf_ptr  = *buf_ptr;
        *unescaped_buf_size = buf_end - *buf_ptr;
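/* Why the SOS data is unescaped at all: inside entropy-coded segments every
 * literal 0xFF byte is followed by a stuffed 0x00 so that byte-aligned markers
 * stay unambiguous (T.81 B.1.1.5).  A minimal stand-alone version of that
 * removal, with illustrative names and none of the RST/overflow handling done
 * above: */
static size_t jpeg_unstuff_sketch(uint8_t *dst, const uint8_t *src, const uint8_t *end)
{
    uint8_t *dst0 = dst;

    while (src < end) {
        uint8_t b = *src++;
        if (b != 0xff) {
            *dst++ = b;
            continue;
        }
        if (src < end && *src == 0x00) {
            *dst++ = 0xff;         /* keep the data byte, drop the stuffing */
            src++;
        } else {
            break;                 /* a real marker begins here */
        }
    }
    return dst - dst0;
}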
    for (i = 0; i < s->iccnum; i++)
    int buf_size = avpkt->size;
    const uint8_t *buf_end, *buf_ptr;
    const uint8_t *unescaped_buf_ptr;
    int unescaped_buf_size;
    buf_end = buf + buf_size;
    while (buf_ptr < buf_end) {
                                          &unescaped_buf_size);
        if (start_code < 0) {
        } else if (unescaped_buf_size > INT_MAX / 8) {
                   "MJPEG packet 0x%x too big (%d/%d), corrupt data?\n",
                   start_code, unescaped_buf_size, buf_size);
               start_code, buf_end - buf_ptr);
        if (start_code >= RST0 && start_code <= RST7) {
                   "restart marker: %d\n", start_code & 0x0f);
        } else if (start_code >= APP0 && start_code <= APP15) {
        } else if (start_code == COM) {
        } else if (start_code == DQT) {
                   (start_code == SOF48 || start_code == LSE)) {
        switch (start_code) {
        switch (start_code) {
            if (start_code == SOF0)
                       "Found EOI before any SOF, ignoring\n");
                goto the_end_no_picture;
                int qpw = (s->width + 15) / 16;
                memset(qp_table_buf->data, qp, qpw);
                   "mjpeg: unsupported coding type (%x)\n", start_code);
               "marker parser used %d bytes (%d bits)\n",
        for (i = 0; i < h; i++) {
            if (is16bit) ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 2];
            else                      line[w - 1] =             line[(w - 1) / 2];
            for (index = w - 2; index > 0; index--) {
                    ((uint16_t*)line)[index] = (((uint16_t*)line)[index / 2] + ((uint16_t*)line)[(index + 1) / 2]) >> 1;
                    line[index] = (line[index / 2] + line[(index + 1) / 2]) >> 1;
                ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 3];
                    ((uint16_t*)line)[w - 2] = ((uint16_t*)line)[w - 1];
                line[w - 1] = line[(w - 1) / 3];
                    line[w - 2] = line[w - 1];
            for (index = w - 3; index > 0; index--) {
                line[index] = (line[index / 3] + line[(index + 1) / 3] + line[(index + 2) / 3] + 1) / 3;
        for (i = h - 1; i; i--) {
            if (s->upscale_v[p] != 2 && (src1 == src2 || i == h - 1)) {
                memcpy(dst, src1, w);
                for (index = 0; index < w; index++)
                    dst[index] = (src1[index] + src2[index]) >> 1;
        if (index && index < 3) {
            for (i = 0; i < h / 2; i++) {
                    FFSWAP(int, dst[j], dst2[j]);
        for (i = 0; i < h; i++) {
            for (index = 0; index < 4; index++) {
            for (j = 0; j < w; j++) {
                int r = dst[0][j] * k;
                int g = dst[1][j] * k;
                int b = dst[2][j] * k;
                dst[0][j] = g * 257 >> 16;
                dst[1][j] = b * 257 >> 16;
                dst[2][j] = r * 257 >> 16;
        for (i = 0; i < h; i++) {
            for (index = 0; index < 4; index++) {
            for (j = 0; j < w; j++) {
                int r = (255 - dst[0][j]) * k;
                int g = (128 - dst[1][j]) * k;
                int b = (128 - dst[2][j]) * k;
                dst[0][j] =  r * 257 >> 16;
                dst[1][j] = (g * 257 >> 16) + 128;
                dst[2][j] = (b * 257 >> 16) + 128;
    for (i = 0; i < s->iccnum; i++)
    for (i = 0; i < s->iccnum; i++) {
    return buf_ptr - buf;
    for (i = 0; i < 3; i++) {
        for (j = 0; j < 4; j++)
#if CONFIG_MJPEG_DECODER
#define OFFSET(x) offsetof(MJpegDecodeContext, x)
#define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
    { "extern_huff", "Use external huffman table.",
static const AVClass mjpegdec_class = {
    .priv_class     = &mjpegdec_class,
#if CONFIG_MJPEG_NVDEC_HWACCEL
#if CONFIG_MJPEG_VAAPI_HWACCEL
#if CONFIG_THP_DECODER