![]() |
![]() |
![]() |
GStreamer Base Plugins 1.0 Library Reference Manual | ![]() |
---|---|---|---|---|
Top | Description |
#include <gst/video/video.h> gboolean gst_video_calculate_display_ratio (guint *dar_n
,guint *dar_d
,guint video_width
,guint video_height
,guint video_par_n
,guint video_par_d
,guint display_par_n
,guint display_par_d
); gboolean gst_video_guess_framerate (GstClockTime duration
,gint *dest_n
,gint *dest_d
); void (*GstVideoConvertSampleCallback) (GstSample *sample
,GError *error
,gpointer user_data
); GstSample * gst_video_convert_sample (GstSample *sample
,const GstCaps *to_caps
,GstClockTime timeout
,GError **error
); void gst_video_convert_sample_async (GstSample *sample
,const GstCaps *to_caps
,GstClockTime timeout
,GstVideoConvertSampleCallback callback
,gpointer user_data
,GDestroyNotify destroy_notify
); struct GstVideoAlignment; void gst_video_alignment_reset (GstVideoAlignment *align
); #define GST_META_TAG_VIDEO_STR #define GST_META_TAG_VIDEO_ORIENTATION_STR #define GST_META_TAG_VIDEO_SIZE_STR #define GST_META_TAG_VIDEO_COLORSPACE_STR GstEvent * gst_video_event_new_still_frame (gboolean in_still
); gboolean gst_video_event_parse_still_frame (GstEvent *event
,gboolean *in_still
); GstEvent * gst_video_event_new_downstream_force_key_unit (GstClockTime timestamp
,GstClockTime stream_time
,GstClockTime running_time
,gboolean all_headers
,guint count
); gboolean gst_video_event_parse_downstream_force_key_unit (GstEvent *event
,GstClockTime *timestamp
,GstClockTime *stream_time
,GstClockTime *running_time
,gboolean *all_headers
,guint *count
); GstEvent * gst_video_event_new_upstream_force_key_unit (GstClockTime running_time
,gboolean all_headers
,guint count
); gboolean gst_video_event_parse_upstream_force_key_unit (GstEvent *event
,GstClockTime *running_time
,gboolean *all_headers
,guint *count
); gboolean gst_video_event_is_force_key_unit (GstEvent *event
); enum GstVideoFormat; #define GST_VIDEO_MAX_PLANES #define GST_VIDEO_MAX_COMPONENTS struct GstVideoFormatInfo; enum GstVideoFormatFlags; enum GstVideoPackFlags; void (*GstVideoFormatUnpack) (const GstVideoFormatInfo *info
,GstVideoPackFlags flags
,gpointer dest
,const gpointer data[GST_VIDEO_MAX_PLANES]
,const gint stride[GST_VIDEO_MAX_PLANES]
,gint x
,gint y
,gint width
); void (*GstVideoFormatPack) (const GstVideoFormatInfo *info
,GstVideoPackFlags flags
,const gpointer src
,gint sstride
,gpointer data[GST_VIDEO_MAX_PLANES]
,const gint stride[GST_VIDEO_MAX_PLANES]
,GstVideoChromaSite chroma_site
,gint y
,gint width
); #define GST_VIDEO_FORMAT_INFO_FORMAT (info) #define GST_VIDEO_FORMAT_INFO_NAME (info) #define GST_VIDEO_FORMAT_INFO_FLAGS (info) #define GST_VIDEO_FORMAT_INFO_IS_YUV (info) #define GST_VIDEO_FORMAT_INFO_IS_RGB (info) #define GST_VIDEO_FORMAT_INFO_IS_GRAY (info) #define GST_VIDEO_FORMAT_INFO_HAS_ALPHA (info) #define GST_VIDEO_FORMAT_INFO_IS_LE (info) #define GST_VIDEO_FORMAT_INFO_HAS_PALETTE (info) #define GST_VIDEO_FORMAT_INFO_IS_COMPLEX (info) #define GST_VIDEO_FORMAT_INFO_BITS (info) #define GST_VIDEO_FORMAT_INFO_N_COMPONENTS (info) #define GST_VIDEO_FORMAT_INFO_SHIFT (info, c) #define GST_VIDEO_FORMAT_INFO_DEPTH (info, c) #define GST_VIDEO_FORMAT_INFO_PSTRIDE (info, c) #define GST_VIDEO_FORMAT_INFO_N_PLANES (info) #define GST_VIDEO_FORMAT_INFO_PLANE (info, c) #define GST_VIDEO_FORMAT_INFO_POFFSET (info, c) #define GST_VIDEO_FORMAT_INFO_W_SUB (info, c) #define GST_VIDEO_FORMAT_INFO_H_SUB (info, c) #define GST_VIDEO_SUB_SCALE (scale, val) #define GST_VIDEO_FORMAT_INFO_SCALE_WIDTH (info, c, w) #define GST_VIDEO_FORMAT_INFO_SCALE_HEIGHT (info, c, h) #define GST_VIDEO_FORMAT_INFO_DATA (info, planes, comp) #define GST_VIDEO_FORMAT_INFO_STRIDE (info, strides, comp) #define GST_VIDEO_FORMAT_INFO_OFFSET (info, offsets, comp) #define GST_VIDEO_FORMAT_INFO_TILE_MODE (info) #define GST_VIDEO_FORMAT_INFO_TILE_WS (info) #define GST_VIDEO_FORMAT_INFO_TILE_HS (info) GstVideoFormat gst_video_format_from_masks (gint depth
,gint bpp
,gint endianness
,guint red_mask
,guint green_mask
,guint blue_mask
,guint alpha_mask
); GstVideoFormat gst_video_format_from_fourcc (guint32 fourcc
); guint32 gst_video_format_to_fourcc (GstVideoFormat format
); GstVideoFormat gst_video_format_from_string (const gchar *format
); const gchar * gst_video_format_to_string (GstVideoFormat format
); const GstVideoFormatInfo * gst_video_format_get_info (GstVideoFormat format
); #define GST_VIDEO_SIZE_RANGE #define GST_VIDEO_FPS_RANGE #define GST_VIDEO_FORMATS_ALL #define GST_VIDEO_CAPS_MAKE (format) enum GstVideoColorRange; void gst_video_color_range_offsets (GstVideoColorRange range
,const GstVideoFormatInfo *info
,gint offset[GST_VIDEO_MAX_COMPONENTS]
,gint scale[GST_VIDEO_MAX_COMPONENTS]
); enum GstVideoColorMatrix; gboolean gst_video_color_matrix_get_Kr_Kb (GstVideoColorMatrix matrix
,gdouble *Kr
,gdouble *Kb
); enum GstVideoColorPrimaries; enum GstVideoTransferFunction; gdouble gst_video_color_transfer_decode (GstVideoTransferFunction func
,gdouble val
); gdouble gst_video_color_transfer_encode (GstVideoTransferFunction func
,gdouble val
); GstVideoColorimetry; gboolean gst_video_colorimetry_matches (GstVideoColorimetry *cinfo
,const gchar *color
); gboolean gst_video_colorimetry_is_equal (const GstVideoColorimetry *cinfo
,const GstVideoColorimetry *other
); gboolean gst_video_colorimetry_from_string (GstVideoColorimetry *cinfo
,const gchar *color
); gchar * gst_video_colorimetry_to_string (GstVideoColorimetry *cinfo
); struct GstVideoInfo; enum GstVideoInterlaceMode; enum GstVideoMultiviewMode; enum GstVideoMultiviewFramePacking; enum GstVideoMultiviewFlags; enum GstVideoFlags; #define GST_VIDEO_INFO_FORMAT (i) #define GST_VIDEO_INFO_NAME (i) #define GST_VIDEO_INFO_IS_YUV (i) #define GST_VIDEO_INFO_IS_RGB (i) #define GST_VIDEO_INFO_IS_GRAY (i) #define GST_VIDEO_INFO_HAS_ALPHA (i) #define GST_VIDEO_INFO_INTERLACE_MODE (i) #define GST_VIDEO_INFO_IS_INTERLACED (i) #define GST_VIDEO_INFO_FLAGS (i) #define GST_VIDEO_INFO_WIDTH (i) #define GST_VIDEO_INFO_HEIGHT (i) #define GST_VIDEO_INFO_SIZE (i) #define GST_VIDEO_INFO_VIEWS (i) #define GST_VIDEO_INFO_PAR_N (i) #define GST_VIDEO_INFO_PAR_D (i) #define GST_VIDEO_INFO_FPS_N (i) #define GST_VIDEO_INFO_FPS_D (i) #define GST_VIDEO_INFO_FLAG_IS_SET (i, flag) #define GST_VIDEO_INFO_FLAG_SET (i, flag) #define GST_VIDEO_INFO_FLAG_UNSET (i, flag) #define GST_VIDEO_INFO_N_PLANES (i) #define GST_VIDEO_INFO_PLANE_OFFSET (i, p) #define GST_VIDEO_INFO_PLANE_STRIDE (i, p) #define GST_VIDEO_INFO_N_COMPONENTS (i) #define GST_VIDEO_INFO_COMP_DEPTH (i, c) #define GST_VIDEO_INFO_COMP_DATA (i, d, c) #define GST_VIDEO_INFO_COMP_OFFSET (i, c) #define GST_VIDEO_INFO_COMP_STRIDE (i, c) #define GST_VIDEO_INFO_COMP_WIDTH (i, c) #define GST_VIDEO_INFO_COMP_HEIGHT (i, c) #define GST_VIDEO_INFO_COMP_PLANE (i, c) #define GST_VIDEO_INFO_COMP_PSTRIDE (i, c) #define GST_VIDEO_INFO_COMP_POFFSET (i, c) #define GST_VIDEO_INFO_CHROMA_SITE (i) #define GST_VIDEO_INFO_COLORIMETRY (i) #define GST_VIDEO_INFO_MULTIVIEW_FLAGS (i) #define GST_VIDEO_INFO_MULTIVIEW_MODE (i) void gst_video_info_init (GstVideoInfo *info
); GstVideoInfo * gst_video_info_new (void
); GstVideoInfo * gst_video_info_copy (const GstVideoInfo *info
); void gst_video_info_free (GstVideoInfo *info
); void gst_video_info_set_format (GstVideoInfo *info
,GstVideoFormat format
,guint width
,guint height
); gboolean gst_video_info_from_caps (GstVideoInfo *info
,const GstCaps *caps
); GstCaps * gst_video_info_to_caps (GstVideoInfo *info
); gboolean gst_video_info_convert (GstVideoInfo *info
,GstFormat src_format
,gint64 src_value
,GstFormat dest_format
,gint64 *dest_value
); gboolean gst_video_info_is_equal (const GstVideoInfo *info
,const GstVideoInfo *other
); void gst_video_info_align (GstVideoInfo *info
,GstVideoAlignment *align
); struct GstVideoFrame; enum GstVideoFrameFlags; gboolean gst_video_frame_map_id (GstVideoFrame *frame
,GstVideoInfo *info
,GstBuffer *buffer
,gint id
,GstMapFlags flags
); gboolean gst_video_frame_map (GstVideoFrame *frame
,GstVideoInfo *info
,GstBuffer *buffer
,GstMapFlags flags
); void gst_video_frame_unmap (GstVideoFrame *frame
); gboolean gst_video_frame_copy (GstVideoFrame *dest
,const GstVideoFrame *src
); gboolean gst_video_frame_copy_plane (GstVideoFrame *dest
,const GstVideoFrame *src
,guint plane
); #define GST_VIDEO_FRAME_FORMAT (f) #define GST_VIDEO_FRAME_WIDTH (f) #define GST_VIDEO_FRAME_HEIGHT (f) #define GST_VIDEO_FRAME_SIZE (f) #define GST_VIDEO_FRAME_FLAGS (f) #define GST_VIDEO_FRAME_FLAG_IS_SET (f, fl) #define GST_VIDEO_FRAME_IS_INTERLACED (f) #define GST_VIDEO_FRAME_IS_TFF (f) #define GST_VIDEO_FRAME_IS_RFF (f) #define GST_VIDEO_FRAME_IS_ONEFIELD (f) #define GST_VIDEO_FRAME_N_PLANES (f) #define GST_VIDEO_FRAME_PLANE_DATA (f, p) #define GST_VIDEO_FRAME_PLANE_OFFSET (f, p) #define GST_VIDEO_FRAME_PLANE_STRIDE (f, p) #define GST_VIDEO_FRAME_N_COMPONENTS (f) #define GST_VIDEO_FRAME_COMP_DEPTH (f, c) #define GST_VIDEO_FRAME_COMP_DATA (f, c) #define GST_VIDEO_FRAME_COMP_STRIDE (f, c) #define GST_VIDEO_FRAME_COMP_OFFSET (f, c) #define GST_VIDEO_FRAME_COMP_WIDTH (f, c) #define GST_VIDEO_FRAME_COMP_HEIGHT (f, c) #define GST_VIDEO_FRAME_COMP_PLANE (f, c) #define GST_VIDEO_FRAME_COMP_PSTRIDE (f, c) #define GST_VIDEO_FRAME_COMP_POFFSET (f, c) enum GstVideoBufferFlags; enum GstVideoTileType; enum GstVideoTileMode; guint gst_video_tile_get_index (GstVideoTileMode mode
,gint x
,gint y
,gint x_tiles
,gint y_tiles
); #define GST_VIDEO_TILE_MAKE_MODE (num, type) #define GST_VIDEO_TILE_MODE_TYPE (mode) #define GST_VIDEO_TILE_MODE_IS_INDEXED (mode) #define GST_VIDEO_TILE_MAKE_STRIDE (x_tiles, y_tiles) #define GST_VIDEO_TILE_X_TILES (stride) #define GST_VIDEO_TILE_Y_TILES (stride) gboolean gst_video_blend (GstVideoFrame *dest
,GstVideoFrame *src
,gint x
,gint y
,gfloat global_alpha
); void gst_video_blend_scale_linear_RGBA (GstVideoInfo *src
,GstBuffer *src_buffer
,gint dest_height
,gint dest_width
,GstVideoInfo *dest
,GstBuffer **dest_buffer
); GstVideoConverter; GstVideoConverter * gst_video_converter_new (GstVideoInfo *in_info
,GstVideoInfo *out_info
,GstStructure *config
); void gst_video_converter_free (GstVideoConverter *convert
); const GstStructure * gst_video_converter_get_config (GstVideoConverter *convert
); gboolean gst_video_converter_set_config (GstVideoConverter *convert
,GstStructure *config
); void gst_video_converter_frame (GstVideoConverter *convert
,const GstVideoFrame *src
,GstVideoFrame *dest
); const GValue * gst_video_multiview_get_mono_modes (void
); const GValue * gst_video_multiview_get_unpacked_modes (void
); const GValue * gst_video_multiview_get_doubled_height_modes (void
); const GValue * gst_video_multiview_get_doubled_size_modes (void
); const GValue * gst_video_multiview_get_doubled_width_modes (void
); GstVideoMultiviewMode gst_video_multiview_mode_from_caps_string (const gchar *caps_mview_mode
); const gchar * gst_video_multiview_mode_to_caps_string (GstVideoMultiviewMode mview_mode
); gboolean gst_video_multiview_guess_half_aspect (GstVideoMultiviewMode mv_mode
,guint width
,guint height
,guint par_n
,guint par_d
); void gst_video_multiview_video_info_change_mode (GstVideoInfo *info
,GstVideoMultiviewMode out_mview_mode
,GstVideoMultiviewFlags out_mview_flags
);
This library contains some helper functions and includes the videosink and videofilter base classes.
gboolean gst_video_calculate_display_ratio (guint *dar_n
,guint *dar_d
,guint video_width
,guint video_height
,guint video_par_n
,guint video_par_d
,guint display_par_n
,guint display_par_d
);
Given the Pixel Aspect Ratio and size of an input video frame, and the pixel aspect ratio of the intended display device, calculates the actual display ratio the video will be rendered with.
|
Numerator of the calculated display_ratio. [out] |
|
Denominator of the calculated display_ratio. [out] |
|
Width of the video frame in pixels |
|
Height of the video frame in pixels |
|
Numerator of the pixel aspect ratio of the input video. |
|
Denominator of the pixel aspect ratio of the input video. |
|
Numerator of the pixel aspect ratio of the display device |
|
Denominator of the pixel aspect ratio of the display device |
Returns : |
A boolean indicating success and a calculated Display Ratio in the dar_n and dar_d parameters. The return value is FALSE in the case of integer overflow or other error. |
gboolean gst_video_guess_framerate (GstClockTime duration
,gint *dest_n
,gint *dest_d
);
Given the nominal duration of one video frame, this function will check some standard framerates for a close match (within 0.1%) and return one if possible.
It will calculate an arbitrary framerate if no close
match was found, and return FALSE
.
It returns FALSE
if a duration of 0 is passed.
|
Nominal duration of one frame |
|
Numerator of the calculated framerate. [out][allow-none] |
|
Denominator of the calculated framerate. [out][allow-none] |
Returns : |
TRUE if a close "standard" framerate was
recognised, and FALSE otherwise. |
Since 1.6
void (*GstVideoConvertSampleCallback) (GstSample *sample
,GError *error
,gpointer user_data
);
GstSample * gst_video_convert_sample (GstSample *sample
,const GstCaps *to_caps
,GstClockTime timeout
,GError **error
);
Converts a raw video buffer into the specified output caps.
The output caps can be any raw video formats or any image formats (jpeg, png, ...).
The width, height and pixel-aspect-ratio can also be specified in the output caps.
|
a GstSample |
|
the GstCaps to convert to |
|
the maximum amount of time allowed for the processing. |
|
pointer to a GError. Can be NULL . |
Returns : |
The converted GstSample, or NULL if an error happened (in which case err
will point to the GError). |
void gst_video_convert_sample_async (GstSample *sample
,const GstCaps *to_caps
,GstClockTime timeout
,GstVideoConvertSampleCallback callback
,gpointer user_data
,GDestroyNotify destroy_notify
);
Converts a raw video buffer into the specified output caps.
The output caps can be any raw video formats or any image formats (jpeg, png, ...).
The width, height and pixel-aspect-ratio can also be specified in the output caps.
callback
will be called after conversion, when an error occurred or if conversion didn't
finish after timeout
. callback
will always be called from the thread default
GMainContext
, see g_main_context_get_thread_default()
. If GLib before 2.22 is used,
this will always be the global default main context.
destroy_notify
will be called after the callback was called and user_data
is not needed
anymore.
|
a GstSample |
|
the GstCaps to convert to |
|
the maximum amount of time allowed for the processing. |
|
GstVideoConvertSampleCallback that will be called after conversion. |
|
extra data that will be passed to the callback
|
|
GDestroyNotify to be called after user_data is not needed anymore |
struct GstVideoAlignment { guint padding_top; guint padding_bottom; guint padding_left; guint padding_right; guint stride_align[GST_VIDEO_MAX_PLANES]; };
Extra alignment parameters for the memory of video buffers. This structure is usually used to configure the bufferpool if it supports the GST_BUFFER_POOL_OPTION_VIDEO_ALIGNMENT.
void gst_video_alignment_reset (GstVideoAlignment *align
);
Set align
to its default values with no padding and no alignment.
|
a GstVideoAlignment |
#define GST_META_TAG_VIDEO_STR "video"
This metadata is relevant for video streams.
Since 1.2
#define GST_META_TAG_VIDEO_ORIENTATION_STR "orientation"
This metadata stays relevant as long as video orientation is unchanged.
Since 1.2
#define GST_META_TAG_VIDEO_SIZE_STR "size"
This metadata stays relevant as long as video size is unchanged.
Since 1.2
#define GST_META_TAG_VIDEO_COLORSPACE_STR "colorspace"
This metadata stays relevant as long as video colorspace is unchanged.
Since 1.2
GstEvent * gst_video_event_new_still_frame (gboolean in_still
);
Creates a new Still Frame event. If in_still
is TRUE
, then the event
represents the start of a still frame sequence. If it is FALSE
, then
the event ends a still frame sequence.
To parse an event created by gst_video_event_new_still_frame()
use
gst_video_event_parse_still_frame()
.
|
boolean value for the still-frame state of the event. |
Returns : |
The new GstEvent |
gboolean gst_video_event_parse_still_frame (GstEvent *event
,gboolean *in_still
);
Parse a GstEvent, identify if it is a Still Frame event, and return the still-frame state from the event if it is. If the event represents the start of a still frame, the in_still variable will be set to TRUE, otherwise FALSE. It is OK to pass NULL for the in_still variable in order to just check whether the event is a valid still-frame event.
Create a still frame event using gst_video_event_new_still_frame()
|
A GstEvent to parse |
|
A boolean to receive the still-frame status from the event, or NULL |
Returns : |
TRUE if the event is a valid still-frame event. FALSE if not |
GstEvent * gst_video_event_new_downstream_force_key_unit (GstClockTime timestamp
,GstClockTime stream_time
,GstClockTime running_time
,gboolean all_headers
,guint count
);
Creates a new downstream force key unit event. A downstream force key unit event can be sent down the pipeline to request downstream elements to produce a key unit. A downstream force key unit event must also be sent when handling an upstream force key unit event to notify downstream that the latter has been handled.
To parse an event created by gst_video_event_new_downstream_force_key_unit()
use
gst_video_event_parse_downstream_force_key_unit()
.
|
the timestamp of the buffer that starts a new key unit |
|
the stream_time of the buffer that starts a new key unit |
|
the running_time of the buffer that starts a new key unit |
|
TRUE to produce headers when starting a new key unit |
|
integer that can be used to number key units |
Returns : |
The new GstEvent |
gboolean gst_video_event_parse_downstream_force_key_unit (GstEvent *event
,GstClockTime *timestamp
,GstClockTime *stream_time
,GstClockTime *running_time
,gboolean *all_headers
,guint *count
);
Get timestamp, stream-time, running-time, all-headers and count in the force
key unit event. See gst_video_event_new_downstream_force_key_unit()
for a
full description of the downstream force key unit event.
running_time
will be adjusted for any pad offsets of pads it was passing through.
|
A GstEvent to parse |
|
A pointer to the timestamp in the event. [out] |
|
A pointer to the stream-time in the event. [out] |
|
A pointer to the running-time in the event. [out] |
|
A pointer to the all_headers flag in the event. [out] |
|
A pointer to the count field of the event. [out] |
Returns : |
TRUE if the event is a valid downstream force key unit event. |
GstEvent * gst_video_event_new_upstream_force_key_unit (GstClockTime running_time
,gboolean all_headers
,guint count
);
Creates a new upstream force key unit event. An upstream force key unit event can be sent to request upstream elements to produce a key unit.
running_time
can be set to request a new key unit at a specific
running_time. If set to GST_CLOCK_TIME_NONE, upstream elements will produce a
new key unit as soon as possible.
To parse an event created by gst_video_event_new_upstream_force_key_unit()
use
gst_video_event_parse_upstream_force_key_unit()
.
|
the running_time at which a new key unit should be produced |
|
TRUE to produce headers when starting a new key unit |
|
integer that can be used to number key units |
Returns : |
The new GstEvent |
gboolean gst_video_event_parse_upstream_force_key_unit (GstEvent *event
,GstClockTime *running_time
,gboolean *all_headers
,guint *count
);
Get running-time, all-headers and count in the force key unit event. See
gst_video_event_new_upstream_force_key_unit()
for a full description of the
upstream force key unit event.
Create an upstream force key unit event using gst_video_event_new_upstream_force_key_unit()
running_time
will be adjusted for any pad offsets of pads it was passing through.
|
A GstEvent to parse |
|
A pointer to the running_time in the event. [out] |
|
A pointer to the all_headers flag in the event. [out] |
|
A pointer to the count field in the event. [out] |
Returns : |
TRUE if the event is a valid upstream force-key-unit event. FALSE if not |
gboolean gst_video_event_is_force_key_unit (GstEvent *event
);
Checks if an event is a force key unit event. Returns true for both upstream and downstream force key unit events.
|
A GstEvent to check |
Returns : |
TRUE if the event is a valid force key unit event |
typedef enum { GST_VIDEO_FORMAT_UNKNOWN, GST_VIDEO_FORMAT_ENCODED, GST_VIDEO_FORMAT_I420, GST_VIDEO_FORMAT_YV12, GST_VIDEO_FORMAT_YUY2, GST_VIDEO_FORMAT_UYVY, GST_VIDEO_FORMAT_AYUV, GST_VIDEO_FORMAT_RGBx, GST_VIDEO_FORMAT_BGRx, GST_VIDEO_FORMAT_xRGB, GST_VIDEO_FORMAT_xBGR, GST_VIDEO_FORMAT_RGBA, GST_VIDEO_FORMAT_BGRA, GST_VIDEO_FORMAT_ARGB, GST_VIDEO_FORMAT_ABGR, GST_VIDEO_FORMAT_RGB, GST_VIDEO_FORMAT_BGR, GST_VIDEO_FORMAT_Y41B, GST_VIDEO_FORMAT_Y42B, GST_VIDEO_FORMAT_YVYU, GST_VIDEO_FORMAT_Y444, GST_VIDEO_FORMAT_v210, GST_VIDEO_FORMAT_v216, GST_VIDEO_FORMAT_NV12, GST_VIDEO_FORMAT_NV21, GST_VIDEO_FORMAT_GRAY8, GST_VIDEO_FORMAT_GRAY16_BE, GST_VIDEO_FORMAT_GRAY16_LE, GST_VIDEO_FORMAT_v308, GST_VIDEO_FORMAT_RGB16, GST_VIDEO_FORMAT_BGR16, GST_VIDEO_FORMAT_RGB15, GST_VIDEO_FORMAT_BGR15, GST_VIDEO_FORMAT_UYVP, GST_VIDEO_FORMAT_A420, GST_VIDEO_FORMAT_RGB8P, GST_VIDEO_FORMAT_YUV9, GST_VIDEO_FORMAT_YVU9, GST_VIDEO_FORMAT_IYU1, GST_VIDEO_FORMAT_ARGB64, GST_VIDEO_FORMAT_AYUV64, GST_VIDEO_FORMAT_r210, GST_VIDEO_FORMAT_I420_10BE, GST_VIDEO_FORMAT_I420_10LE, GST_VIDEO_FORMAT_I422_10BE, GST_VIDEO_FORMAT_I422_10LE, GST_VIDEO_FORMAT_Y444_10BE, GST_VIDEO_FORMAT_Y444_10LE, GST_VIDEO_FORMAT_GBR, GST_VIDEO_FORMAT_GBR_10BE, GST_VIDEO_FORMAT_GBR_10LE, GST_VIDEO_FORMAT_NV16, GST_VIDEO_FORMAT_NV24, GST_VIDEO_FORMAT_NV12_64Z32, GST_VIDEO_FORMAT_A420_10BE, GST_VIDEO_FORMAT_A420_10LE, GST_VIDEO_FORMAT_A422_10BE, GST_VIDEO_FORMAT_A422_10LE, GST_VIDEO_FORMAT_A444_10BE, GST_VIDEO_FORMAT_A444_10LE, GST_VIDEO_FORMAT_NV61, } GstVideoFormat;
Enum value describing the most common video formats.
Unknown or unset video format id | |
Encoded video format. Only ever use that in caps for special video formats in combination with non-system memory GstCapsFeatures where it does not make sense to specify a real video format. | |
planar 4:2:0 YUV | |
planar 4:2:0 YVU (like I420 but UV planes swapped) | |
packed 4:2:2 YUV (Y0-U0-Y1-V0 Y2-U2-Y3-V2 Y4 ...) | |
packed 4:2:2 YUV (U0-Y0-V0-Y1 U2-Y2-V2-Y3 U4 ...) | |
packed 4:4:4 YUV with alpha channel (A0-Y0-U0-V0 ...) | |
sparse rgb packed into 32 bit, space last | |
sparse reverse rgb packed into 32 bit, space last | |
sparse rgb packed into 32 bit, space first | |
sparse reverse rgb packed into 32 bit, space first | |
rgb with alpha channel last | |
reverse rgb with alpha channel last | |
rgb with alpha channel first | |
reverse rgb with alpha channel first | |
rgb | |
reverse rgb | |
planar 4:1:1 YUV | |
planar 4:2:2 YUV | |
packed 4:2:2 YUV (Y0-V0-Y1-U0 Y2-V2-Y3-U2 Y4 ...) | |
planar 4:4:4 YUV | |
packed 4:2:2 10-bit YUV, complex format | |
packed 4:2:2 16-bit YUV, Y0-U0-Y1-V1 order | |
planar 4:2:0 YUV with interleaved UV plane | |
planar 4:2:0 YUV with interleaved VU plane | |
8-bit grayscale | |
16-bit grayscale, most significant byte first | |
16-bit grayscale, least significant byte first | |
packed 4:4:4 YUV | |
rgb 5-6-5 bits per component | |
reverse rgb 5-6-5 bits per component | |
rgb 5-5-5 bits per component | |
reverse rgb 5-5-5 bits per component | |
packed 10-bit 4:2:2 YUV (U0-Y0-V0-Y1 U2-Y2-V2-Y3 U4 ...) | |
planar 4:4:2:0 AYUV | |
8-bit paletted RGB | |
planar 4:1:0 YUV | |
planar 4:1:0 YUV (like YUV9 but UV planes swapped) | |
packed 4:1:1 YUV (Cb-Y0-Y1-Cr-Y2-Y3 ...) | |
rgb with alpha channel first, 16 bits per channel | |
packed 4:4:4 YUV with alpha channel, 16 bits per channel (A0-Y0-U0-V0 ...) | |
packed 4:4:4 RGB, 10 bits per channel | |
planar 4:2:0 YUV, 10 bits per channel | |
planar 4:2:0 YUV, 10 bits per channel | |
planar 4:2:2 YUV, 10 bits per channel | |
planar 4:2:2 YUV, 10 bits per channel | |
planar 4:4:4 YUV, 10 bits per channel | |
planar 4:4:4 YUV, 10 bits per channel | |
planar 4:4:4 RGB, 8 bits per channel | |
planar 4:4:4 RGB, 10 bits per channel | |
planar 4:4:4 RGB, 10 bits per channel | |
planar 4:2:2 YUV with interleaved UV plane | |
planar 4:4:4 YUV with interleaved UV plane | |
NV12 with 64x32 tiling in zigzag pattern | |
planar 4:4:2:0 YUV, 10 bits per channel | |
planar 4:4:2:0 YUV, 10 bits per channel | |
planar 4:4:2:2 YUV, 10 bits per channel | |
planar 4:4:2:2 YUV, 10 bits per channel | |
planar 4:4:4:4 YUV, 10 bits per channel | |
planar 4:4:4:4 YUV, 10 bits per channel | |
planar 4:2:2 YUV with interleaved VU plane (Since 1.6) |
struct GstVideoFormatInfo { GstVideoFormat format; const gchar *name; const gchar *description; GstVideoFormatFlags flags; guint bits; guint n_components; guint shift[GST_VIDEO_MAX_COMPONENTS]; guint depth[GST_VIDEO_MAX_COMPONENTS]; gint pixel_stride[GST_VIDEO_MAX_COMPONENTS]; guint n_planes; guint plane[GST_VIDEO_MAX_COMPONENTS]; guint poffset[GST_VIDEO_MAX_COMPONENTS]; guint w_sub[GST_VIDEO_MAX_COMPONENTS]; guint h_sub[GST_VIDEO_MAX_COMPONENTS]; GstVideoFormat unpack_format; GstVideoFormatUnpack unpack_func; gint pack_lines; GstVideoFormatPack pack_func; GstVideoTileMode tile_mode; guint tile_ws; guint tile_hs; };
Information for a video format.
GstVideoFormat |
GstVideoFormat |
string representation of the format | |
user readable description of the format |
GstVideoFormatFlags |
GstVideoFormatFlags |
The number of bits used to pack data items. This can be less than 8 when multiple pixels are stored in a byte. For values > 8, multiple bytes should be read according to the endianness flag before applying the shift and mask. |
the number of components in the video format. | |
the number of bits to shift away to get the component data | |
the depth in bits for each component | |
the pixel stride of each component. This is the amount of bytes to the pixel immediately to the right. When bits < 8, the stride is expressed in bits. For 24-bit RGB, this would be 3 bytes, for example, while it would be 4 bytes for RGBx or ARGB. | |
the number of planes for this format. The number of planes can be less than the amount of components when multiple components are packed into one plane. | |
the plane number where a component can be found | |
the offset in the plane where the first pixel of the components can be found. | |
subsampling factor of the width for the component. Use GST_VIDEO_SUB_SCALE to scale a width. | |
subsampling factor of the height for the component. Use GST_VIDEO_SUB_SCALE to scale a height. | |
GstVideoFormat |
the format of the unpacked pixels. This format must have the GST_VIDEO_FORMAT_FLAG_UNPACK flag set. |
GstVideoFormatUnpack |
an unpack function for this format |
the amount of lines that will be packed | |
GstVideoFormatPack |
a pack function for this format |
GstVideoTileMode |
The tiling mode |
The width of a tile, in bytes, represented as a shift | |
The height of a tile, in bytes, represented as a shift |
typedef enum { GST_VIDEO_FORMAT_FLAG_YUV = (1 << 0), GST_VIDEO_FORMAT_FLAG_RGB = (1 << 1), GST_VIDEO_FORMAT_FLAG_GRAY = (1 << 2), GST_VIDEO_FORMAT_FLAG_ALPHA = (1 << 3), GST_VIDEO_FORMAT_FLAG_LE = (1 << 4), GST_VIDEO_FORMAT_FLAG_PALETTE = (1 << 5), GST_VIDEO_FORMAT_FLAG_COMPLEX = (1 << 6), GST_VIDEO_FORMAT_FLAG_UNPACK = (1 << 7), GST_VIDEO_FORMAT_FLAG_TILED = (1 << 8) } GstVideoFormatFlags;
The different video flags that a format info can have.
The video format is YUV, components are numbered 0=Y, 1=U, 2=V. | |
The video format is RGB, components are numbered 0=R, 1=G, 2=B. | |
The video is gray, there is one gray component with index 0. | |
The video format has an alpha components with the number 3. | |
The video format has data stored in little endianness. | |
The video format has a palette. The palette is stored in the second plane and indexes are stored in the first plane. | |
The video format has a complex layout that can't be described with the usual information in the GstVideoFormatInfo. | |
This format can be used in a GstVideoFormatUnpack and GstVideoFormatPack function. | |
The format is tiled, there is tiling information in the last plane. |
typedef enum { GST_VIDEO_PACK_FLAG_NONE = 0, GST_VIDEO_PACK_FLAG_TRUNCATE_RANGE = (1 << 0), GST_VIDEO_PACK_FLAG_INTERLACED = (1 << 1) } GstVideoPackFlags;
The different flags that can be used when packing and unpacking.
No flag | |
When the source has a smaller depth than the target format, set the least significant bits of the target to 0. This is likely slightly faster but less accurate. When this flag is not specified, the most significant bits of the source are duplicated in the least significant bits of the destination. |
The source is interlaced. The unpacked format will be interlaced as well with each line containing information from alternating fields. (Since 1.2) |
void (*GstVideoFormatUnpack) (const GstVideoFormatInfo *info
,GstVideoPackFlags flags
,gpointer dest
,const gpointer data[GST_VIDEO_MAX_PLANES]
,const gint stride[GST_VIDEO_MAX_PLANES]
,gint x
,gint y
,gint width
);
Unpacks width
pixels from the given planes and strides containing data of
format info
. The pixels will be unpacked into dest
with each component
interleaved as per info
's unpack_format, which will usually be one of
GST_VIDEO_FORMAT_ARGB, GST_VIDEO_FORMAT_AYUV, GST_VIDEO_FORMAT_ARGB64 or
GST_VIDEO_FORMAT_AYUV64 depending on the format to unpack.
dest
should at least be big enough to hold width
* bytes_per_pixel bytes
where bytes_per_pixel relates to the unpack format and will usually be
either 4 or 8 depending on the unpack format. bytes_per_pixel will be
the same as the pixel stride for plane 0 for the above formats.
For subsampled formats, the components will be duplicated in the destination array. Reconstruction of the missing components can be performed in a separate step after unpacking.
|
a GstVideoFormatInfo |
|
flags to control the unpacking |
|
a destination array |
|
pointers to the data planes |
|
strides of the planes |
|
the x position in the image to start from |
|
the y position in the image to start from |
|
the amount of pixels to unpack. |
void (*GstVideoFormatPack) (const GstVideoFormatInfo *info
,GstVideoPackFlags flags
,const gpointer src
,gint sstride
,gpointer data[GST_VIDEO_MAX_PLANES]
,const gint stride[GST_VIDEO_MAX_PLANES]
,GstVideoChromaSite chroma_site
,gint y
,gint width
);
Packs width
pixels from src
to the given planes and strides in the
format info
. The pixels from source have each component interleaved
and will be packed into the planes in data
.
This function operates on pack_lines lines, meaning that src
should
contain at least pack_lines lines with a stride of sstride
and y
should be a multiple of pack_lines.
Subsampled formats will use the horizontally and vertically cosited component from the source. Subsampling should be performed before packing.
Because this function does not have a x coordinate, it is not possible to pack pixels starting from an unaligned position. For tiled images this means that packing should start from a tile coordinate. For subsampled formats this means that a complete pixel needs to be packed.
|
a GstVideoFormatInfo |
|
flags to control the packing |
|
a source array |
|
the source array stride |
|
pointers to the destination data planes |
|
strides of the destination planes |
|
the chroma siting of the target when subsampled (not used) |
|
the y position in the image to pack to |
|
the amount of pixels to pack. |
#define GST_VIDEO_FORMAT_INFO_IS_YUV(info) ((info)->flags & GST_VIDEO_FORMAT_FLAG_YUV)
#define GST_VIDEO_FORMAT_INFO_IS_RGB(info) ((info)->flags & GST_VIDEO_FORMAT_FLAG_RGB)
#define GST_VIDEO_FORMAT_INFO_IS_GRAY(info) ((info)->flags & GST_VIDEO_FORMAT_FLAG_GRAY)
#define GST_VIDEO_FORMAT_INFO_HAS_ALPHA(info) ((info)->flags & GST_VIDEO_FORMAT_FLAG_ALPHA)
#define GST_VIDEO_FORMAT_INFO_IS_LE(info) ((info)->flags & GST_VIDEO_FORMAT_FLAG_LE)
#define GST_VIDEO_FORMAT_INFO_HAS_PALETTE(info) ((info)->flags & GST_VIDEO_FORMAT_FLAG_PALETTE)
#define GST_VIDEO_FORMAT_INFO_IS_COMPLEX(info) ((info)->flags & GST_VIDEO_FORMAT_FLAG_COMPLEX)
#define GST_VIDEO_FORMAT_INFO_N_COMPONENTS(info) ((info)->n_components)
#define GST_VIDEO_FORMAT_INFO_PSTRIDE(info,c) ((info)->pixel_stride[c])
pixel stride for the given component. This is the amount of bytes to the pixel immediately to the right, so basically bytes from one pixel to the next. When bits < 8, the stride is expressed in bits.
Examples: for 24-bit RGB, the pixel stride would be 3 bytes, while it would be 4 bytes for RGBx or ARGB, and 8 bytes for ARGB64 or AYUV64. For planar formats such as I420 the pixel stride is usually 1. For YUY2 it would be 2 bytes.
|
a GstVideoFormatInfo |
|
the component index |
#define GST_VIDEO_FORMAT_INFO_N_PLANES(info) ((info)->n_planes)
Number of planes. This is the number of planes the pixel layout is organized in in memory. The number of planes can be less than the number of components (e.g. Y,U,V,A or R, G, B, A) when multiple components are packed into one plane.
Examples: RGB/RGBx/RGBA: 1 plane, 3/3/4 components; I420: 3 planes, 3 components; NV21/NV12: 2 planes, 3 components.
|
a GstVideoFormatInfo |
#define GST_VIDEO_FORMAT_INFO_PLANE(info,c) ((info)->plane[c])
Plane number where the given component can be found. A plane may contain data for multiple components.
|
a GstVideoFormatInfo |
|
the component index |
#define GST_VIDEO_FORMAT_INFO_SCALE_WIDTH(info,c,w) GST_VIDEO_SUB_SCALE ((info)->w_sub[c],(w))
#define GST_VIDEO_FORMAT_INFO_SCALE_HEIGHT(info,c,h) GST_VIDEO_SUB_SCALE ((info)->h_sub[c],(h))
#define GST_VIDEO_FORMAT_INFO_STRIDE(info,strides,comp) ((strides)[(info)->plane[comp]])
Row stride in bytes, that is number of bytes from the first pixel component of a row to the first pixel component in the next row. This might include some row padding (memory not actually used for anything, to make sure the beginning of the next row is aligned in a particular way).
|
a GstVideoFormatInfo |
|
an array of strides |
|
the component index |
GstVideoFormat gst_video_format_from_masks (gint depth
,gint bpp
,gint endianness
,guint red_mask
,guint green_mask
,guint blue_mask
,guint alpha_mask
);
Find the GstVideoFormat for the given parameters.
|
the amount of bits used for a pixel |
|
the amount of bits used to store a pixel. This value is bigger than
depth
|
|
the endianness of the masks, G_LITTLE_ENDIAN or G_BIG_ENDIAN |
|
the red mask |
|
the green mask |
|
the blue mask |
|
the alpha mask, or 0 if no alpha mask |
Returns : |
a GstVideoFormat or GST_VIDEO_FORMAT_UNKNOWN when the parameters do not specify a known format. |
GstVideoFormat gst_video_format_from_fourcc (guint32 fourcc
);
Converts a FOURCC value into the corresponding GstVideoFormat. If the FOURCC cannot be represented by GstVideoFormat, GST_VIDEO_FORMAT_UNKNOWN is returned.
|
a FOURCC value representing raw YUV video |
Returns : |
the GstVideoFormat describing the FOURCC value |
guint32 gst_video_format_to_fourcc (GstVideoFormat format
);
Converts a GstVideoFormat value into the corresponding FOURCC. Only
a few YUV formats have corresponding FOURCC values. If format
has
no corresponding FOURCC value, 0 is returned.
|
a GstVideoFormat video format |
Returns : |
the FOURCC corresponding to format
|
GstVideoFormat gst_video_format_from_string (const gchar *format
);
Convert the format
string to its GstVideoFormat.
|
a format string |
Returns : |
the GstVideoFormat for format or GST_VIDEO_FORMAT_UNKNOWN when the
string is not a known format. |
const gchar * gst_video_format_to_string (GstVideoFormat format
);
Returns a string containing a descriptive name for the GstVideoFormat if there is one, or NULL otherwise.
|
a GstVideoFormat video format |
Returns : |
the name corresponding to format
|
const GstVideoFormatInfo * gst_video_format_get_info (GstVideoFormat format
);
Get the GstVideoFormatInfo for format
|
a GstVideoFormat |
Returns : |
The GstVideoFormatInfo for format . |
#define GST_VIDEO_CAPS_MAKE(format)
Generic caps string for video, for use in pad templates.
|
string format that describes the pixel layout, as string (e.g. "I420", "RGB", "YV12", "YUY2", "AYUV", etc.) |
typedef enum { GST_VIDEO_COLOR_RANGE_UNKNOWN = 0, GST_VIDEO_COLOR_RANGE_0_255, GST_VIDEO_COLOR_RANGE_16_235 } GstVideoColorRange;
Possible color range values. These constants are defined for 8 bit color values and can be scaled for other bit depths.
void gst_video_color_range_offsets (GstVideoColorRange range
,const GstVideoFormatInfo *info
,gint offset[GST_VIDEO_MAX_COMPONENTS]
,gint scale[GST_VIDEO_MAX_COMPONENTS]
);
Compute the offset and scale values for each component of info
. For each
component, (c[i] - offset[i]) / scale[i] will scale the component c[i] to the
range [0.0 .. 1.0].
The reverse operation (c[i] * scale[i]) + offset[i] can be used to convert
the component values in range [0.0 .. 1.0] back to their representation in
info
and range
.
|
a GstVideoColorRange |
|
a GstVideoFormatInfo |
|
output offsets. [out] |
|
output scale. [out] |
typedef enum { GST_VIDEO_COLOR_MATRIX_UNKNOWN = 0, GST_VIDEO_COLOR_MATRIX_RGB, GST_VIDEO_COLOR_MATRIX_FCC, GST_VIDEO_COLOR_MATRIX_BT709, GST_VIDEO_COLOR_MATRIX_BT601, GST_VIDEO_COLOR_MATRIX_SMPTE240M, GST_VIDEO_COLOR_MATRIX_BT2020 } GstVideoColorMatrix;
The color matrix is used to convert between Y'PbPr and non-linear RGB (R'G'B')
unknown matrix | |
identity matrix | |
FCC color matrix | |
ITU-R BT.709 color matrix | |
ITU-R BT.601 color matrix | |
SMPTE 240M color matrix | |
ITU-R BT.2020 color matrix. Since: 1.6. |
gboolean gst_video_color_matrix_get_Kr_Kb (GstVideoColorMatrix matrix
,gdouble *Kr
,gdouble *Kb
);
Get the coefficients used to convert between Y'PbPr and R'G'B' using matrix
.
When:
1 2 |
0.0 <= [Y',R',G',B'] <= 1.0) (-0.5 <= [Pb,Pr] <= 0.5) |
the general conversion is given by:
1 2 3 |
Y' = Kr*R' + (1-Kr-Kb)*G' + Kb*B' Pb = (B'-Y')/(2*(1-Kb)) Pr = (R'-Y')/(2*(1-Kr)) |
and the other way around:
1 2 3 |
R' = Y' + Cr*2*(1-Kr) G' = Y' - Cb*2*(1-Kb)*Kb/(1-Kr-Kb) - Cr*2*(1-Kr)*Kr/(1-Kr-Kb) B' = Y' + Cb*2*(1-Kb) |
|
a GstVideoColorMatrix |
|
result red channel coefficient |
|
result blue channel coefficient |
Returns : |
TRUE if matrix was a YUV color format and Kr and Kb contain valid
values. |
Since 1.6
typedef enum { GST_VIDEO_COLOR_PRIMARIES_UNKNOWN = 0, GST_VIDEO_COLOR_PRIMARIES_BT709, GST_VIDEO_COLOR_PRIMARIES_BT470M, GST_VIDEO_COLOR_PRIMARIES_BT470BG, GST_VIDEO_COLOR_PRIMARIES_SMPTE170M, GST_VIDEO_COLOR_PRIMARIES_SMPTE240M, GST_VIDEO_COLOR_PRIMARIES_FILM, GST_VIDEO_COLOR_PRIMARIES_BT2020, GST_VIDEO_COLOR_PRIMARIES_ADOBERGB } GstVideoColorPrimaries;
The color primaries define how to transform linear RGB values to and from the CIE XYZ colorspace.
unknown color primaries | |
BT709 primaries | |
BT470M primaries | |
BT470BG primaries | |
SMPTE170M primaries | |
SMPTE240M primaries | |
Generic film | |
BT2020 primaries. Since: 1.6. | |
Adobe RGB primaries. Since: 1.8 |
typedef enum { GST_VIDEO_TRANSFER_UNKNOWN = 0, GST_VIDEO_TRANSFER_GAMMA10, GST_VIDEO_TRANSFER_GAMMA18, GST_VIDEO_TRANSFER_GAMMA20, GST_VIDEO_TRANSFER_GAMMA22, GST_VIDEO_TRANSFER_BT709, GST_VIDEO_TRANSFER_SMPTE240M, GST_VIDEO_TRANSFER_SRGB, GST_VIDEO_TRANSFER_GAMMA28, GST_VIDEO_TRANSFER_LOG100, GST_VIDEO_TRANSFER_LOG316, GST_VIDEO_TRANSFER_BT2020_12, GST_VIDEO_TRANSFER_ADOBERGB } GstVideoTransferFunction;
The video transfer function defines the formula for converting between non-linear RGB (R'G'B') and linear RGB
unknown transfer function | |
linear RGB, gamma 1.0 curve | |
Gamma 1.8 curve | |
Gamma 2.0 curve | |
Gamma 2.2 curve | |
Gamma 2.2 curve with a linear segment in the lower range | |
Gamma 2.2 curve with a linear segment in the lower range | |
Gamma 2.4 curve with a linear segment in the lower range | |
Gamma 2.8 curve | |
Logarithmic transfer characteristic 100:1 range | |
Logarithmic transfer characteristic 316.22777:1 range | |
Gamma 2.2 curve with a linear segment in the lower range. Used for BT.2020 with 12 bits per component. Since: 1.6. | |
Gamma 2.19921875. Since: 1.8 |
gdouble gst_video_color_transfer_decode (GstVideoTransferFunction func
,gdouble val
);
Convert val
to its gamma decoded value. This is the inverse operation of
gst_video_color_transfer_encode().
For a non-linear value L' in the range [0..1], conversion to the linear L is in general performed with a power function like:
1 |
L = L' ^ gamma |
Depending on func
, different formulas might be applied. Some formulas
encode a linear segment in the lower range.
|
a GstVideoTransferFunction |
|
a value |
Returns : |
the gamma decoded value of val
|
Since 1.6
gdouble gst_video_color_transfer_encode (GstVideoTransferFunction func
,gdouble val
);
Convert val
to its gamma encoded value.
For a linear value L in the range [0..1], conversion to the non-linear (gamma encoded) L' is in general performed with a power function like:
1 |
L' = L ^ (1 / gamma) |
Depending on func
, different formulas might be applied. Some formulas
encode a linear segment in the lower range.
|
a GstVideoTransferFunction |
|
a value |
Returns : |
the gamma encoded value of val
|
Since 1.6
typedef struct { GstVideoColorRange range; GstVideoColorMatrix matrix; GstVideoTransferFunction transfer; GstVideoColorPrimaries primaries; } GstVideoColorimetry;
Structure describing the color info.
GstVideoColorRange |
the color range. This is the valid range for the samples. It is used to convert the samples to Y'PbPr values. |
GstVideoColorMatrix |
the color matrix. Used to convert between Y'PbPr and non-linear RGB (R'G'B') |
GstVideoTransferFunction |
the transfer function. used to convert between R'G'B' and RGB |
GstVideoColorPrimaries |
color primaries. used to convert between R'G'B' and CIE XYZ |
gboolean gst_video_colorimetry_matches (GstVideoColorimetry *cinfo
,const gchar *color
);
Check if the colorimetry information in cinfo
matches that of the
string color
.
|
a GstVideoColorimetry |
|
a colorimetry string |
Returns : |
TRUE if color conveys the same colorimetry info as the color
information in cinfo . |
gboolean gst_video_colorimetry_is_equal (const GstVideoColorimetry *cinfo
,const GstVideoColorimetry *other
);
Compare the 2 colorimetry sets for equality
|
a GstVideoColorimetry |
|
another GstVideoColorimetry |
Returns : |
TRUE if cinfo and other are equal. |
Since 1.6
gboolean gst_video_colorimetry_from_string (GstVideoColorimetry *cinfo
,const gchar *color
);
Parse the colorimetry string and update cinfo
with the parsed
values.
|
a GstVideoColorimetry |
|
a colorimetry string |
Returns : |
TRUE if color points to valid colorimetry info. |
gchar * gst_video_colorimetry_to_string (GstVideoColorimetry *cinfo
);
Make a string representation of cinfo
.
|
a GstVideoColorimetry |
Returns : |
a string representation of cinfo . |
struct GstVideoInfo { const GstVideoFormatInfo *finfo; GstVideoInterlaceMode interlace_mode; GstVideoFlags flags; gint width; gint height; gsize size; gint views; GstVideoChromaSite chroma_site; GstVideoColorimetry colorimetry; gint par_n; gint par_d; gint fps_n; gint fps_d; gsize offset[GST_VIDEO_MAX_PLANES]; gint stride[GST_VIDEO_MAX_PLANES]; /* Union preserves padded struct size for backwards compat * Consumer code should use the accessor macros for fields */ union { struct { GstVideoMultiviewMode multiview_mode; GstVideoMultiviewFlags multiview_flags; } abi; };
Information describing image properties. This information can be filled
in from GstCaps with gst_video_info_from_caps()
. The information is also used
to store the specific video info when mapping a video frame with
gst_video_frame_map()
.
Use the provided macros to access the info in this structure.
const GstVideoFormatInfo * |
the format info of the video |
GstVideoInterlaceMode |
the interlace mode |
GstVideoFlags |
additional video flags |
the width of the video | |
the height of the video | |
the default size of one frame | |
the number of views for multiview video | |
GstVideoChromaSite |
a GstVideoChromaSite. |
GstVideoColorimetry |
the colorimetry info |
the pixel-aspect-ratio numerator | |
the pixel-aspect-ratio denominator |
the framerate numerator | |
the framerate denominator |
offsets of the planes | |
strides of the planes |
typedef enum { GST_VIDEO_INTERLACE_MODE_PROGRESSIVE = 0, GST_VIDEO_INTERLACE_MODE_INTERLEAVED, GST_VIDEO_INTERLACE_MODE_MIXED, GST_VIDEO_INTERLACE_MODE_FIELDS } GstVideoInterlaceMode;
The possible values of the GstVideoInterlaceMode describing the interlace mode of the stream.
all frames are progressive | |
2 fields are interleaved in one video frame. Extra buffer flags describe the field order. | |
frames contains both interlaced and progressive video, the buffer flags describe the frame and fields. | |
2 fields are stored in one buffer, use the frame ID to get access to the required field. For multiview (the 'views' property > 1) the fields of view N can be found at frame ID (N * 2) and (N * 2) + 1. Each field has only half the amount of lines as noted in the height property. This mode requires multiple GstVideoMeta metadata to describe the fields. |
typedef enum { GST_VIDEO_MULTIVIEW_MODE_NONE = -1, GST_VIDEO_MULTIVIEW_MODE_MONO = 0, /* Single view modes */ GST_VIDEO_MULTIVIEW_MODE_LEFT, GST_VIDEO_MULTIVIEW_MODE_RIGHT, /* Stereo view modes */ GST_VIDEO_MULTIVIEW_MODE_SIDE_BY_SIDE, GST_VIDEO_MULTIVIEW_MODE_SIDE_BY_SIDE_QUINCUNX, GST_VIDEO_MULTIVIEW_MODE_COLUMN_INTERLEAVED, GST_VIDEO_MULTIVIEW_MODE_ROW_INTERLEAVED, GST_VIDEO_MULTIVIEW_MODE_TOP_BOTTOM, GST_VIDEO_MULTIVIEW_MODE_CHECKERBOARD, /* Padding for new frame packing modes */ GST_VIDEO_MULTIVIEW_MODE_FRAME_BY_FRAME = 32, /* Multivew mode(s) */ GST_VIDEO_MULTIVIEW_MODE_MULTIVIEW_FRAME_BY_FRAME, GST_VIDEO_MULTIVIEW_MODE_SEPARATED /* future expansion for annotated modes */ } GstVideoMultiviewMode;
All possible stereoscopic 3D and multiview representations. In conjunction with GstVideoMultiviewFlags, describes how multiview content is being transported in the stream.
A special value indicating no multiview information. Used in GstVideoInfo and other places to indicate that no specific multiview handling has been requested or provided. This value is never carried on caps. | |
All frames are monoscopic. | |
All frames represent a left-eye view. | |
All frames represent a right-eye view. | |
Left and right eye views are provided in the left and right half of the frame respectively. | |
Left and right eye views are provided in the left and right half of the frame, but have been sampled using quincunx method, with half-pixel offset between the 2 views. | |
Alternating vertical columns of pixels represent the left and right eye view respectively. | |
Alternating horizontal rows of pixels represent the left and right eye view respectively. | |
The top half of the frame contains the left eye, and the bottom half the right eye. | |
Pixels are arranged with alternating pixels representing left and right eye views in a checkerboard fashion. | |
Left and right eye views are provided in separate frames alternately. | |
Multiple independent views are provided in separate frames in sequence. This method only applies to raw video buffers at the moment. Specific view identification is via the GstVideoMultiviewMeta and GstVideoMeta(s) on raw video buffers. | |
Multiple views are provided as separate GstMemory framebuffers attached to each GstBuffer, described by the GstVideoMultiviewMeta and GstVideoMeta(s) |
typedef enum { GST_VIDEO_MULTIVIEW_FRAME_PACKING_NONE = GST_VIDEO_MULTIVIEW_MODE_NONE, GST_VIDEO_MULTIVIEW_FRAME_PACKING_MONO = GST_VIDEO_MULTIVIEW_MODE_MONO, GST_VIDEO_MULTIVIEW_FRAME_PACKING_LEFT = GST_VIDEO_MULTIVIEW_MODE_LEFT, GST_VIDEO_MULTIVIEW_FRAME_PACKING_RIGHT = GST_VIDEO_MULTIVIEW_MODE_RIGHT, GST_VIDEO_MULTIVIEW_FRAME_PACKING_SIDE_BY_SIDE = GST_VIDEO_MULTIVIEW_MODE_SIDE_BY_SIDE, GST_VIDEO_MULTIVIEW_FRAME_PACKING_SIDE_BY_SIDE_QUINCUNX = GST_VIDEO_MULTIVIEW_MODE_SIDE_BY_SIDE_QUINCUNX, GST_VIDEO_MULTIVIEW_FRAME_PACKING_COLUMN_INTERLEAVED = GST_VIDEO_MULTIVIEW_MODE_COLUMN_INTERLEAVED, GST_VIDEO_MULTIVIEW_FRAME_PACKING_ROW_INTERLEAVED = GST_VIDEO_MULTIVIEW_MODE_ROW_INTERLEAVED, GST_VIDEO_MULTIVIEW_FRAME_PACKING_TOP_BOTTOM = GST_VIDEO_MULTIVIEW_MODE_TOP_BOTTOM, GST_VIDEO_MULTIVIEW_FRAME_PACKING_CHECKERBOARD = GST_VIDEO_MULTIVIEW_MODE_CHECKERBOARD } GstVideoMultiviewFramePacking;
GstVideoMultiviewFramePacking represents the subset of GstVideoMultiviewMode values that can be applied to any video frame without needing extra metadata. It can be used by elements that provide a property to override the multiview interpretation of a video stream when the video doesn't contain any markers.
This enum is used (for example) on playbin, to re-interpret a played video stream as a stereoscopic video. The individual enum values are equivalent to and have the same value as the matching GstVideoMultiviewMode.
A special value indicating no frame packing info. | |
All frames are monoscopic. | |
All frames represent a left-eye view. | |
All frames represent a right-eye view. | |
Left and right eye views are provided in the left and right half of the frame respectively. | |
Left and right eye views are provided in the left and right half of the frame, but have been sampled using quincunx method, with half-pixel offset between the 2 views. | |
Alternating vertical columns of pixels represent the left and right eye view respectively. | |
Alternating horizontal rows of pixels represent the left and right eye view respectively. | |
The top half of the frame contains the left eye, and the bottom half the right eye. | |
Pixels are arranged with alternating pixels representing left and right eye views in a checkerboard fashion. |
typedef enum { GST_VIDEO_MULTIVIEW_FLAGS_NONE = 0, GST_VIDEO_MULTIVIEW_FLAGS_RIGHT_VIEW_FIRST = (1 << 0), GST_VIDEO_MULTIVIEW_FLAGS_LEFT_FLIPPED = (1 << 1), GST_VIDEO_MULTIVIEW_FLAGS_LEFT_FLOPPED = (1 << 2), GST_VIDEO_MULTIVIEW_FLAGS_RIGHT_FLIPPED = (1 << 3), GST_VIDEO_MULTIVIEW_FLAGS_RIGHT_FLOPPED = (1 << 4), GST_VIDEO_MULTIVIEW_FLAGS_HALF_ASPECT = (1 << 14), GST_VIDEO_MULTIVIEW_FLAGS_MIXED_MONO = (1 << 15) } GstVideoMultiviewFlags;
GstVideoMultiviewFlags are used to indicate extra properties of a stereo/multiview stream beyond the frame layout and buffer mapping that is conveyed in the GstMultiviewMode.
No flags | |
For stereo streams, the normal arrangement of left and right views is reversed. | |
The left view is vertically mirrored. | |
The left view is horizontally mirrored. | |
The right view is vertically mirrored. | |
The right view is horizontally mirrored. | |
For frame-packed multiview modes, indicates that the individual views have been encoded with half the true width or height and should be scaled back up for display. This flag is used for overriding input layout interpretation by adjusting pixel-aspect-ratio. For side-by-side, column interleaved or checkerboard packings, the pixel width will be doubled. For row interleaved and top-bottom encodings, pixel height will be doubled. | |
The video stream contains both
mono and multiview portions, signalled on each buffer by the
absence or presence of the GST_VIDEO_BUFFER_FLAG_MULTIPLE_VIEW
buffer flag.
|
typedef enum { GST_VIDEO_FLAG_NONE = 0, GST_VIDEO_FLAG_VARIABLE_FPS = (1 << 0), GST_VIDEO_FLAG_PREMULTIPLIED_ALPHA = (1 << 1) } GstVideoFlags;
Extra video flags
#define GST_VIDEO_INFO_IS_GRAY(i) (GST_VIDEO_FORMAT_INFO_IS_GRAY((i)->finfo))
#define GST_VIDEO_INFO_HAS_ALPHA(i) (GST_VIDEO_FORMAT_INFO_HAS_ALPHA((i)->finfo))
#define GST_VIDEO_INFO_IS_INTERLACED(i) ((i)->interlace_mode != GST_VIDEO_INTERLACE_MODE_PROGRESSIVE)
#define GST_VIDEO_INFO_FLAG_IS_SET(i,flag) ((GST_VIDEO_INFO_FLAGS(i) & (flag)) == (flag))
#define GST_VIDEO_INFO_FLAG_SET(i,flag) (GST_VIDEO_INFO_FLAGS(i) |= (flag))
#define GST_VIDEO_INFO_FLAG_UNSET(i,flag) (GST_VIDEO_INFO_FLAGS(i) &= ~(flag))
#define GST_VIDEO_INFO_N_PLANES(i) (GST_VIDEO_FORMAT_INFO_N_PLANES((i)->finfo))
#define GST_VIDEO_INFO_N_COMPONENTS(i) GST_VIDEO_FORMAT_INFO_N_COMPONENTS((i)->finfo)
#define GST_VIDEO_INFO_COMP_DEPTH(i,c) GST_VIDEO_FORMAT_INFO_DEPTH((i)->finfo,(c))
#define GST_VIDEO_INFO_COMP_DATA(i,d,c) GST_VIDEO_FORMAT_INFO_DATA((i)->finfo,d,(c))
#define GST_VIDEO_INFO_COMP_OFFSET(i,c) GST_VIDEO_FORMAT_INFO_OFFSET((i)->finfo,(i)->offset,(c))
#define GST_VIDEO_INFO_COMP_STRIDE(i,c) GST_VIDEO_FORMAT_INFO_STRIDE((i)->finfo,(i)->stride,(c))
#define GST_VIDEO_INFO_COMP_WIDTH(i,c) GST_VIDEO_FORMAT_INFO_SCALE_WIDTH((i)->finfo,(c),(i)->width)
#define GST_VIDEO_INFO_COMP_HEIGHT(i,c) GST_VIDEO_FORMAT_INFO_SCALE_HEIGHT((i)->finfo,(c),(i)->height)
#define GST_VIDEO_INFO_COMP_PLANE(i,c) GST_VIDEO_FORMAT_INFO_PLANE((i)->finfo,(c))
#define GST_VIDEO_INFO_COMP_PSTRIDE(i,c) GST_VIDEO_FORMAT_INFO_PSTRIDE((i)->finfo,(c))
#define GST_VIDEO_INFO_COMP_POFFSET(i,c) GST_VIDEO_FORMAT_INFO_POFFSET((i)->finfo,(c))
#define GST_VIDEO_INFO_MULTIVIEW_FLAGS(i) ((i)->ABI.abi.multiview_flags)
#define GST_VIDEO_INFO_MULTIVIEW_MODE(i) ((i)->ABI.abi.multiview_mode)
void gst_video_info_init (GstVideoInfo *info
);
Initialize info
with default values.
|
a GstVideoInfo |
GstVideoInfo * gst_video_info_new (void
);
Allocate a new GstVideoInfo that is also initialized with
gst_video_info_init()
.
Returns : |
a new GstVideoInfo. free with gst_video_info_free() . |
Since 1.6
GstVideoInfo * gst_video_info_copy (const GstVideoInfo *info
);
Copy a GstVideoInfo structure.
|
a GstVideoInfo |
Returns : |
a new GstVideoInfo. free with gst_video_info_free. |
Since 1.6
void gst_video_info_free (GstVideoInfo *info
);
Free a GstVideoInfo structure previously allocated with gst_video_info_new()
or gst_video_info_copy()
.
|
a GstVideoInfo |
Since 1.6
void gst_video_info_set_format (GstVideoInfo *info
,GstVideoFormat format
,guint width
,guint height
);
Set the default info for a video frame of format
and width
and height
.
Note: This initializes info
first, no values are preserved. This function
does not set the offsets correctly for interlaced vertically
subsampled formats.
|
a GstVideoInfo |
|
the format |
|
a width |
|
a height |
gboolean gst_video_info_from_caps (GstVideoInfo *info
,const GstCaps *caps
);
Parse caps
and update info
.
|
a GstVideoInfo |
|
a GstCaps |
Returns : |
TRUE if caps could be parsed |
GstCaps * gst_video_info_to_caps (GstVideoInfo *info
);
Convert the values of info
into a GstCaps.
|
a GstVideoInfo |
Returns : |
a new GstCaps containing the info of info . |
gboolean gst_video_info_convert (GstVideoInfo *info
,GstFormat src_format
,gint64 src_value
,GstFormat dest_format
,gint64 *dest_value
);
Converts among various GstFormat types. This function handles GST_FORMAT_BYTES, GST_FORMAT_TIME, and GST_FORMAT_DEFAULT. For raw video, GST_FORMAT_DEFAULT corresponds to video frames. This function can be used to handle pad queries of the type GST_QUERY_CONVERT.
|
a GstVideoInfo |
|
GstFormat of the src_value
|
|
value to convert |
|
GstFormat of the dest_value
|
|
pointer to destination value |
Returns : |
TRUE if the conversion was successful. |
gboolean gst_video_info_is_equal (const GstVideoInfo *info
,const GstVideoInfo *other
);
Compares two GstVideoInfo and returns whether they are equal or not
|
a GstVideoInfo |
|
a GstVideoInfo |
Returns : |
TRUE if info and other are equal, else FALSE . |
void gst_video_info_align (GstVideoInfo *info
,GstVideoAlignment *align
);
Adjust the offset and stride fields in info
so that the padding and
stride alignment in align
is respected.
Extra padding will be added to the right side when stride alignment padding
is required and align
will be updated with the new padding values.
|
a GstVideoInfo |
|
alignment parameters |
struct GstVideoFrame { GstVideoInfo info; GstVideoFrameFlags flags; GstBuffer *buffer; gpointer meta; gint id; gpointer data[GST_VIDEO_MAX_PLANES]; GstMapInfo map[GST_VIDEO_MAX_PLANES]; };
A video frame obtained from gst_video_frame_map()
GstVideoInfo |
the GstVideoInfo |
GstVideoFrameFlags |
GstVideoFrameFlags for the frame |
the mapped buffer | |
pointer to metadata if any | |
id of the mapped frame. the id can for example be used to identify the frame in case of multiview video. |
pointers to the plane data | |
mappings of the planes |
typedef enum { GST_VIDEO_FRAME_FLAG_NONE = 0, GST_VIDEO_FRAME_FLAG_INTERLACED = (1 << 0), GST_VIDEO_FRAME_FLAG_TFF = (1 << 1), GST_VIDEO_FRAME_FLAG_RFF = (1 << 2), GST_VIDEO_FRAME_FLAG_ONEFIELD = (1 << 3), GST_VIDEO_FRAME_FLAG_MULTIPLE_VIEW = (1 << 4), GST_VIDEO_FRAME_FLAG_FIRST_IN_BUNDLE = (1 << 5) } GstVideoFrameFlags;
Extra video frame flags
no flags | |
The video frame is interlaced. In mixed interlace-mode, this flag specifies if the frame is interlaced or progressive. | |
The video frame has the top field first | |
The video frame has the repeat flag | |
The video frame has one field | |
The video contains one or more non-mono views | |
The video frame is the first in a set of corresponding views provided as sequential frames. |
gboolean gst_video_frame_map_id (GstVideoFrame *frame
,GstVideoInfo *info
,GstBuffer *buffer
,gint id
,GstMapFlags flags
);
Use info
and buffer
to fill in the values of frame
with the video frame
information of frame id
.
When id
is -1, the default frame is mapped. When id
!= -1, this function
will return FALSE
when there is no GstVideoMeta with that id.
All video planes of buffer
will be mapped and the pointers will be set in
frame->data
.
|
pointer to GstVideoFrame |
|
a GstVideoInfo |
|
the buffer to map |
|
the frame id to map |
|
GstMapFlags |
Returns : |
TRUE on success. |
gboolean gst_video_frame_map (GstVideoFrame *frame
,GstVideoInfo *info
,GstBuffer *buffer
,GstMapFlags flags
);
Use info
and buffer
to fill in the values of frame
. frame
is usually
allocated on the stack, and you will pass the address to the GstVideoFrame
structure allocated on the stack; gst_video_frame_map()
will then fill in
the structures with the various video-specific information you need to access
the pixels of the video buffer. You can then use accessor macros such as
GST_VIDEO_FRAME_COMP_DATA()
, GST_VIDEO_FRAME_PLANE_DATA()
,
GST_VIDEO_FRAME_COMP_STRIDE()
, GST_VIDEO_FRAME_PLANE_STRIDE()
etc.
to get to the pixels.
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 |
GstVideoFrame vframe; ... // set RGB pixels to black one at a time if (gst_video_frame_map (&vframe, video_info, video_buffer)) { guint8 *pixels = GST_VIDEO_FRAME_PLANE_DATA (vframe, 0); guint stride = GST_VIDEO_FRAME_PLANE_STRIDE (vframe, 0); guint pixel_stride = GST_VIDEO_FRAME_PLANE_PSTRIDE (vframe, 0); for (h = 0; h < height; ++h) { for (w = 0; w < width; ++w) { guint8 *pixel = pixels + h * stride + w * pixel_stride; memset (pixel, 0, pixel_stride); } } } ... |
All video planes of buffer
will be mapped and the pointers will be set in
frame->data
.
The purpose of this function is to make it easy for you to get to the video pixels in a generic way, without you having to worry too much about details such as whether the video data is allocated in one contiguous memory chunk or multiple memory chunks (e.g. one for each plane); or if custom strides and custom plane offsets are used or not (as signalled by GstVideoMeta on each buffer). This function will just fill the GstVideoFrame structure with the right values and if you use the accessor macros everything will just work and you can access the data easily. It also maps the underlying memory chunks for you.
|
pointer to GstVideoFrame |
|
a GstVideoInfo |
|
the buffer to map |
|
GstMapFlags |
Returns : |
TRUE on success. |
void gst_video_frame_unmap (GstVideoFrame *frame
);
Unmap the memory previously mapped with gst_video_frame_map.
|
a GstVideoFrame |
gboolean gst_video_frame_copy (GstVideoFrame *dest
,const GstVideoFrame *src
);
Copy the contents from src
to dest
.
|
a GstVideoFrame |
|
a GstVideoFrame |
Returns : |
TRUE if the contents could be copied. |
gboolean gst_video_frame_copy_plane (GstVideoFrame *dest
,const GstVideoFrame *src
,guint plane
);
Copy the plane with index plane
from src
to dest
.
|
a GstVideoFrame |
|
a GstVideoFrame |
|
a plane |
Returns : |
TRUE if the contents could be copied. |
#define GST_VIDEO_FRAME_FLAG_IS_SET(f,fl) ((GST_VIDEO_FRAME_FLAGS(f) & (fl)) == (fl))
#define GST_VIDEO_FRAME_IS_INTERLACED(f) (GST_VIDEO_FRAME_FLAG_IS_SET(f, GST_VIDEO_FRAME_FLAG_INTERLACED))
#define GST_VIDEO_FRAME_IS_TFF(f) (GST_VIDEO_FRAME_FLAG_IS_SET(f, GST_VIDEO_FRAME_FLAG_TFF))
#define GST_VIDEO_FRAME_IS_RFF(f) (GST_VIDEO_FRAME_FLAG_IS_SET(f, GST_VIDEO_FRAME_FLAG_RFF))
#define GST_VIDEO_FRAME_IS_ONEFIELD(f) (GST_VIDEO_FRAME_FLAG_IS_SET(f, GST_VIDEO_FRAME_FLAG_ONEFIELD))
#define GST_VIDEO_FRAME_N_PLANES(f) (GST_VIDEO_INFO_N_PLANES(&(f)->info))
#define GST_VIDEO_FRAME_PLANE_OFFSET(f,p) (GST_VIDEO_INFO_PLANE_OFFSET(&(f)->info,(p)))
#define GST_VIDEO_FRAME_PLANE_STRIDE(f,p) (GST_VIDEO_INFO_PLANE_STRIDE(&(f)->info,(p)))
#define GST_VIDEO_FRAME_N_COMPONENTS(f) GST_VIDEO_INFO_N_COMPONENTS(&(f)->info)
#define GST_VIDEO_FRAME_COMP_DEPTH(f,c) GST_VIDEO_INFO_COMP_DEPTH(&(f)->info,(c))
#define GST_VIDEO_FRAME_COMP_DATA(f,c) GST_VIDEO_INFO_COMP_DATA(&(f)->info,(f)->data,(c))
#define GST_VIDEO_FRAME_COMP_STRIDE(f,c) GST_VIDEO_INFO_COMP_STRIDE(&(f)->info,(c))
#define GST_VIDEO_FRAME_COMP_OFFSET(f,c) GST_VIDEO_INFO_COMP_OFFSET(&(f)->info,(c))
#define GST_VIDEO_FRAME_COMP_WIDTH(f,c) GST_VIDEO_INFO_COMP_WIDTH(&(f)->info,(c))
#define GST_VIDEO_FRAME_COMP_HEIGHT(f,c) GST_VIDEO_INFO_COMP_HEIGHT(&(f)->info,(c))
#define GST_VIDEO_FRAME_COMP_PLANE(f,c) GST_VIDEO_INFO_COMP_PLANE(&(f)->info,(c))
#define GST_VIDEO_FRAME_COMP_PSTRIDE(f,c) GST_VIDEO_INFO_COMP_PSTRIDE(&(f)->info,(c))
#define GST_VIDEO_FRAME_COMP_POFFSET(f,c) GST_VIDEO_INFO_COMP_POFFSET(&(f)->info,(c))
typedef enum { GST_VIDEO_BUFFER_FLAG_INTERLACED = (GST_BUFFER_FLAG_LAST << 0), GST_VIDEO_BUFFER_FLAG_TFF = (GST_BUFFER_FLAG_LAST << 1), GST_VIDEO_BUFFER_FLAG_RFF = (GST_BUFFER_FLAG_LAST << 2), GST_VIDEO_BUFFER_FLAG_ONEFIELD = (GST_BUFFER_FLAG_LAST << 3), GST_VIDEO_BUFFER_FLAG_MULTIPLE_VIEW = (GST_BUFFER_FLAG_LAST << 4), GST_VIDEO_BUFFER_FLAG_FIRST_IN_BUNDLE = (GST_BUFFER_FLAG_LAST << 5), GST_VIDEO_BUFFER_FLAG_LAST = (GST_BUFFER_FLAG_LAST << 8) } GstVideoBufferFlags;
Additional video buffer flags. These flags can potentially be used on any buffers carrying video data - even encoded data.
If the GstBuffer is interlaced. In mixed interlace-mode, this flag specifies if the frame is interlaced or progressive. |
If the GstBuffer is interlaced, then the first field in the video frame is the top field. If unset, the bottom field is first. | |
If the GstBuffer is interlaced, then the first field
(as defined by the GST_VIDEO_BUFFER_TFF flag setting)
is repeated.
|
|
If the GstBuffer is interlaced, then only the
first field (as defined by the GST_VIDEO_BUFFER_TFF
flag setting) is to be displayed.
|
|
The GstBuffer contains one or more specific views, such as left or right eye view. This flag is set on any buffer that contains non-mono content - even for streams that contain only a single viewpoint. In mixed mono / non-mono streams, the absence of the flag marks mono buffers. |
When conveying stereo/multiview content with frame-by-frame methods, this flag marks the first buffer in a bundle of frames that belong together. | |
Offset to define more flags |
typedef enum { GST_VIDEO_TILE_TYPE_INDEXED = 0 } GstVideoTileType;
Enum value describing the most common tiling types.
Tiles are indexed. Use
gst_video_tile_get_index() to retrieve the tile at the requested
coordinates.
|
typedef enum { GST_VIDEO_TILE_MODE_UNKNOWN = 0, GST_VIDEO_TILE_MODE_ZFLIPZ_2X2 = GST_VIDEO_TILE_MAKE_MODE (1, INDEXED), } GstVideoTileMode;
Enum value describing the available tiling modes.
guint gst_video_tile_get_index (GstVideoTileMode mode
,gint x
,gint y
,gint x_tiles
,gint y_tiles
);
Get the tile index of the tile at coordinates x
and y
in the tiled
image of x_tiles
by y_tiles
.
Use this method when mode
is of type GST_VIDEO_TILE_MODE_INDEXED
.
|
a GstVideoTileMode |
|
x coordinate |
|
y coordinate |
|
number of horizontal tiles |
|
number of vertical tiles |
Returns : |
the index of the tile at x and y in the tiled image of
x_tiles by y_tiles . |
Since 1.4
#define GST_VIDEO_TILE_MAKE_MODE(num, type)
Use this macro to create new tile modes.
|
the mode number to create |
|
the tile mode type |
#define GST_VIDEO_TILE_MODE_TYPE(mode) ((mode) & GST_VIDEO_TILE_TYPE_MASK)
Get the tile mode type of mode
|
the tile mode |
#define GST_VIDEO_TILE_MODE_IS_INDEXED(mode) (GST_VIDEO_TILE_MODE_TYPE(mode) == GST_VIDEO_TILE_TYPE_INDEXED)
Check if mode
is an indexed tile type
|
a tile mode |
#define GST_VIDEO_TILE_MAKE_STRIDE(x_tiles, y_tiles)
Encode the number of tiles in X and Y into the stride.
|
number of tiles in X |
|
number of tiles in Y |
#define GST_VIDEO_TILE_X_TILES(stride) ((stride) & GST_VIDEO_TILE_X_TILES_MASK)
Extract the number of tiles in X from the stride value.
|
plane stride |
#define GST_VIDEO_TILE_Y_TILES(stride) ((stride) >> GST_VIDEO_TILE_Y_TILES_SHIFT)
Extract the number of tiles in Y from the stride value.
|
plane stride |
gboolean gst_video_blend (GstVideoFrame *dest
,GstVideoFrame *src
,gint x
,gint y
,gfloat global_alpha
);
Lets you blend the src
image into the dest
image
|
The GstVideoFrame where to blend src in |
|
the GstVideoFrame that we want to blend into dest |
|
The x offset in pixel where the src image should be blended |
|
the y offset in pixel where the src image should be blended |
|
the global_alpha each per-pixel alpha value is multiplied with |
void gst_video_blend_scale_linear_RGBA (GstVideoInfo *src
,GstBuffer *src_buffer
,gint dest_height
,gint dest_width
,GstVideoInfo *dest
,GstBuffer **dest_buffer
);
Scales a buffer containing RGBA (or AYUV) video. This is an internal helper function which is used to scale subtitle overlays, and may be deprecated in the near future. Use GstVideoScaler to scale video buffers instead.
|
the GstVideoInfo describing the video data in src_buffer
|
|
the source buffer containing video pixels to scale |
|
the height in pixels to scale the video data in src_buffer to |
|
the width in pixels to scale the video data in src_buffer to |
|
pointer to a GstVideoInfo structure that will be filled in
with the details for dest_buffer . [out]
|
|
a pointer to a GstBuffer variable, which will be set to a newly-allocated buffer containing the scaled pixels. [out] |
GstVideoConverter * gst_video_converter_new (GstVideoInfo *in_info
,GstVideoInfo *out_info
,GstStructure *config
);
Create a new converter object to convert between in_info
and out_info
with config
.
|
a GstVideoInfo |
|
a GstVideoInfo |
|
a GstStructure with configuration options. [transfer full] |
Returns : |
a GstVideoConverter or NULL if conversion is not possible. |
Since 1.6
void gst_video_converter_free (GstVideoConverter *convert
);
Free convert
|
a GstVideoConverter |
Since 1.6
const GstStructure * gst_video_converter_get_config (GstVideoConverter *convert
);
Get the current configuration of convert
.
|
a GstVideoConverter |
Returns : |
a GstStructure that remains valid for as long as convert is valid
or until gst_video_converter_set_config() is called. |
gboolean gst_video_converter_set_config (GstVideoConverter *convert
,GstStructure *config
);
Set config
as extra configuration for convert
.
If the parameters in config
can not be set exactly, this function returns
FALSE
and will try to update as much state as possible. The new state can
then be retrieved and refined with gst_video_converter_get_config()
.
Look at the GST_VIDEO_CONVERTER_OPT_* fields to check valid configuration options and values.
|
a GstVideoConverter |
|
a GstStructure. [transfer full] |
Returns : |
TRUE when config could be set. |
Since 1.6
void gst_video_converter_frame (GstVideoConverter *convert
,const GstVideoFrame *src
,GstVideoFrame *dest
);
Convert the pixels of src
into dest
using convert
.
|
a GstVideoConverter |
|
a GstVideoFrame |
|
a GstVideoFrame |
Since 1.6
const GValue * gst_video_multiview_get_mono_modes (void
);
Returns : |
A const GValue containing a list of mono video modes Utility function that returns a GValue with a GstList of mono video modes (mono/left/right) for use in caps negotiations. |
Since 1.6
const GValue * gst_video_multiview_get_unpacked_modes
(void
);
Returns : |
A const GValue containing a list of 'unpacked' stereo video modes Utility function that returns a GValue with a GstList of unpacked stereo video modes (separated/frame-by-frame/frame-by-frame-multiview) for use in caps negotiations. |
Since 1.6
const GValue * gst_video_multiview_get_doubled_height_modes
(void
);
Returns : |
A const GValue containing a list of stereo video modes Utility function that returns a GValue with a GstList of packed stereo video modes with double the height of a single view for use in caps negotiations. Currently this is top-bottom and row-interleaved. |
Since 1.6
const GValue * gst_video_multiview_get_doubled_size_modes
(void
);
Returns : |
A const GValue containing a list of stereo video modes Utility function that returns a GValue with a GstList of packed stereo video modes that have double the width/height of a single view for use in caps negotiation. Currently this is just 'checkerboard' layout. |
Since 1.6
const GValue * gst_video_multiview_get_doubled_width_modes
(void
);
Returns : |
A const GValue containing a list of stereo video modes Utility function that returns a GValue with a GstList of packed stereo video modes with double the width of a single view for use in caps negotiations. Currently this is side-by-side, side-by-side-quincunx and column-interleaved. |
Since 1.6
GstVideoMultiviewMode gst_video_multiview_mode_from_caps_string
(const gchar *caps_mview_mode
);
|
multiview-mode field string from caps |
Returns : |
The GstVideoMultiviewMode value Given a string from a caps multiview-mode field, output the corresponding GstVideoMultiviewMode or GST_VIDEO_MULTIVIEW_MODE_NONE |
Since 1.6
const gchar * gst_video_multiview_mode_to_caps_string
(GstVideoMultiviewMode mview_mode
);
|
A GstVideoMultiviewMode value |
Returns : |
The caps string representation of the mode, or NULL if invalid. Given a GstVideoMultiviewMode returns the multiview-mode caps string for insertion into a caps structure |
Since 1.6
gboolean gst_video_multiview_guess_half_aspect (GstVideoMultiviewMode mv_mode
,guint width
,guint height
,guint par_n
,guint par_d
);
|
A GstVideoMultiviewMode |
|
Video frame width in pixels |
|
Video frame height in pixels |
|
Numerator of the video pixel-aspect-ratio |
|
Denominator of the video pixel-aspect-ratio |
Returns : |
A boolean indicating whether the GST_VIDEO_MULTIVIEW_FLAG_HALF_ASPECT flag should be set. Utility function that heuristically guesses whether a frame-packed stereoscopic video contains half width/height encoded views, or full-frame views by looking at the overall display aspect ratio. |
Since 1.6
void gst_video_multiview_video_info_change_mode (GstVideoInfo *info
,GstVideoMultiviewMode out_mview_mode
,GstVideoMultiviewFlags out_mview_flags
);
Utility function that transforms the width/height/PAR and multiview mode and flags of a GstVideoInfo into the requested mode.
|
A GstVideoInfo structure to operate on |
|
A GstVideoMultiviewMode value |
|
A set of GstVideoMultiviewFlags |
Since 1.6