<?xml version="1.0" encoding="US-ASCII"?>
<!DOCTYPE rfc PUBLIC '' "http://xml.resource.org/public/rfc/bibxml/rfc2629.dtd"[
  <!ENTITY RFC2119 PUBLIC '' "http://xml.resource.org/public/rfc/bibxml/reference.RFC.2119.xml">
  <!ENTITY RFC2198 PUBLIC '' "http://xml.resource.org/public/rfc/bibxml/reference.RFC.2198.xml">
  <!ENTITY RFC2629 PUBLIC '' "http://xml.resource.org/public/rfc/bibxml/reference.RFC.2629.xml">
  <!ENTITY RFC3016 PUBLIC '' "http://xml.resource.org/public/rfc/bibxml/reference.RFC.3016.xml">
  <!ENTITY RFC3261 PUBLIC '' "http://xml.resource.org/public/rfc/bibxml/reference.RFC.3261.xml">
  <!ENTITY RFC3550 PUBLIC '' "http://xml.resource.org/public/rfc/bibxml/reference.RFC.3550.xml">
  <!ENTITY RFC3640 PUBLIC '' "http://xml.resource.org/public/rfc/bibxml/reference.RFC.3640.xml">
  <!ENTITY RFC3711 PUBLIC '' "http://xml.resource.org/public/rfc/bibxml/reference.RFC.3711.xml">
  <!ENTITY RFC4288 PUBLIC '' "http://xml.resource.org/public/rfc/bibxml/reference.RFC.4288.xml">
  <!ENTITY RFC4301 PUBLIC '' "http://xml.resource.org/public/rfc/bibxml/reference.RFC.4301.xml">
  <!ENTITY RFC4566 PUBLIC '' "http://xml.resource.org/public/rfc/bibxml/reference.RFC.4566.xml">
  <!ENTITY RFC4628 PUBLIC '' "http://xml.resource.org/public/rfc/bibxml/reference.RFC.4628.xml">
  <!ENTITY RFC4629 PUBLIC '' "http://xml.resource.org/public/rfc/bibxml/reference.RFC.4629.xml">
  <!ENTITY RFC4855 PUBLIC '' "http://xml.resource.org/public/rfc/bibxml/reference.RFC.4855.xml">
  <!ENTITY RFC5109 PUBLIC '' "http://xml.resource.org/public/rfc/bibxml/reference.RFC.5109.xml">
  <!ENTITY RFC5246 PUBLIC '' "http://xml.resource.org/public/rfc/bibxml/reference.RFC.5246.xml">
  <!ENTITY RFC5583 PUBLIC '' "http://xml.resource.org/public/rfc/bibxml/reference.RFC.5583.xml">
  <!ENTITY RFC5691 PUBLIC '' "http://xml.resource.org/public/rfc/bibxml/reference.RFC.5691.xml">

]>
<?xml-stylesheet type='text/xsl' href="http://greenbytes.de/tech/webdav/rfc2629xslt/rfc2629.xslt" ?>
<?rfc strict="yes" ?>
<?rfc toc="yes"?>
<!-- generate a ToC -->
<?rfc tocdepth="4"?>
<!-- the number of levels of subsections in ToC. default: 3 -->
<!-- control references -->
<?rfc symrefs="yes"?>
<!-- use symbolic references tags, i.e, [RFC2119] instead of [1] -->
<?rfc sortrefs="yes" ?>
<!-- sort the reference entries alphabetically -->
<!-- control vertical white space
     (using these PIs as follows is recommended by the RFC Editor) -->
<?rfc compact="yes" ?>
<!-- do not start each main section on a new page -->
<?rfc subcompact="no" ?>
<!-- keep one blank line between list items -->
<!-- end of list of popular I-D processing instructions -->
<rfc category="std"
     docName="draft-ietf-payload-rfc3016bis-03.txt"
     ipr="pre5378Trust200902"
     obsoletes="3016"
     submissionType="IETF"
     updates=""
     xml:lang="en">
  <!-- category values: std, bcp, info, exp, and historic
     ipr values: full3667, noModification3667, noDerivatives3667
     you can add the attributes updates="NNNN" and obsoletes="NNNN"
     they will automatically be output with "(if approved)" -->

  <!-- ***** FRONT MATTER ***** -->

  <front>
    <!-- The abbreviated title is used in the page header - it is only necessary if the
         full title is longer than 39 characters -->

    <title abbrev="RTP Payload Format for MPEG-4 Streams">RTP Payload Format
    for MPEG-4 Audio/Visual Streams</title>

    <!-- add 'role="editor"' below for the editors if appropriate -->

    <!-- Another author who claims to be an editor -->

    <author fullname="Malte Schmidt"
                                    initials="M.S."
                                    surname="Schmidt">
      <organization>Dolby Laboratories</organization>
      <address>
        <postal>
          <street>Deutschherrnstr. 15-19</street>
          <!-- Reorder these if your country does things differently -->
          <city>90537 Nuernberg</city>
          <region></region>
          <country>DE</country>
        </postal>
        <phone>+49 911 928 91 42</phone>
        <email>malte.schmidt@dolby.com</email>
        <!-- uri and facsimile elements may also be added -->
      </address>
    </author>

    <author fullname="Frans de Bont"
                                    initials="F.d.B."
                                    surname="de Bont">
      <organization>Philips Electronics</organization>
      <address>
        <postal>
          <street>High Tech Campus 5</street>
          <!-- Reorder these if your country does things differently -->
          <city>5656 AE Eindhoven</city>
          <region></region>
          <country>NL</country>
        </postal>
        <phone>+31 40 2740234</phone>
        <email>frans.de.bont@philips.com</email>
        <!-- uri and facsimile elements may also be added -->
      </address>
    </author>

    <author fullname="Stefan Doehla"
                                    initials="S.D."
                                    surname="Doehla">
      <organization>Fraunhofer IIS</organization>
      <address>
        <postal>
          <street>Am Wolfsmantel 33</street>
          <!-- Reorder these if your country does things differently -->
          <city>91058 Erlangen</city>
          <region></region>
          <country>DE</country>
        </postal>
        <phone>+49 9131 776 6042</phone>
        <email>stefan.doehla@iis.fraunhofer.de</email>
        <!-- uri and facsimile elements may also be added -->
      </address>
    </author>

    <author fullname="Jaehwan Kim"
                                    initials="J."
                                    surname="Kim">
      <organization>LG Electronics Inc.</organization>
      <address>
        <postal>
          <street>221, Yangjae-dong, Seocho-gu</street>
          <!-- Reorder these if your country does things differently -->
          <city>Seoul 137-130</city>
          <region></region>
          <country>Korea</country>
        </postal>
        <phone>+82 10 6225 0619</phone>
        <email>kjh1905m@naver.com</email>
        <!-- uri and facsimile elements may also be added -->
      </address>
    </author>

    <date year="2011" />

    <!-- If the month and year are both specified and are the current ones, xml2rfc will fill
         in the current day for you. If only the current year is specified, xml2rfc will fill
         in the current day and month for you. If the year is not the current one, it is
         necessary to specify at least a month (xml2rfc assumes day="1" if not specified for the
         purpose of calculating the expiry date).  With drafts it is normally sufficient to
         specify just the year. -->

    <!-- Meta-data Declarations -->

    <area>Real-time Applications and Infrastructure</area>

    <workgroup>Audio/Video Transport Payloads</workgroup>

    <!-- WG name at the upperleft corner of the doc,
         IETF is fine for individual submissions.
         If this element is not present, the default is "Network Working Group",
         which is used by the RFC Editor as a nod to the history of the IETF. -->

    <keyword>RFC3016, RTP, MPEG-4, Audio, Visual, Video, AAC, HE AAC, HE AAC v2, MPEG
    Surround</keyword>

    <!-- Keywords will be incorporated into HTML output
         files in a meta tag but they have no effect on text or nroff
         output. If you submit your draft to the RFC Editor, the
         keywords will be used for the search engine. -->

    <abstract>
      <t>This document describes Real-Time Transport Protocol (RTP) payload
      formats for carrying each of MPEG-4 Audio and MPEG-4 Visual
      bitstreams without using MPEG-4 Systems. It is a revision of RFC
      3016 and is needed because of some misalignments between RFC 3016 and 
      the 3GPP PSS specification regarding the RTP payload format for 
      MPEG-4 Audio.</t>
   
      <t>For the purpose of directly mapping MPEG-4 Audio/Visual bitstreams 
      onto RTP packets, this document provides specifications for the use
      of RTP header fields and also specifies fragmentation rules.  It also
      provides specifications for Media Type registration and the use of 
      Session Description Protocol (SDP).  The audio payload format
      described in this document has some limitations related to the
      signaling of audio codec parameters for the required multiplexing
      format.  Therefore, new system designs should utilize RFC 3640,
      which does not have these restrictions. Nevertheless, this revision 
      of RFC 3016 is provided to update and complete the specification, and
      to enable interoperable implementations.</t>

      <t>This document obsoletes RFC 3016. It contains a summary of changes 
      from RFC 3016 and discusses backward compatibility to RFC 3016.</t>
    </abstract>
  </front>

  <!-- ***************************************************************** -->

  <middle>
    <section title="Introduction" toc="default">
      <t>The RTP payload formats described in this document specify how MPEG-4
      Audio <xref target="14496-3" /> and MPEG-4 Visual streams <xref
      target="14496-2" /> are to be fragmented
      and mapped directly onto RTP packets.</t>

      <t>These RTP payload formats enable transport of MPEG-4 Audio/Visual
      streams without using the synchronization and stream management
      functionality of MPEG-4 Systems <xref target="14496-1" />. Such RTP
      payload formats will be used in systems that have intrinsic stream
      management functionality and thus require no such functionality from
      MPEG-4 Systems. H.323 <xref target="H323" /> terminals are an example
      of such systems, where MPEG-4 Audio/Visual streams are not managed
      by MPEG-4 Systems Object Descriptors but by H.245 <xref target="H245" />.
      The streams are directly mapped onto RTP packets without using the
      MPEG-4 Systems Sync Layer. Other examples are Session Initiation
      Protocol (SIP) <xref target="RFC3261" /> and RTSP where Media Type and
      SDP are used. Media Type and SDP usages of the RTP payload formats
      described in this document are defined to directly
      specify the attribute of Audio/Visual streams (e.g., media type,
      packetization format and codec configuration) without using MPEG-4
      Systems. The obvious benefit is that these MPEG-4 Audio/Visual RTP
      payload formats can be handled in a unified way together with those
      formats defined for non-MPEG-4 codecs. The disadvantage is that
      interoperability with environments using MPEG-4 Systems may be
      difficult, hence, other payload formats may be better suited to those
      applications.</t>

      <t>The semantics of RTP headers in such cases need to be clearly
      defined, including the association with MPEG-4 Audio/Visual data
      elements. In addition, it is beneficial to define the fragmentation
      rules of RTP packets for MPEG-4 Video streams so as to enhance error
      resiliency by utilizing the error resiliency tools provided inside the
      MPEG-4 Video stream.</t>

      <section title="MPEG-4 Visual RTP Payload Format" toc="default">
        <t>MPEG-4 Visual is a visual coding standard with many features:
        high coding efficiency; high error resiliency; multiple, arbitrary
        shape object-based coding; etc. <xref target="14496-2" />. It covers a
        wide range of bitrates from scores of Kbps to several Mbps. It also
        covers a wide variety of networks, ranging from those guaranteed to be
        almost error-free to mobile networks with high error rates.</t>

        <t>With respect to the fragmentation rules for an MPEG-4 Visual
        bitstream defined in this document, since MPEG-4 Visual is used for a
        wide variety of networks, it is desirable not to apply too much
        restriction on fragmentation, and a fragmentation rule such as "a
        single video packet shall always be mapped on a single RTP packet" may
        be inappropriate. On the other hand, careless, media unaware
        fragmentation may cause degradation in error resiliency and bandwidth
        efficiency. The fragmentation rules described in this document are
        flexible but manage to define the minimum rules for preventing
        meaningless fragmentation while utilizing the error resiliency
        functionalities of MPEG-4 Visual.</t>

        <t>The fragmentation rule "Different Video Object Planes (VOPs)
        SHOULD be fragmented into different RTP packets"
        is made so that the RTP timestamp uniquely indicates the VOP
        time framing. On the other hand, MPEG-4 video may generate VOPs of
        very small size, in cases with an empty VOP (vop_coded=0) containing
        only VOP header or an arbitrary shaped VOP with a small number of
        coding blocks. To reduce the overhead for such cases, the
        fragmentation rule permits concatenating multiple VOPs in an RTP
        packet. (See fragmentation rule (4) in
        <xref target="Fragmentation of MPEG-4 Visual Bitstream" />
        and marker bit and
        timestamp in <xref target="Use of RTP Header Fields for MPEG-4 Visual" />.)
        </t>

        <t>While the additional media specific RTP header defined for such
        video coding tools as H.261 <xref target="H261" /> or MPEG-1/2 is effective in helping to
        recover picture headers corrupted by packet losses, MPEG-4 Visual
        already has error resiliency functionalities for recovering corrupt
        headers, and these can be used on RTP/IP networks as well as on other
        networks (H.223/mobile, MPEG-2/TS, etc.). Therefore, no extra RTP
        header fields are defined in this MPEG-4 Visual RTP payload
        format.</t>
      </section>

      <section title="MPEG-4 Audio RTP Payload Format"
               anchor="MPEG-4 Audio RTP Payload Format"
               toc="default">
        <t>MPEG-4 Audio is an audio standard that integrates many
        different types of audio coding tools. Low-overhead MPEG-4 Audio
        Transport Multiplex (LATM) manages the sequences of audio data with
        relatively small overhead. In audio-only applications, then, it is
        desirable for LATM-based MPEG-4 Audio bitstreams to be directly mapped
        onto RTP packets without using MPEG-4 Systems.</t>

        <t>For MPEG-4 Audio coding tools, as is true for other audio coders,
        if the payload is a single audio frame, packet loss will not impair
        the decodability of adjacent packets. Therefore, the additional media
        specific header for recovering errors will not be required for MPEG-4
        Audio. Existing RTP protection mechanisms, such as Generic Forward
        Error Correction <xref target="RFC5109" /> and Redundant Audio Data
        <xref target="RFC2198" />, MAY be applied to improve error resiliency.</t>
      </section>

      <section title="Interoperability with RFC 3016" toc="default">
        <t>This specification is not backwards compatible with <xref target="RFC3016" />
        as a binary incompatible LATM version is mandated. Existing implementations
        of RFC 3016 that use a recent LATM version may already comply with this
        specification and must be considered as not RFC 3016 compliant.
        The 3GPP PSS service <xref target="3GPP" /> is such an example as a
        more recent LATM version is mandated in the 3GPP PSS specification. 
        Existing implementations that use the LATM version as specified in
        RFC 3016 MUST be updated to comply with this specification.</t>
      </section>

      <section title="Relation with RFC 3640" toc="default">
        <t>In this document a payload format for the transport of MPEG-4
        Elementary Streams is specified. For MPEG-4 Audio streams "out of band"
        signaling is defined such that a receiver is not obliged to decode the
        payload data to determine the audio codec and its configuration.
        The signaling capabilities specified in this document are less explicit
        than those defined in <xref target="RFC3640" />. But, the use of the
        MPEG-4 LATM in various transmission standards justifies its right to
        exist, see also <xref target="MPEG-4 Audio RTP Payload Format" />.</t>
      </section>
    </section>

    <section title="Definitions and Abbreviations" toc="default">
      <t>This document makes use of terms, specified in <xref target="14496-2" />, <xref
      target="14496-3" />, and <xref target="23003-1" />. In addition,
      the following terms are used in this document and have specific
      meaning within the context of this document.</t>

      <t>Abbreviations:
        <list hangIndent="0" style="empty">
          <t>AAC: Advanced Audio Coding</t>
          <t>ASC: AudioSpecificConfig</t>
          <t>HE AAC: High Efficiency AAC</t>
          <t>LATM: Low-overhead MPEG-4 Audio Transport Multiplex</t>
          <t>PS: Parametric Stereo</t>
          <t>SBR: Spectral Band Replication</t>
          <t>VOP: Video Object Plane</t>
        </list>
      </t>

      <t>The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT",
      "SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this
      document are to be interpreted as described in <xref
      target="RFC2119" />.</t>
    </section>

    <section title="Clarifications on specifying codec configurations for MPEG-4 Audio"
             toc="default">

      <t>For MPEG-4 Audio <xref target="14496-3" /> streams the decoder output
      configuration can differ from the core codec configuration depending on
      the use of the SBR and PS tools.</t>

      <t>The core codec sampling rate is the default audio codec sampling rate.
      When SBR is used, typically the double value of the core codec sampling
      rate will be regarded as the definitive sampling rate (i.e., the
      decoder's output sampling rate).</t>
      <t>Note: The exception is downsampled SBR mode in which case the SBR
      sampling rate and core codec sampling rate are identical.</t>

      <t>The core codec channel configuration is the default audio codec channel
      configuration. When PS is used, the core codec channel configuration
      indicates one channel (i.e., mono) whereas the definitive channel
      configuration is two channels (i.e., stereo).
      When MPEG Surround is used, the definitive channel configuration
      depends on the output of the MPEG Surround decoder.</t>
    </section>

    <section title="LATM Restrictions for RTP Packetization of MPEG-4 Audio Bitstreams"
             toc="default">

      <t>While LATM has several multiplexing features as follows:
        <list hangIndent="0" style="symbols">
          <t>Carrying configuration information with audio data,</t>
          <t>Concatenation of multiple audio frames in one audio stream,</t>
          <t>Multiplexing multiple objects (programs),</t>
          <t>Multiplexing scalable layers,</t>
        </list>
      in RTP transmission there is no need for the last two
      features. Therefore, these two features MUST NOT be used in
      applications based on RTP packetization specified by this document.
      Since LATM has been developed for only natural audio coding tools,
      i.e., not for synthesis tools, it seems difficult to transmit
      Structured Audio (SA) data and Text to Speech Interface (TTSI) data by
      LATM. Therefore, SA data and TTSI data MUST NOT be transported by the
      RTP packetization in this document.</t>

      <t>For transmission of scalable streams, audio data of each layer
      SHOULD be packetized onto different RTP streams allowing for the
      different layers to be treated differently at the IP level, for
      example via some means of differentiated service. On the other hand,
      all configuration data of the scalable streams are contained in one
      LATM configuration data "StreamMuxConfig" and every scalable layer
      shares the StreamMuxConfig. The mapping between each layer and its
      configuration data is achieved by LATM header information attached to
      the audio data. In order to indicate the dependency information of the
      scalable streams, the signaling mechanism as specified in
      <xref target="RFC5583" /> SHOULD be used (see
      <xref target="Use of RTP Header Fields for MPEG-4 Audio" />).</t>
    </section>

    <section title="RTP Packetization of MPEG-4 Visual Bitstreams"
             toc="default">
      <t>This section specifies RTP packetization rules for MPEG-4 Visual
      content. An MPEG-4 Visual bitstream is mapped directly onto RTP packets
      without the addition of extra header fields or any removal of Visual
      syntax elements. The Combined Configuration/Elementary stream mode MUST
      be used so that configuration information will be carried to the same
      RTP port as the elementary stream. (see 6.2.1 "Start codes" of
      <xref target="14496-2" />) The configuration information MAY
      additionally be specified by some out-of-band means.
      If needed by systems using Media Type parameters
      and SDP parameters, "e.g., SIP and RTSP", the optional parameter "config"
      MUST be used to specify the configuration information (see
      <xref target="Media Type Registration for MPEG-4 Visual" /> and
      <xref target="Mapping to SDP for MPEG-4 Visual" />).</t>

      <t>When the short video header mode is used, the RTP payload format for
      H.263 SHOULD be used (the format defined in <xref target="RFC4629" />
      is RECOMMENDED, but the <xref target="RFC4628" /> format MAY be used
      for compatibility with older implementations).</t>

      <t>
       <figure>
          <artwork>
0                   1                   2                   3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|V=2|P|X|  CC   |M|     PT      |       sequence number         | RTP
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|                           timestamp                           | Header
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|           synchronization source (SSRC) identifier            |
+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
|            contributing source (CSRC) identifiers             |
|                             ....                              |
+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
|                                                               | RTP
|       MPEG-4 Visual stream (byte aligned)                     | Pay-
|                                                               | load
|                               +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|                               :...OPTIONAL RTP padding        |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

     Figure 1 - An RTP packet for MPEG-4 Visual stream
          </artwork>
        </figure>
      </t>

      <section title="Use of RTP Header Fields for MPEG-4 Visual"
               anchor="Use of RTP Header Fields for MPEG-4 Visual"
               toc="default">
        <t>Payload Type (PT): The assignment of an RTP payload type for this
        packet format is outside the scope of this document, and will not
        be specified here. It is expected that the RTP profile for a
        particular class of applications will assign a payload type for this
        encoding, or if that is not done then a payload type in the dynamic
        range SHALL be chosen by means of an out-of-band signaling protocol
        (e.g., H.245, SIP, etc).</t>

        <t>Extension (X) bit: Defined by the RTP profile used.</t>

        <t>Sequence Number: Incremented by one for each RTP data packet sent,
        starting, for security reasons, with a random initial value.</t>

        <t>Marker (M) bit: The marker bit is set to one to indicate the last
        RTP packet (or only RTP packet) of a VOP. When multiple VOPs are
        carried in the same RTP packet, the marker bit is set to one.</t>

        <t>Timestamp: The timestamp indicates the sampling instance of the VOP
        contained in the RTP packet. A constant offset, which is random, is
        added for security reasons.
        <list hangIndent="1" style="symbols">
            <t>When multiple VOPs are carried in the same RTP packet, the
            timestamp indicates the earliest of the VOP times within the VOPs
            carried in the RTP packet. Timestamp information of the rest of
            the VOPs are derived from the timestamp fields in the VOP header
            (modulo_time_base and vop_time_increment).</t>

            <t>If the RTP packet contains only configuration information
            and/or Group_of_VideoObjectPlane() fields, the timestamp of the
            next VOP in the coding order is used.</t>

            <t>If the RTP packet contains only visual_object_sequence_end_code
            information, the timestamp of the immediately preceding VOP in the
            coding order is used.</t>
          </list></t>

        <t>The resolution of the timestamp is set to its default value of
        90kHz, unless specified by an out-of-band means (e.g., SDP parameter
        or Media Type parameter as defined in
        <xref target="Media Type Registration for MPEG-4 Audio/Visual Streams" />).</t>

        <t>Other header fields are used as described in <xref target="RFC3550" />.</t>
      </section>

      <section title="Fragmentation of MPEG-4 Visual Bitstream"
               anchor="Fragmentation of MPEG-4 Visual Bitstream"
               toc="default">
        <t>A fragmented MPEG-4 Visual bitstream is mapped directly onto the
        RTP payload without any addition of extra header fields or any removal
        of Visual syntax elements. The Combined Configuration/Elementary
        streams mode is used. The following rules apply for the
        fragmentation.</t>

        <t>In the following, header means one of the following: <list
            hangIndent="1" style="symbols">
            <t>Configuration information (Visual Object Sequence Header,
            Visual Object Header and Video Object Layer Header)</t>

            <t>visual_object_sequence_end_code</t>

            <t>The header of the entry point function for an elementary stream
            (Group_of_VideoObjectPlane() or the header of VideoObjectPlane(),
            video_plane_with_short_header(), MeshObject() or FaceObject())</t>

            <t>The video packet header (video_packet_header() excluding
            next_resync_marker())</t>

            <t>The header of gob_layer()</t>

            <t>See 6.2.1 "Start codes" of <xref target="14496-2" />
            for the definition of the configuration
            information and the entry point functions.</t>
          </list></t>

        <t>(1) Configuration information and Group_of_VideoObjectPlane()
        fields SHALL be placed at the beginning of the RTP payload (just after
        the RTP header) or just after the header of the syntactically upper
        layer function.</t>

        <t>(2) If one or more headers exist in the RTP payload, the RTP
        payload SHALL begin with the header of the syntactically highest
        function. Note: The visual_object_sequence_end_code is regarded as the
        lowest function.</t>

        <t>(3) A header SHALL NOT be split into a plurality of RTP
        packets.</t>

        <t>(4) Different VOPs SHOULD be fragmented into different RTP packets
        so that one RTP packet consists of the data bytes associated with a
        unique VOP time instance (that is indicated in the timestamp field in
        the RTP packet header), with the exception that multiple consecutive
        VOPs MAY be carried within one RTP packet in the decoding order if the
        size of the VOPs is small.</t>

        <t>Note: When multiple VOPs are carried in one RTP payload, the
        timestamp of the VOPs after the first one may be calculated by the
        decoder. This operation is necessary only for RTP packets in which the
        marker bit equals one and the beginning of RTP payload corresponds
        to a start code. (See timestamp and marker bit in
        <xref target="Use of RTP Header Fields for MPEG-4 Visual" />.)</t>

        <t>(5) It is RECOMMENDED that a single video packet is sent as a
        single RTP packet. The size of a video packet SHOULD be adjusted in
        such a way that the resulting RTP packet is not larger than the
        path-MTU. If the video packet is
        disabled by the coder configuration (by setting resync_marker_disable
        in the VOL header to 1), or in coding tools where the video packet is
        not supported, a VOP MAY be split at arbitrary byte-positions.</t>

        <t>The video packet starts with the VOP header or the video packet
        header, followed by motion_shape_texture(), and ends with
        next_resync_marker() or next_start_code().</t>
      </section>

      <section title="Examples of Packetized MPEG-4 Visual Bitstream"
               toc="default">
        <t>Figure 2 shows examples of RTP packets generated based on the
        criteria described in <xref target="Fragmentation of MPEG-4 Visual Bitstream" /></t>

        <t>(a) is an example of the first RTP packet or the random access
        point of an MPEG-4 Visual bitstream containing the configuration
        information. According to criterion (1), the Visual Object Sequence
        Header(VS header) is placed at the beginning of the RTP payload,
        preceding the Visual Object Header and the Video Object Layer
        Header(VO header, VOL header). Since the fragmentation rule defined in
        <xref target="Fragmentation of MPEG-4 Visual Bitstream" />
        guarantees that the configuration information, starting with
        visual_object_sequence_start_code, is always placed at the beginning
        of the RTP payload, RTP receivers can detect the random access point
        by checking if the first 32-bit field of the RTP payload is
        visual_object_sequence_start_code.</t>

        <t>(b) is another example of the RTP packet containing the
        configuration information. It differs from example (a) in that the RTP
        packet also contains a VOP header and a Video Packet in the VOP following the
        configuration information. Since the length of the configuration
        information is relatively short (typically scores of bytes) and an RTP
        packet containing only the configuration information may thus increase
        the overhead, the configuration information and the immediately
        following VOP can be packetized into a single RTP packet.</t>

        <t>(c) is an example of an RTP packet that contains
        Group_of_VideoObjectPlane(GOV). Following criterion (1), the GOV is
        placed at the beginning of the RTP payload. It would be a waste of
        RTP/IP header overhead to generate an RTP packet containing only a GOV
        whose length is 7 bytes. Therefore, (a part of) the following VOP can
        be placed in the same RTP packet as shown in (c).</t>

        <t>(d) is an example of the case where one video packet is packetized
        into one RTP packet. When the packet-loss rate of the underlying
        network is high, this kind of packetization is recommended. Even when
        the RTP packet containing the VOP header is discarded by a packet
        loss, the other RTP packets can be decoded by using the HEC(Header
        Extension Code) information in the video packet header. No extra RTP
        header field is necessary.</t>

        <t>(e) is an example of the case where more than one video packet is
        packetized into one RTP packet. This kind of packetization is
        effective to save the overhead of RTP/IP headers when the bit-rate of
        the underlying network is low. However, it will decrease the
        packet-loss resiliency because multiple video packets are discarded by
        a single RTP packet loss. The optimal number of video packets in an
        RTP packet and the length of the RTP packet can be determined
        considering the packet-loss rate and the bit-rate of the underlying
        network.</t>

        <t>(f) is an example of the case when the video packet is disabled by
        setting resync_marker_disable in the VOL header to 1. In this case, a
        VOP may be split into a plurality of RTP packets at arbitrary
        byte-positions. For example, it is possible to split a VOP into
        fixed-length packets. This kind of coder configuration and RTP packet
        fragmentation may be used when the underlying network is guaranteed to
        be error-free.</t>

        <t>Figure 3 shows examples of RTP packets prohibited by the criteria
        of <xref target="Fragmentation of MPEG-4 Visual Bitstream" />.</t>

        <t>Fragmentation of a header into multiple RTP packets, as in (a),
        will not only increase the overhead of RTP/IP headers but also
        decrease the error resiliency. Therefore, it is prohibited by the
        criterion (3).</t>

        <t>When concatenating more than one video packet into an RTP packet,
        neither a VOP header nor a video_packet_header() is allowed to be placed in the middle
        of the RTP payload. The packetization as in (b) is not allowed by
        criterion (2) due to the aspect of the error resiliency. Comparing
        this example with Figure 2(d), although two video packets are mapped
        onto two RTP packets in both cases, the packet-loss resiliency is not
        identical. Namely, if the second RTP packet is lost, both video
        packets 1 and 2 are lost in the case of Figure 3(b) whereas only video
        packet 2 is lost in the case of Figure 2(d).</t>

        <t>
        <figure>
          <artwork>
    +------+------+------+------+
(a) | RTP  |  VS  |  VO  | VOL  |
    |header|header|header|header|
    +------+------+------+------+

    +------+------+------+------+------+------------+
(b) | RTP  |  VS  |  VO  | VOL  | VOP  |Video Packet|
    |header|header|header|header|header|            |
    +------+------+------+------+------+------------+

    +------+-----+------------------+
(c) | RTP  | GOV |Video Object Plane|
    |header|     |                  |
    +------+-----+------------------+

    +------+------+------------+  +------+------+------------+
(d) | RTP  | VOP  |Video Packet|  | RTP  |  VP  |Video Packet|
    |header|header|    (1)     |  |header|header|    (2)     |
    +------+------+------------+  +------+------+------------+

    +------+------+------------+------+------------+------+------------+
(e) | RTP  |  VP  |Video Packet|  VP  |Video Packet|  VP  |Video Packet|
    |header|header|     (1)    |header|    (2)     |header|    (3)     |
    +------+------+------------+------+------------+------+------------+

    +------+------+------------+  +------+------------+
(f) | RTP  | VOP  |VOP fragment|  | RTP  |VOP fragment|
    |header|header|    (1)     |  |header|    (2)     |
    +------+------+------------+  +------+------------+

     Figure 2 - Examples of RTP packetized MPEG-4 Visual bitstream
          </artwork>
        </figure>
        </t>
        <t>
        <figure>
          <artwork>
    +------+-------------+  +------+------------+------------+
(a) | RTP  |First half of|  | RTP  |Last half of|Video Packet|
    |header|  VP header  |  |header|  VP header |            |
    +------+-------------+  +------+------------+------------+

    +------+------+----------+  +------+---------+------+------------+
(b) | RTP  | VOP  |First half|  | RTP  |Last half|  VP  |Video Packet|
    |header|header| of VP(1) |  |header| of VP(1)|header|    (2)     |
    +------+------+----------+  +------+---------+------+------------+

   Figure 3 - Examples of prohibited RTP packetization for MPEG-4 Visual
   bitstream
          </artwork>
        </figure>
        </t>
      </section>
    </section>

    <section anchor="RTP Packetization of MPEG-4 Audio Bitstreams"
             title="RTP Packetization of MPEG-4 Audio Bitstreams">
      <t>This section specifies RTP packetization rules for MPEG-4 Audio
      bitstreams. MPEG-4 Audio streams MUST be formatted as LATM (Low-overhead
      MPEG-4 Audio Transport Multiplex) <xref target="14496-3" /> streams, and
      the LATM-based streams are then mapped onto RTP packets as described in the
      sections below.</t>

      <section anchor="RTP Packet Format" title="RTP Packet Format"
               toc="default">
        <t>LATM-based streams consist of a sequence of audioMuxElements that
        include one or more PayloadMux elements which carry the audio frames.
        A complete audioMuxElement or a part of one SHALL be mapped directly
        onto an RTP payload without any removal of audioMuxElement syntax
        elements (see Figure 4). The first byte of each audioMuxElement SHALL
        be located at the first payload location in an RTP packet.</t>

        <figure>
          <artwork>
0                   1                   2                   3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|V=2|P|X|  CC   |M|     PT      |       sequence number         |RTP
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|                           timestamp                           |Header
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|           synchronization source (SSRC) identifier            |
+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
|            contributing source (CSRC) identifiers             |
|                             ....                              |
+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
|                                                               |RTP
:                 audioMuxElement (byte aligned)                :Payload
|                                                               |
|                               +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|                               :...OPTIONAL RTP padding        |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

             Figure 4 - An RTP packet for MPEG-4 Audio
          </artwork>
        </figure>

        <t>In order to decode the audioMuxElement, the following
        muxConfigPresent information is required to be indicated by
        out-of-band means. When SDP is utilized for this indication, the Media Type
        parameter "cpresent" corresponds to the muxConfigPresent information
        (see <xref target="Media Type Registration for MPEG-4 Audio" />).
        The following restrictions apply:
          <list
            hangIndent="1" style="symbols">
            <t>In the out-of-band configuration case the number of PayloadMux
            elements contained in each audioMuxElement can only be set once.
            If more than one PayloadMux element is contained in each audioMuxElement,
            special care is required to ensure that the last RTP packet remains
            decodable.</t>

            <t>To construct the audioMuxElement in the in-band configuration case,
            non octet aligned configuration data is inserted immediately before the one or more
            PayloadMux elements. Since the generation of RTP payloads with non
            octet aligned data is not possible with RTP hint tracks, as defined
            by the MP4 file format <xref target="14496-12" />
            <xref target="14496-14" />, this document does not support RTP hint tracks
            for the in-band configuration case.</t>
          </list></t>

        <t>muxConfigPresent: If this value is set to 1 (in-band mode), the
        audioMuxElement SHALL include an indication bit "useSameStreamMux" and
        MAY include the configuration information for audio compression
        "StreamMuxConfig". The useSameStreamMux bit indicates whether the
        StreamMuxConfig element in the previous frame is applied in the
        current frame. If the useSameStreamMux bit indicates to use the
        StreamMuxConfig from the previous frame, but if the previous frame has
        been lost, the current frame may not be decodable. Therefore, in case
        of in-band mode, the StreamMuxConfig element SHOULD be transmitted
        repeatedly depending on the network condition. On the other hand, if
        muxConfigPresent is set to 0 (out-of-band mode), the StreamMuxConfig
        element is required to be transmitted by an out-of-band means. In case
        of SDP, Media Type parameter "config" is utilized (see
        <xref target="Media Type Registration for MPEG-4 Audio" />).</t>
      </section>

      <section title="Use of RTP Header Fields for MPEG-4 Audio"
               anchor="Use of RTP Header Fields for MPEG-4 Audio"
               toc="default">
        <t>Payload Type (PT): The assignment of an RTP payload type for this
        packet format is outside the scope of this document, and will only
        be restricted here. It is expected that the RTP profile for a
        particular class of applications will assign a payload type for this
        encoding, or if that is not done then a payload type in the dynamic
        range shall be chosen by means of an out-of-band signaling protocol
        (e.g., H.245, SIP, etc). In the dynamic assignment of RTP payload
        types for scalable streams, the server SHALL assign a different value
        to each layer. The dependency relationships between the enhanced layer
        and the base layer MUST be signaled as specified in
        <xref target="RFC5583" />. An example of the use of such signaling
        for scalable audio streams can be found in <xref target="RFC5691" />.</t>

        <t>Marker (M) bit: The marker bit indicates audioMuxElement
        boundaries. It is set to one to indicate that the RTP packet contains
        a complete audioMuxElement or the last fragment of an
        audioMuxElement.</t>

        <t>Timestamp: The timestamp indicates the sampling instance of the
        first audio frame contained in the RTP packet. Timestamps are
        RECOMMENDED to start at a random value for security reasons.</t>

        <t>Unless specified by an out-of-band means, the resolution of the
        timestamp is set to its default value of 90 kHz.</t>

        <t>Sequence Number: Incremented by one for each RTP packet sent,
        starting, for security reasons, with a random value.</t>

        <t>Other header fields are used as described in <xref target="RFC3550" />.</t>
      </section>

      <section title="Fragmentation of MPEG-4 Audio Bitstream" toc="default">
        <t>It is RECOMMENDED to put one audioMuxElement in each RTP packet. If
        the size of an audioMuxElement can be kept small enough that the size
        of the RTP packet containing it does not exceed the size of the
        path-MTU, this will be no problem. If it cannot, the audioMuxElement
        SHALL be fragmented and spread across multiple packets.</t>
      </section>
    </section>

    <section title="Media Type Registration for MPEG-4 Audio/Visual Streams"
             anchor="Media Type Registration for MPEG-4 Audio/Visual Streams"
             toc="default">
      <t>The following sections describe the Media Type registrations for
      MPEG-4 Audio/Visual streams, which are registered in accordance with
      <xref target="RFC4855" /> and use the template of <xref target="RFC4288" />.
      Media Type registration and SDP usage for
      the MPEG-4 Visual stream are described in
      <xref target="Media Type Registration for MPEG-4 Visual" /> and
      <xref target="Mapping to SDP for MPEG-4 Visual" />,
      respectively, while Media Type registration and SDP usage for MPEG-4
      Audio stream are described in
      <xref target="Media Type Registration for MPEG-4 Audio" /> and
      <xref target="Mapping to SDP for MPEG-4 Audio" />, respectively.</t>

      <section title="Media Type Registration for MPEG-4 Visual"
               anchor="Media Type Registration for MPEG-4 Visual"
               toc="default">

        <t>The receiver MUST ignore any unspecified parameter, to
        ensure that additional parameters can be added in any future revision
        of this specification.</t>

        <t>Type name: video</t>

        <t>Subtype name: MP4V-ES</t>

        <t>Required parameters: none</t>

        <t>Optional parameters: <list hangIndent="1" style="empty">
            <t>rate: This parameter is used only for RTP transport. It
            indicates the resolution of the timestamp field in the RTP header.
            If this parameter is not specified, its default value of 90000
            (90kHz) is used.</t>

            <t>profile-level-id: A decimal representation of MPEG-4 Visual
            Profile and Level indication value (profile_and_level_indication)
            defined in Table G-1 of <xref target="14496-2" />.
            This parameter MAY be used in the
            capability exchange or session setup procedure to indicate MPEG-4
            Visual Profile and Level combination of which the MPEG-4 Visual
            codec is capable. If this parameter is not specified by the
            procedure, its default value of 1 (Simple Profile/Level 1) is
            used.</t>

            <t>config: This parameter SHALL be used to indicate the
            configuration of the corresponding MPEG-4 Visual bitstream. It
            SHALL NOT be used to indicate the codec capability in the
            capability exchange procedure. It is a hexadecimal representation
            of an octet string that expresses the MPEG-4 Visual configuration
            information, as defined in subclause 6.2.1 Start codes of
            <xref target="14496-2" />. The
            configuration information is mapped onto the octet string in an
            MSB-first basis. The first bit of the configuration information
            SHALL be located at the MSB of the first octet. The configuration
            information indicated by this parameter SHALL be the same as the
            configuration information in the corresponding MPEG-4 Visual
            stream, except for first_half_vbv_occupancy and
            latter_half_vbv_occupancy, if they exist, which may vary in the
            repeated configuration information inside an MPEG-4 Visual stream
            (See 6.2.1 Start codes of <xref target="14496-2" />).</t>
          </list></t>

        <t>Published specification:</t>

        <t>
          <list hangIndent="1" style="empty">
            <t>The specifications for MPEG-4 Visual streams are presented in
            <xref target="14496-2" />. The RTP payload format is described in [RFCxxxx].</t>
          </list>
        </t>

        <t>Encoding considerations:</t>

        <t>
          <list hangIndent="1" style="empty">
            <t>Video bitstreams MUST be generated according to MPEG-4 Visual
            specifications <xref target="14496-2" />. A video bitstream is binary data
            and MUST be encoded for non-binary transport (for Email, the
            Base64 encoding is sufficient). This type is also defined for
            transfer via RTP. The RTP packets MUST be packetized according to
            the MPEG-4 Visual RTP payload format defined in [RFCxxxx].</t>
          </list>
        </t>

        <t>Security considerations:</t>

        <t>
          <list hangIndent="1" style="empty">
            <t>See <xref target="Security Considerations" /> of [RFCxxxx].</t>
          </list>
        </t>

        <t>Interoperability considerations:</t>

        <t>
          <list hangIndent="1" style="empty">
            <t>MPEG-4 Visual provides a large and rich set of tools for the
            coding of visual objects. For effective implementation of the
            standard, subsets of the MPEG-4 Visual tool sets have been
            provided for use in specific applications. These subsets, called
            'Profiles', limit the size of the tool set a decoder is required
            to implement. In order to restrict computational complexity, one
            or more Levels are set for each Profile. A Profile@Level
            combination allows:
            <list hangIndent="0" style="symbols">

            <t>a codec builder to implement only the subset of the standard
            he needs, while maintaining interworking with other MPEG-4 devices
            included in the same combination, and</t>

            <t>checking whether MPEG-4 devices comply with the standard
            ('conformance testing').</t>
            </list></t>
          </list>
        </t>

        <t>
          <list hangIndent="1" style="empty">
            <t>The visual stream SHALL be compliant with the MPEG-4 Visual
            Profile@Level specified by the parameter "profile-level-id".
            Interoperability between a sender and a receiver may be achieved
            by specifying the parameter "profile-level-id", or
            by arranging a capability exchange/announcement procedure for
            this parameter.</t>
          </list>
        </t>

        <t>Applications which use this Media Type:</t>

        <t>
          <list hangIndent="1" style="empty">
            <t>Audio and visual streaming and conferencing tools</t>
          </list>
        </t>

        <t>Additional information: none</t>

        <t>Person and email address to contact for further information: <list
            hangIndent="1" style="empty">
            <t>See Authors' Address section at the end of [RFCxxxx].</t>
          </list></t>

        <t>Intended usage: COMMON</t>

        <t>Author: <list hangIndent="1" style="empty">
            <t>See Authors' Address section at the end of [RFCxxxx].</t>
          </list></t>

        <t>Change controller: <list hangIndent="1" style="empty">
            <t>IETF Audio/Video Transport Payloads working group delegated from the IESG.</t>
          </list></t>
      </section>

      <section title="Mapping to SDP for MPEG-4 Visual"
               anchor="Mapping to SDP for MPEG-4 Visual"
               toc="default">
        <t>The Media Type video/MP4V-ES string is mapped to fields in the
        Session Description Protocol (SDP) <xref target="RFC4566"> </xref>, as follows:
          <list hangIndent="1" style="symbols">
            <t>The Media Type (video) goes in SDP "m=" as the media name.</t>

            <t>The Media subtype (MP4V-ES) goes in SDP "a=rtpmap" as the
            encoding name.</t>

            <t>The optional parameter "rate" goes in "a=rtpmap" as the clock
            rate.</t>

            <t>The optional parameter "profile-level-id" and "config" go in
            the "a=fmtp" line to indicate the coder capability and
            configuration, respectively. These parameters are expressed as a
            string, in the form of a semicolon-separated
            list of parameter=value pairs.
              <figure>
                <artwork>
   Example usages for the profile-level-id parameter are:
   1  : MPEG-4 Visual Simple Profile/Level 1
   34 : MPEG-4 Visual Core Profile/Level 2
   145: MPEG-4 Visual Advanced Real Time Simple Profile/Level 1
                </artwork>
              </figure>
            </t>
          </list></t>

        <section title="Declarative SDP Usage for MPEG-4 Visual" toc="default">
          <t>The following are some examples of media representation in SDP:</t>
          <t>
            <figure>
              <artwork>
Simple Profile/Level 1, rate=90000(90kHz), "profile-level-id" and
"config" are present in "a=fmtp" line:
  m=video 49170/2 RTP/AVP 98
  a=rtpmap:98 MP4V-ES/90000
  a=fmtp:98 profile-level-id=1;config=000001B001000001B50900000100000001
     20008440FA282C2090A21F

Core Profile/Level 2, rate=90000(90kHz), "profile-level-id" is present
in "a=fmtp" line:
  m=video 49170/2 RTP/AVP 98
  a=rtpmap:98 MP4V-ES/90000
  a=fmtp:98 profile-level-id=34

Advanced Real Time Simple Profile/Level 1, rate=90000(90kHz),
"profile-level-id" is present in "a=fmtp" line:
  m=video 49170/2 RTP/AVP 98
  a=rtpmap:98 MP4V-ES/90000
  a=fmtp:98 profile-level-id=145
              </artwork>
            </figure>
          </t>
        </section>
      </section>

      <section title="Media Type Registration for MPEG-4 Audio"
               anchor="Media Type Registration for MPEG-4 Audio"
               toc="default">

        <t>The receiver MUST ignore any unspecified parameter, to
        ensure that additional parameters can be added in any future revision
        of this specification.</t>

        <t>Type name: audio</t>

        <t>Subtype name: MP4A-LATM</t>

        <t>Required parameters: <list hangIndent="1" style="empty">
            <t>rate: the rate parameter indicates the RTP time stamp clock
            rate. The default value is 90000. Other rates MAY be indicated
            only if they are set to the same value as the audio sampling rate
            (number of samples per second).</t>

            <t>In the presence of SBR, the sampling rates for the core
            en-/decoder and the SBR tool are different in most cases. This
            parameter SHALL therefore NOT be considered as the definitive
            sampling rate. If this parameter is used, the server must
            follow the rules below:
            <list hangIndent="1" style="symbols">
                <t>When the presence of SBR is not explicitly signaled by the
                optional SDP parameters such as object parameter,
                profile-level-id or config string, this parameter SHALL be set
                to the core codec sampling rate.</t>

                <t>When the presence of SBR is explicitly signaled by the
                optional SDP parameters such as object parameter,
                profile-level-id or config string this parameter SHALL be set
                to the SBR sampling rate.</t>
            </list></t>

            <t>NOTE: The optional parameter SBR-enabled in SDP a=fmtp is
            useful for implicit HE AAC / HE AAC v2 signaling. But the
            SBR-enabled parameter can also be used in the case of explicit
            HE AAC / HE AAC v2 signaling. Therefore, its existence
            itself is not the criteria to determine whether HE AAC / HE AAC v2
            signaling is explicit or not. </t>
          </list></t>

        <t>Optional parameters: <list hangIndent="1" style="empty">
            <t>profile-level-id: a decimal representation of MPEG-4 Audio
            Profile Level indication value defined in <xref
            target="14496-3" />. This parameter indicates
            which MPEG-4 Audio tool subsets the decoder is capable of using.
            If this parameter is not specified in the capability exchange or
            session setup procedure, its default value of 30 (Natural Audio
            Profile/Level 1) is used.</t>

            <t>MPS-profile-level-id: a decimal representation of the MPEG
            Surround Profile Level indication as defined in <xref
            target="14496-3" />. This parameter indicates the support of the
            MPEG Surround profile and level by the decoder
            to be capable to decode the stream.</t>

            <t>object: a decimal representation of the MPEG-4 Audio Object
            Type value defined in <xref target="14496-3" />.
            This parameter specifies the tool to be used by
            the decoder. It CAN be used to limit the capability within the
            specified "profile-level-id".</t>

            <t>bitrate: the data rate for the audio bit stream.</t>

            <t>cpresent: a boolean parameter that indicates whether audio payload
            configuration data has been multiplexed into an RTP payload (see
            <xref target="RTP Packet Format" />).
            A 0 indicates the configuration data has not been
            multiplexed into an RTP payload and in this case the "config"
            parameter MUST be present, a 1 indicates that it has. The default
            if the parameter is omitted is 1. If this parameter is set to 1
            and the "config" parameter is present, the multiplexed
            configuration data and the value of the "config" parameter SHALL
            be consistent. </t>

            <t>config: a hexadecimal representation of an octet string that
            expresses the audio payload configuration data "StreamMuxConfig",
            as defined in <xref target="14496-3" />.
            Configuration data is mapped onto the octet string in an MSB-first
            basis. The first bit of the configuration data SHALL be located at
            the MSB of the first octet. In the last octet, zero-padding bits,
            if necessary, SHALL follow the configuration data.
            Senders MUST set the StreamMuxConfig elements
            taraBufferFullness and latmBufferFullness to their largest
            respective value, indicating that buffer fullness measures are not
            used in SDP. Receivers MUST ignore the value of these two elements
            contained in the config parameter.</t>

            <t>MPS-asc: a hexadecimal representation of an octet string that
            expresses audio payload configuration data "AudioSpecificConfig",
            as defined in <xref target="14496-3" />. If
            this parameter is not present the relevant signaling is performed
            by other means (e.g. in-band or contained in the config
            string).</t>

            <t>The same mapping rules as for the config parameter apply.</t>

            <t>ptime: duration of each packet in milliseconds.</t>

            <t>SBR-enabled: a boolean parameter which indicates whether
            SBR-data can be expected in the RTP-payload of a stream. This
            parameter is relevant for an SBR-capable decoder if the presence
            of SBR can not be detected from an out-of-band decoder
            configuration (e.g. contained in the config string).</t>

            <t>If this parameter is set to 0, a decoder MAY expect that SBR
            is not used. If this parameter is set to 1, a decoder CAN
            upsample the audio data with the SBR tool, regardless whether SBR
            data is present in the stream or not.</t>

            <t>If the presence of SBR can not be detected from out-of-band
            configuration and the SBR-enabled parameter is not present, the
            parameter defaults to 1 for an SBR-capable decoder. If the
            resulting output sampling rate or the computational complexity is
            not supported, the SBR tool can be disabled or run in downsampled
            mode.</t>

            <t>The timestamp resolution at RTP layer is determined by the
            rate parameter.</t>
          </list></t>

        <t>Published specification: <list hangIndent="1" style="empty">
            <t>Encoding specifications are provided in <xref target="14496-3" />.
            The RTP payload format specification
            is described in [RFCxxxx].</t>
          </list></t>

        <t>Encoding considerations: <list hangIndent="1" style="empty">
            <t>This type is only defined for transfer via RTP.</t>
          </list></t>

        <t>Security considerations: <list hangIndent="1" style="empty">
            <t>See <xref target="Security Considerations" /> of [RFCxxxx].</t>
          </list></t>

        <t>Interoperability considerations: <list hangIndent="1" style="empty">
            <t>MPEG-4 Audio provides a large and rich set of tools for the
            coding of audio objects. For effective implementation of the
            standard, subsets of the MPEG-4 Audio tool sets similar to those
            used in MPEG-4 Visual have been provided (see
            <xref target="Media Type Registration for MPEG-4 Visual" />).</t>

            <t>The audio stream SHALL be compliant with the MPEG-4 Audio
            Profile@Level specified by the parameters "profile-level-id" and
            "MPS-profile-level-id". Interoperability between a sender and a
            receiver may be achieved by specifying the parameters
            "profile-level-id" and "MPS-profile-level-id", or
            by arranging in the capability exchange procedure to set this
            parameter mutually to the same value. Furthermore, the "object"
            parameter can be used to limit the capability within the specified
            Profile@Level in capability exchange.</t>
          </list></t>

        <t>Applications which use this media type: <list hangIndent="1"
            style="empty">
            <t>Audio and video streaming and conferencing tools.</t>
          </list></t>

        <t>Additional information: none</t>

        <t>Person and email address to contact for further information:
        <list hangIndent="1" style="empty">
            <t>See Authors' Address section at the end of [RFCxxxx].</t>
          </list></t>

        <t>Intended usage: COMMON</t>

        <t>Author: <list hangIndent="1" style="empty">
            <t>See Authors' Address section at the end of [RFCxxxx].</t>
          </list></t>

        <t>Change controller: <list hangIndent="1" style="empty">
            <t>IETF Audio/Video Transport Payloads working group delegated from the IESG.</t>
          </list></t>
      </section>

      <section title="Mapping to SDP for MPEG-4 Audio"
               anchor="Mapping to SDP for MPEG-4 Audio"
               toc="default">
        <t>The Media Type audio/MP4A-LATM string is mapped to fields in
        the Session Description Protocol (SDP) <xref target="RFC4566"> </xref>, as follows:
        <list hangIndent="1" style="symbols">
            <t>The Media Type (audio) goes in SDP "m=" as the media name.</t>

            <t>The Media subtype (MP4A-LATM) goes in SDP "a=rtpmap" as the
            encoding name.</t>

            <t>The required parameter "rate" goes in "a=rtpmap" as the clock
            rate.</t>

            <t>The optional parameter "ptime" goes in SDP "a=ptime"
            attribute.</t>

            <t>The optional parameters "profile-level-id", "MPS-profile-level-id"
            and "object" go in the "a=fmtp" line to indicate the coder capability.
              <figure>
                <artwork>
   The following are some examples of the profile-level-id value:
   1 : Main Audio Profile Level 1
   9 : Speech Audio Profile Level 1
   15: High Quality Audio Profile Level 2
   30: Natural Audio Profile Level 1
   44: High Efficiency AAC Profile Level 2
   48: High Efficiency AAC v2 Profile Level 2
   55: Baseline MPEG Surround Profile (see ISO/IEC 23003-1) Level 3
                </artwork>
              </figure>
            The optional payload-format-specific parameters "bitrate",
            "cpresent", "config", "MPS-asc" and "SBR-enabled" go also in
            the "a=fmtp" line. These parameters are expressed as a
            string, in the form of a semicolon-separated list of
            parameter=value pairs.</t>
          </list></t>

        <section title="Declarative SDP Usage for MPEG-4 Audio"
                 anchor="Declarative SDP Usage for MPEG-4 Audio"
                 toc="default">
          <t>The following sections contain some examples of the media
          representation in SDP.</t>

          <t>Note that the a=fmtp line in some of the examples has been
          wrapped to fit the page; they would comprise a single
          line in the SDP file.</t>

          <section title="Example: In-band Configuration" toc="default">
          <t>In this example the audio configuration data
                  appears in the RTP payload exclusively (i.e., the MPEG-4 audio
                  configuration is known when a StreamMuxConfig element appears
                  within the RTP payload).
            <figure>
              <artwork>
   m=audio 49230 RTP/AVP 96
   a=rtpmap:96 MP4A-LATM/90000
   a=fmtp:96 object=2; cpresent=1
              </artwork>
            </figure>
          </t>
          <t>The "clock rate" is set to 90kHz. This is the default value and
                  the real audio sampling rate is known when the audio configuration
                  data is received.</t>

          </section>

          <section title="Example: 6kb/s CELP" toc="default">
          <t>6 kb/s CELP bitstreams (with an audio sampling rate of 8 kHz)
            <figure>
              <artwork>
  m=audio 49230 RTP/AVP 96
  a=rtpmap:96 MP4A-LATM/8000
  a=fmtp:96 profile-level-id=9; object=8; cpresent=0;
    config=40008B18388380
  a=ptime:20
              </artwork>
            </figure>
          </t>
          <t>In this example audio configuration data is not
          multiplexed into the RTP payload and is described only in SDP.
          Furthermore, the "clock rate" is set to the audio sampling rate.</t>
          </section>

          <section title="Example: 64 kb/s AAC LC Stereo" toc="default">
          <t>64 kb/s AAC LC stereo bitstream (with an audio sampling rate
          of 24 kHz)
            <figure>
              <artwork>
  m=audio 49230 RTP/AVP 96
  a=rtpmap:96 MP4A-LATM/24000/2
  a=fmtp:96 profile-level-id=1; bitrate=64000; cpresent=0;
    object=2; config=400026203fc0
              </artwork>
            </figure>
          </t>

          <t>In this example audio configuration data is not
          multiplexed into the RTP payload and is described only in SDP.
          Furthermore, the "clock rate" is set to the audio sampling rate.</t>

          <t>In this example, the presence of SBR cannot be determined
          by the SDP parameter set. The clock rate represents the core
          codec sampling rate. An SBR enabled decoder can use the SBR tool to
          upsample the audio data if complexity and the resulting output sampling rate permit.</t>
          </section>

          <section title="Example: Use of the SBR-enabled Parameter" toc="default">
          <t>These two examples are identical to the example above with the
          exception of the SBR-enabled parameter.
          The presence of SBR is not signaled by the SDP parameters object,
          profile-level-id and config, but instead the SBR-enabled parameter
          is present. The rate parameter and the StreamMuxConfig contain the
          core codec sampling rate.</t>

          <t>Example with "SBR-enabled=0", definitive and core codec sampling rate 24kHz:
            <figure>
              <artwork>
  m=audio 49230 RTP/AVP 96
  a=rtpmap:96 MP4A-LATM/24000/2
  a=fmtp:96 profile-level-id=1; bitrate=64000; cpresent=0;
    SBR-enabled=0; config=400026203fc0
              </artwork>
            </figure>
          </t>

          <t>Example with "SBR-enabled=1", core codec sampling rate 24kHz, definitive and SBR sampling rate 48kHz:
            <figure>
              <artwork>
  m=audio 49230 RTP/AVP 96
  a=rtpmap:96 MP4A-LATM/24000/2
  a=fmtp:96 profile-level-id=1; bitrate=64000; cpresent=0;
    SBR-enabled=1; config=400026203fc0
              </artwork>
            </figure>
          </t>

          <t> In this example, the clock rate is still 24000 and
          this information is used for RTP timestamp calculation. The
          value of 24000 is used to support old AAC decoders. This makes a
          decoder that supports only AAC understand the HE AAC coded data, although only
          plain AAC is supported.
          A HE AAC decoder is able to generate output data with the SBR sampling rate.</t>
          </section>

          <section title="Example: Hierarchical Signaling of SBR" toc="default">

          <t>When the presence of SBR is explicitly signaled by the SDP
          parameters object, profile-level-id or the config string as in the
          example below, the StreamMuxConfig contains both the core codec
          sampling rate and the SBR sampling rate.

            <figure>
              <artwork>
  m=audio 49230 RTP/AVP 96
  a=rtpmap:96 MP4A-LATM/48000/2
  a=fmtp:96 profile-level-id=44; bitrate=64000; cpresent=0;
    config=40005623101fe0; SBR-enabled=1
              </artwork>
            </figure>
          </t>

          <t>This config string uses the explicit signaling mode 2.A
          (hierarchical signaling; see <xref target="14496-3" />).
          This means that the AOT(Audio Object Type) is SBR(5) and
          SFI(Sampling Frequency Index) is 6(24000 Hz) which refers to the
          underlying core codec sampling frequency. CC(Channel Configuration)
          is stereo(2), and the ESFI(Extension Sampling Frequency Index)=3
          (48000) is referring to the sampling frequency of the extension
          tool(SBR).</t>
          </section>

          <section title="Example: HE AAC v2 Signaling" toc="default">
          <t>HE AAC v2 decoders are required to always produce a stereo
                  signal from a mono signal. Hence, there is no parameter necessary to signal
                  the presence of PS.</t>
          <t>Example with "SBR-enabled=1" and 1 channel signaled in the a=rtpmap line and within the config parameter.
                  Core codec sampling rate is 24kHz, definitive and SBR sampling rate is 48kHz.
                 Core codec channel configuration is mono, PS channel configuration is stereo.
            <figure>
              <artwork>
  m=audio 49230 RTP/AVP 110
  a=rtpmap:110 MP4A-LATM/24000/1
  a=fmtp:110 profile-level-id=15; object=2; cpresent=0;
    config=400026103fc0; SBR-enabled=1
              </artwork>
            </figure>
            </t>
          </section>

          <section title="Example: Hierarchical Signaling of PS" toc="default">
                <t>Example: 48 kHz stereo audio input:
            <figure>
              <artwork>
  m=audio 49230 RTP/AVP 110
  a=rtpmap:110 MP4A-LATM/48000/2
  a=fmtp:110 profile-level-id=48; cpresent=0; config=4001d613101fe0
              </artwork>
            </figure>
          </t>

          <t>The config parameter indicates explicit hierarchical signaling of
          PS and SBR. This configuration method is not supported by legacy AAC and HE AAC
          decoders and these are therefore unable to decode the coded data.</t>

          </section>

          <section title="Example: MPEG Surround" toc="default">
          <t>The following examples show how MPEG Surround configuration data
          can be signaled using SDP. The configuration is carried within the
          config string in the first example by using two different layers. The
          general parameters in this example are: AudioMuxVersion=1;
          allStreamsSameTimeFraming=1; numSubFrames=0; numProgram=0; numLayer=1.
          The first layer describes the HE AAC payload and signals the following
          parameters: ascLen=25; audioObjectType=2 (AAC LC);
          extensionAudioObjectType=5 (SBR); samplingFrequencyIndex=6 (24kHz);
          extensionSamplingFrequencyIndex=3 (48kHz); channelConfiguration=2 (2.0
          channels). The second layer describes the MPEG surround payload and
          specifies the following parameters: ascLen=110; AudioObjectType=30
          (MPEG Surround); samplingFrequencyIndex=3 (48kHz);
          channelConfiguration=6 (5.1 channels); sacPayloadEmbedding=1;
          SpatialSpecificConfig=(48 kHz; 32 slots; 525 tree; ResCoding=1;
          ResBands=[7,7,7,7]).</t>

          <t>In this example the signaling is carried by using two different
          LATM layers. The MPEG surround payload is carried together with the
          AAC payload in a single layer as indicated by the sacPayloadEmbedding
          Flag.
            <figure>
              <artwork>
  m=audio 49230 RTP/AVP 96
  a=rtpmap:96 MP4A-LATM/48000
  a=fmtp:96 profile-level-id=1; bitrate=64000; cpresent=0;
    SBR-enabled=1;
    config=8FF8004192B11880FF0DDE3699F2408C00536C02313CF3CE0FF0
              </artwork>
            </figure>
          </t>
          </section>

          <section title="Example: MPEG Surround with Extended SDP Parameters"
                                  toc="default">
          <t>The following example is an extension of the configuration given
          above by the MPEG Surround specific parameters. The MPS-asc parameter
          specifies the MPEG Surround Baseline Profile at Level 3 (PLI55) and
          the MPS-asc string contains the hexadecimal representation of the MPEG
          Surround ASC [audioObjectType=30 (MPEG Surround);
          samplingFrequencyIndex=0x3 (48kHz); channelConfiguration=6 (5.1
          channels); sacPayloadEmbedding=1; SpatialSpecificConfig=(48 kHz; 32
          slots; 525 tree; ResCoding=1; ResBands=[0,13,13,13])].
            <figure>
              <artwork>
  m=audio 49230 RTP/AVP 96
  a=rtpmap:96 MP4A-LATM/48000
  a=fmtp:96 profile-level-id=44; bitrate=64000; cpresent=0;
    config=40005623101fe0; MPS-profile-level-id=55;
    MPS-asc=F1B4CF920442029B501185B6DA00;
              </artwork>
            </figure>
          </t>
          </section>

          <section title="Example: MPEG Surround with Single Layer Configuration"
                                  toc="default">
          <t>The following example shows how MPEG Surround configuration data can
          be signaled using the SDP config parameter.  The configuration is
          carried within the config string using a single layer.
          The general parameters in this example are: AudioMuxVersion=1;
          allStreamsSameTimeFraming=1; numSubFrames=0; numProgram=0;
          numLayer=0. The single layer describes the combination of HE AAC
          and MPEG Surround payload and signals the following parameters:
          ascLen=101; audioObjectType=2 (AAC LC);
          extensionAudioObjectType=5 (SBR); samplingFrequencyIndex=7 (22.05kHz);
          extensionSamplingFrequencyIndex=7 (44.1kHz); channelConfiguration=2
          (2.0 channels).  A backward compatible extension according to
          <xref target="14496-3/Amd.1" /> signals the presence of MPEG surround
          payload data and specifies the following parameters:
          SpatialSpecificConfig=(44.1 kHz; 32 slots; 525 tree; ResCoding=0).</t>

          <t>In this example the signaling is carried by using a single LATM
          layer.  The MPEG surround payload is carried together with the HE AAC
          payload in a single layer.
            <figure>
              <artwork>
  m=audio 49230 RTP/AVP 96
  a=rtpmap:96 MP4A-LATM/44100
  a=fmtp:96 profile-level-id=44; bitrate=64000; cpresent=0;
    SBR-enabled=1; config=8FF8000652B920876A83A1F440884053620FF0;
    MPS-profile-level-id=55
              </artwork>
            </figure>
          </t>
          </section>
        </section>
      </section>
    </section>

    <!-- This PI places the pagebreak correctly (before the section title) in the text output. -->

    <?rfc needLines="8" ?>

    <section title="IANA Considerations" toc="default">
      <t>This document updates the media subtypes "MP4A-LATM" and "MP4V-ES"
      from RFC 3016. The new registrations are in
      <xref target="Media Type Registration for MPEG-4 Visual" /> and
      <xref target="Media Type Registration for MPEG-4 Audio" /> of this document.</t>
    </section>

    <!-- Possibly a 'Contributors' section ... -->
    <section title="Acknowledgements" toc="default">
      <t>The authors would like to thank Yoshihiro Kikuchi, Yoshinori Matsui,
      Toshiyuki Nomura, Shigeru Fukunaga and Hideaki Kimata for their work
      on RFC 3016, and Ali Begen, Keith Drage, Roni Even and Qin Wu for their
      valuable input and comments on this document.</t>
    </section>

    <section anchor="Security Considerations" title="Security Considerations" toc="default">
      <t>RTP packets using the payload format defined in this specification
      are subject to the security considerations discussed in the RTP
      specification <xref target="RFC3550" />, and in any applicable RTP profile. The main
      security considerations for the RTP packet carrying the RTP payload
      format defined within this document are confidentiality, integrity, and
      source authenticity.  Confidentiality is achieved by encryption of
      the RTP payload, and integrity of the RTP packets through a suitable
      cryptographic integrity protection mechanism. A cryptographic system
      may also allow the authentication of the source of the payload. A
      suitable security mechanism for this RTP payload format should
      provide confidentiality, integrity protection, and at least source
      authentication capable of determining whether or not an RTP packet is
      from a member of the RTP session.</t>

      <t>Note that most MPEG-4 codecs define an extension mechanism to
      transmit extra data within a stream that is gracefully skipped by
      decoders that do not support this extra data. This covert channel
      may be used to transmit unwanted data in an otherwise valid stream.</t>

      <t>The appropriate mechanism to provide security to RTP and
      payloads following this may vary. It is dependent on the
      application, the transport, and the signaling protocol employed.
      Therefore, a single mechanism is not sufficient, although if
      suitable, the usage of the Secure Real-time Transport Protocol (SRTP)
      <xref target="RFC3711" /> is recommended. Other mechanisms that may
      be used are IPsec <xref target="RFC4301" /> and Transport Layer
      Security (TLS) <xref target="RFC5246" /> (e.g., for RTP over TCP),
      but other alternatives may also exist.</t>

      <t>This RTP payload format and its media decoder do not exhibit any
      significant non-uniformity in the receiver-side computational
      complexity for packet processing, and thus are unlikely to pose a
      denial-of-service threat due to the receipt of pathological data.
      The complete MPEG-4 system allows for transport of a wide range of
      content, including Java applets (MPEG-J) and scripts. Since this payload
      format is restricted to audio and video streams, it is not possible to
      transport such active content in this format.</t>
    </section>

    <section title="Differences to RFC 3016" toc="default">
      <t>The RTP payload format for MPEG-4 Audio as specified in RFC 3016 is used by the
      <xref target="3GPP" >3GPP PSS service</xref>. However,
      there are some misalignments between RFC 3016 and the 3GPP PSS
      specification that are addressed by this update:
      <list hangIndent="0" style="symbols">
        <t>The audio payload format (LATM) referenced in this document is the newer
           format specified in <xref target="14496-3" />, which is binary
           compatible to the format used in <xref target="3GPP" />. This newer format
           is not binary compatible with the LATM referenced in RFC 3016, which is
           specified in <xref target="14496-3:1999/Amd.1:2000" />. </t>

        <t>The audio signaling format (StreamMuxConfig) referenced in this document
           is binary compatible to the format used in <xref target="3GPP" />. The
           StreamMuxConfig element has also been revised by MPEG since RFC 3016.</t>

        <t>The use of an audio parameter "SBR-enabled" is now defined in this document,
           which is used by 3GPP implementations <xref target="3GPP" />.
           RFC 3016 does not define this parameter.</t>

        <t>The rate parameter is defined unambiguously in this document for the case of
           presence of SBR (Spectral Band Replication). In RFC 3016 the definition
           of the rate parameter is ambiguous.</t>

        <t>The number of audio channels parameter is defined unambiguously
           in this document for the case of presence of PS (Parametric Stereo).
           In RFC 3016 PS is not defined yet.</t>
      </list>
      Furthermore, some comments have been addressed and signaling
      support for MPEG surround <xref target="23003-1" /> was added.</t>

      <t>Below a summary of the changes in requirements by this update:
      <list hangIndent="0" style="symbols">
        <t>In the dynamic assignment of RTP payload types for scalable MPEG-4
        Audio streams, the server SHALL assign a different value to each layer.</t>
        <t>The dependency relationships between the enhanced layer and the
        base layer for scalable MPEG-4 Audio streams MUST be signaled as
        specified in <xref target="RFC5583" />.</t>
        <t>If the size of an audioMuxElement is so large that the size
        of the RTP packet containing it would exceed the size of the path-MTU,
        the audioMuxElement SHALL be fragmented and spread across multiple packets.</t>
        <t>The receiver MUST ignore any unspecified parameter, to ensure that
        additional parameters can be added in any future revision of this
        specification.</t>
      </list>
      </t>
    </section>
  </middle>

  <!-- ***************************************************************** -->

  <!--  *****BACK MATTER ***** -->

  <back>
    <!-- References split into informative and normative -->

    <!-- There are 2 ways to insert reference entries from the citation libraries:
     1. define an ENTITY at the top, and use "ampersand character"RFC2629; here (as shown)
     2. simply use a PI "less than character"?rfc include="reference.RFC.2119.xml"?> here
        (for I-Ds: include="reference.I-D.narten-iana-considerations-rfc2434bis.xml")

     Both are cited textually in the same manner: by using xref elements.
     If you use the PI option, xml2rfc will, by default, try to find included files in the same
     directory as the including file. You can also define the XML_LIBRARY environment variable
     with a value containing a set of directories to search.  These can be either in the local
     filing system or remote ones accessed by http (http://domain/dir/... ).-->

    <references title="Normative References">
      &RFC2119;
      &RFC3550;
      &RFC4288;
      &RFC4566;
      &RFC4629;
      &RFC4855;
      &RFC5583;

      <reference anchor="14496-2">
        <front>
          <title>ISO/IEC International Standard 14496-2 - Coding of
          audio-visual objects, Part 2: Visual</title>

          <author initials="" surname="MPEG">
            <organization />
          </author>

          <date year="2003" />
        </front>
      </reference>

      <reference anchor="14496-3">
        <front>
          <title>ISO/IEC International Standard 14496-3 - Coding of
          audio-visual objects, Part 3 Audio</title>

          <author initials="" surname="MPEG">
            <organization />
          </author>

          <date year="2009" />
        </front>
      </reference>

      <reference anchor="14496-3/Amd.1">
        <front>
          <title>ISO/IEC International Standard 14496-3 - Coding of
          audio-visual objects, Part 3: Audio, Amendment 1: HD-AAC
          profile and MPEG Surround signaling</title>

          <author initials="" surname="MPEG">
            <organization />
          </author>

          <date year="2009" />
        </front>
      </reference>

      <reference anchor="23003-1">
        <front>
          <title>ISO/IEC International Standard 23003-1 - MPEG Surround (MPEG
          D)</title>

          <author initials="" surname="MPEG">
            <organization />
          </author>

          <date year="2007" />
        </front>
      </reference>
    </references>

    <references title="Informative References">

      <reference anchor="H245">
        <front>
          <title>International Telecommunications Union, "CONTROL PROTOCOL
          FOR MULTIMEDIA COMMUNICATION", ITU Recommendation H.245, December 2009</title>

          <author initials="" surname="ITU">
            <organization />
          </author>

          <date year="2009" />
        </front>
      </reference>

      <reference anchor="H261">
        <front>
          <title>International Telecommunications Union, "Video codec for
          audiovisual services at p x 64 kbit/s", ITU Recommendation
          H.261, March 1993</title>

          <author initials="" surname="ITU">
            <organization />
          </author>

          <date year="1993" />
        </front>
      </reference>

      <reference anchor="H323">
        <front>
          <title>International Telecommunications Union, "Packet-based
          multimedia communications systems", ITU Recommendation
          H.323, December 2009</title>

          <author initials="" surname="ITU">
            <organization />
          </author>

          <date year="2009" />
        </front>
      </reference>

      <reference anchor="14496-1">
        <front>
          <title>ISO/IEC International Standard 14496-1 - Coding of
          audio-visual objects, Part 1 Systems</title>

          <author initials="" surname="MPEG">
            <organization />
          </author>

          <date year="2004" />
        </front>
      </reference>

      <reference anchor="14496-3:1999/Amd.1:2000">
        <front>
          <title>ISO/IEC International Standard 14496-3 - Coding of
          audio-visual objects, Part 3 Audio, Amendment 1: Audio extensions</title>

          <author initials="" surname="MPEG">
            <organization />
          </author>

          <date year="2000" />
        </front>
      </reference>

      <reference anchor="14496-12">
        <front>
          <title>ISO/IEC International Standard 14496-12 - Coding of
          audio-visual objects, Part 12 ISO base media file format</title>

          <author initials="" surname="MPEG">
            <organization />
          </author>
        </front>
      </reference>

      <reference anchor="14496-14">
        <front>
          <title>ISO/IEC International Standard 14496-14 - Coding of
          audio-visual objects, Part 14 MP4 file format</title>

          <author initials="" surname="MPEG">
            <organization />
          </author>
        </front>
      </reference>

      &RFC2198;
      &RFC3016;
      &RFC3261;
      &RFC3640;
      &RFC3711;
      &RFC4301;
      &RFC4628;
      &RFC5109;
      &RFC5246;
      &RFC5691;

      <reference anchor="3GPP">
        <front>
          <title>3rd Generation Partnership Project;
                                                Technical Specification Group Services and System Aspects;
                                                Transparent end-to-end Packet-switched
                                                Streaming Service (PSS);
                                                Protocols and codecs
                                                (Release 9)</title>

          <author initials="" surname="3GPP">
            <organization />
          </author>
          <date year="2010" month="December"/>
        </front>
        <seriesInfo name="3GPP TS" value="26.234 V9.5.0" />
      </reference>

    </references>

  </back>
</rfc>

PAFTECH AB 2003-20262026-04-24 02:56:45