<?xml version="1.0" encoding="UTF-8"?>
<!-- This template is for creating an Internet Draft using xml2rfc,
which is available here: http://xml.resource.org. -->
<!DOCTYPE rfc SYSTEM "rfc2629.dtd" [
<!-- One method to get references from the online citation libraries.
There has to be one entity for each item to be referenced.
An alternate method (rfc include) is described in the references. -->
<!ENTITY RFC2119 SYSTEM "http://xml2rfc.ietf.org/public/rfc/bibxml/reference.RFC.2119.xml">
<!ENTITY RFC0970 SYSTEM "http://xml2rfc.ietf.org/public/rfc/bibxml/reference.RFC.0970.xml">
<!ENTITY RFC2309 SYSTEM "http://xml2rfc.ietf.org/public/rfc/bibxml/reference.RFC.2309.xml">
<!ENTITY RFC2474 SYSTEM "http://xml2rfc.ietf.org/public/rfc/bibxml/reference.RFC.2474.xml">
<!ENTITY RFC3540 SYSTEM "http://xml2rfc.ietf.org/public/rfc/bibxml/reference.RFC.3540.xml">
<!ENTITY RFC6660 SYSTEM "http://xml2rfc.ietf.org/public/rfc/bibxml/reference.RFC.6660.xml">
<!ENTITY RFC2983 SYSTEM "http://xml2rfc.ietf.org/public/rfc/bibxml/reference.RFC.2983.xml">
<!ENTITY RFC3246 SYSTEM "http://xml2rfc.ietf.org/public/rfc/bibxml/reference.RFC.3246.xml">
<!ENTITY RFC3168 SYSTEM "http://xml2rfc.ietf.org/public/rfc/bibxml/reference.RFC.3168.xml">
<!ENTITY RFC3649 SYSTEM "http://xml2rfc.ietf.org/public/rfc/bibxml/reference.RFC.3649.xml">
<!ENTITY RFC4340 SYSTEM "http://xml2rfc.ietf.org/public/rfc/bibxml/reference.RFC.4340.xml">
<!ENTITY RFC4960 SYSTEM "http://xml2rfc.ietf.org/public/rfc/bibxml/reference.RFC.4960.xml">
<!ENTITY RFC4774 SYSTEM "http://xml2rfc.ietf.org/public/rfc/bibxml/reference.RFC.4774.xml">
<!ENTITY RFC5562 SYSTEM "http://xml2rfc.ietf.org/public/rfc/bibxml/reference.RFC.5562.xml">
<!ENTITY RFC5681 SYSTEM "http://xml2rfc.ietf.org/public/rfc/bibxml/reference.RFC.5681.xml">
<!ENTITY RFC6077 SYSTEM "http://xml2rfc.ietf.org/public/rfc/bibxml/reference.RFC.6077.xml">
<!ENTITY RFC6679 SYSTEM "http://xml2rfc.ietf.org/public/rfc/bibxml/reference.RFC.6679.xml">
<!ENTITY I-D.ietf-tsvwg-ecn-encap-guidelines SYSTEM "http://xml2rfc.ietf.org/public/rfc/bibxml3/reference.I-D.ietf-tsvwg-ecn-encap-guidelines.xml">
<!ENTITY RFC7560 SYSTEM "http://xml2rfc.ietf.org/public/rfc/bibxml/reference.RFC.7560.xml">
<!ENTITY I-D.ietf-tcpm-accurate-ecn SYSTEM "http://xml2rfc.ietf.org/public/rfc/bibxml3/reference.I-D.ietf-tcpm-accurate-ecn.xml">
<!ENTITY I-D.ietf-aqm-pie SYSTEM "http://xml2rfc.ietf.org/public/rfc/bibxml3/reference.I-D.ietf-aqm-pie.xml">
<!ENTITY I-D.ietf-aqm-fq-codel SYSTEM "http://xml2rfc.ietf.org/public/rfc/bibxml3/reference.I-D.ietf-aqm-fq-codel.xml">
<!ENTITY I-D.ietf-tcpm-dctcp SYSTEM "http://xml2rfc.ietf.org/public/rfc/bibxml3/reference.I-D.ietf-tcpm-dctcp.xml">
<!ENTITY I-D.ietf-tcpm-cubic SYSTEM "http://xml2rfc.ietf.org/public/rfc/bibxml3/reference.I-D.ietf-tcpm-cubic.xml">
<!ENTITY I-D.sridharan-tcpm-ctcp SYSTEM "http://xml2rfc.ietf.org/public/rfc/bibxml3/reference.I-D.sridharan-tcpm-ctcp.xml">
<!ENTITY I-D.moncaster-tcpm-rcv-cheat SYSTEM "http://xml2rfc.ietf.org/public/rfc/bibxml3/reference.I-D.moncaster-tcpm-rcv-cheat.xml">
<!ENTITY RFC7713 SYSTEM "http://xml2rfc.ietf.org/public/rfc/bibxml/reference.RFC.7713.xml">
<!ENTITY I-D.briscoe-aqm-dualq-coupled SYSTEM "http://xml2rfc.ietf.org/public/rfc/bibxml3/reference.I-D.briscoe-aqm-dualq-coupled.xml">
<!ENTITY I-D.briscoe-tsvwg-ecn-l4s-id SYSTEM "http://xml2rfc.ietf.org/public/rfc/bibxml3/reference.I-D.briscoe-tsvwg-ecn-l4s-id.xml">
<!ENTITY I-D.stewart-tsvwg-sctpecn SYSTEM "http://xml2rfc.ietf.org/public/rfc/bibxml3/reference.I-D.stewart-tsvwg-sctpecn.xml">
<!ENTITY I-D.khademi-tcpm-alternativebackoff-ecn SYSTEM "http://xml2rfc.ietf.org/public/rfc/bibxml3/reference.I-D.khademi-tcpm-alternativebackoff-ecn.xml">
<!ENTITY I-D.you-encrypted-traffic-management SYSTEM "http://xml2rfc.ietf.org/public/rfc/bibxml3/reference.I-D.you-encrypted-traffic-management.xml">
]>
<?xml-stylesheet type='text/xsl' href='rfc2629.xslt' ?>
<!-- used by XSLT processors -->
<!-- For a complete list and description of processing instructions (PIs),
please see http://xml.resource.org/authoring/README.html. -->
<!-- Below are generally applicable Processing Instructions (PIs) that most I-Ds might want to use.
(Here they are set differently than their defaults in xml2rfc v1.32) -->
<?rfc strict="yes" ?>
<!-- give errors regarding ID-nits and DTD validation -->
<!-- control the table of contents (ToC) -->
<?rfc toc="yes"?>
<!-- generate a ToC -->
<?rfc tocdepth="4"?>
<!-- the number of levels of subsections in ToC. default: 3 -->
<!-- control references -->
<?rfc symrefs="yes"?>
<!-- use symbolic references tags, i.e, [RFC2119] instead of [1] -->
<?rfc sortrefs="yes" ?>
<!-- sort the reference entries alphabetically -->
<!-- control vertical white space
(using these PIs as follows is recommended by the RFC Editor) -->
<?rfc compact="yes" ?>
<!-- do not start each main section on a new page -->
<?rfc subcompact="no" ?>
<!-- keep one blank line between list items -->
<!-- end of list of popular I-D processing instructions -->
<rfc category="info"
docName="draft-briscoe-tsvwg-aqm-tcpm-rmcat-l4s-problem-01"
ipr="trust200902" obsoletes="">
<!-- category values: std, bcp, info, exp, and historic
ipr values: trust200902, noModificationTrust200902, noDerivativesTrust200902,
or pre5378Trust200902
you can add the attributes updates="NNNN" and obsoletes="NNNN"
they will automatically be output with "(if approved)" -->
<!-- ***** FRONT MATTER ***** -->
<front>
<!-- The abbreviated title is used in the page header - it is only necessary if the
full title is longer than 39 characters -->
<title abbrev="L4S Problem Statement">Low Latency, Low Loss, Scalable
Throughput (L4S) Internet Service: Problem Statement</title>
<author fullname="Bob Briscoe" initials="B." role="editor"
surname="Briscoe">
<organization>Simula Research Lab</organization>
<address>
<postal>
<street/>
</postal>
<email>ietf@bobbriscoe.net</email>
<uri>http://bobbriscoe.net/</uri>
</address>
</author>
<author fullname="Koen De Schepper" initials="K." surname="De Schepper">
<organization>Nokia Bell Labs</organization>
<address>
<postal>
<street/>
<city>Antwerp</city>
<country>Belgium</country>
</postal>
<email>koen.de_schepper@nokia.com</email>
<uri>https://www.bell-labs.com/usr/koen.de_schepper</uri>
</address>
</author>
<author fullname="Marcelo Bagnulo" initials="M." surname="Bagnulo Braun">
<organization>Universidad Carlos III de Madrid</organization>
<address>
<postal>
<street>Av. Universidad 30</street>
<city>Leganes, Madrid 28911</city>
<country>Spain</country>
</postal>
<phone>34 91 6249500</phone>
<email>marcelo@it.uc3m.es</email>
<uri>http://www.it.uc3m.es</uri>
</address>
</author>
<date year="2016"/>
<area>Transport</area>
<workgroup>Transport Services (tsv)</workgroup>
<workgroup>Active Queue Management (aqm)</workgroup>
<workgroup>TCP Maintenance (tcpm)</workgroup>
<workgroup>Real-Time Media Congestion Avoidance Techniques
(rmcat)</workgroup>
<keyword>Internet-Draft</keyword>
<keyword>I-D</keyword>
<abstract>
<t>This document motivates a new service that the Internet could provide
to eventually replace best efforts for all traffic: Low Latency, Low
Loss, Scalable throughput (L4S). It is becoming common for <spanx
style="emph">all</spanx> (or most) applications being run by a user at
any one time to require low latency. However, the only solution the IETF
can offer for ultra-low queuing delay is Diffserv, which only favours a
minority of packets at the expense of others. In extensive testing the
new L4S service keeps average queuing delay under a millisecond for
<spanx style="emph">all</spanx> applications even under very heavy load,
without sacrificing utilization; and it keeps congestion loss to zero.
It is becoming widely recognized that adding more access capacity gives
diminishing returns, because latency is becoming the critical problem.
Even with a high capacity broadband access, the reduced latency of L4S
remarkably and consistently improves performance under load for
applications such as interactive video, conversational video, voice,
Web, gaming, instant messaging, remote desktop and cloud-based apps
(even when all being used at once over the same access link). The
insight is that the root cause of queuing delay is in TCP, not in the
queue. By fixing the sending TCP (and other transports) queuing latency
becomes so much better than today that operators will want to deploy the
network part of L4S to enable new products and services. Further, the
network part is simple to deploy - incrementally with zero-config. Both
parts, sender and network, ensure coexistence with other legacy traffic.
At the same time L4S solves the long-recognized problem with the future
scalability of TCP throughput. </t>
<t>This document explains the underlying problems that have been
preventing the Internet from enjoying such performance improvements. It
then outlines the parts necessary for a solution and the steps that will
be needed to standardize them. It points out opportunities that will
open up, and sets out some likely use-cases, including ultra-low latency
interaction with cloud processing over the public Internet.</t>
</abstract>
</front>
<middle>
<section anchor="l4sps_intro" title="Introduction">
<section title="The Application Performance Problem">
<t>It is increasingly common for <spanx style="emph">all</spanx> of a
user's applications at any one time to require low delay: interactive
Web, Web services, voice, conversational video, interactive video,
instant messaging, online gaming, remote desktop and cloud-based
applications. In the last decade or so, much has been done to reduce
propagation delay by placing caches or servers closer to users.
However, queuing remains a major, albeit intermittent, component of
latency. When present it typically doubles the path delay from that
due to the base speed-of-light. Low loss is also important because,
for interactive applications, losses translate into even longer
retransmission delays.</t>
<t>It has been demonstrated that, once access network bit rates reach
levels now common in the developed world, increasing capacity offers
diminishing returns if latency (delay) is not addressed.
Differentiated services (Diffserv) offers Expedited Forwarding <xref
target="RFC3246"/> for some packets at the expense of others, but this
is not applicable when all (or most) of a user's applications require
low latency.</t>
<t>Therefore, the goal is an Internet service with ultra-Low queueing
Latency, ultra-Low Loss and Scalable throughput (L4S) - for <spanx
style="emph">all</spanx> traffic. Having motivated the goal of 'L4S
for all', this document enumerates the problems that have to be
overcome to reach it.</t>
<t>It must be said that queuing delay only degrades performance
infrequently <xref target="Hohlfeld14"/>. It only occurs when a large
enough capacity-seeking (e.g. TCP) flow is running alongside the
user's traffic in the bottleneck link, which is typically in the
access network. Or when the low latency application is itself a large
capacity-seeking flow (e.g. interactive video). At these times, the
performance improvement must be so remarkable that network operators
will be motivated to deploy it.</t>
</section>
<section title="The Technology Problem">
<t>Active Queue Management (AQM) is part of the solution to queuing
under load. AQM improves performance for all traffic, but there is a
limit to how much queuing delay can be reduced by solely changing the
network; without addressing the root of the problem.</t>
<t>The root of the problem is the presence of standard TCP congestion
control (Reno <xref target="RFC5681"/>) or compatible variants (e.g.
TCP Cubic <xref target="I-D.ietf-tcpm-cubic"/>). We shall call this
family of congestion controls 'Classic' TCP. It has been demonstrated
that if the sending host replaces Classic TCP with a 'Scalable'
alternative, when a suitable AQM is deployed in the network the
performance under load of all the above interactive applications can
be stunningly improved. For instance, queuing delay under heavy load
with the example DCTCP/DualQ solution cited below is roughly 1
millisecond (1 ms) at the 99th percentile without losing link
utilization. This compares with 5 to 20 ms on <spanx style="emph">average</spanx>
with a Classic TCP and current state-of-the-art AQMs such as
fq_CoDel <xref target="I-D.ietf-aqm-fq-codel"/> or PIE <xref
target="I-D.ietf-aqm-pie"/>. Also, with a Classic TCP, 5 ms of queuing
is usually only possible by losing some utilization.</t>
<t>It has been convincingly demonstrated <xref target="DCttH15"/> that
it is possible to deploy such an L4S service alongside the existing
best efforts service so that all of a user's applications can shift to
it when their stack is updated. Access networks are typically designed
with one link as the bottleneck for each site (which might be a home,
small enterprise or mobile device), so deployment at a single node
should give nearly all the benefit. Although the main incremental
deployment problem has been solved, and the remaining work seems
straightforward, there may need to be changes in approach during the
process of engineering a complete solution.</t>
<t>There are three main parts to the L4S approach (illustrated in
<xref target="l4sps_fig_components"/>):<list style="hanging">
<t hangText="2) Network:">The L4S service needs to be isolated
from the queuing latency of the Classic service. However, the two
should be able to freely share a common pool of capacity. This is
because there is no way to predict how many flows at any one time
might use each service and capacity in access networks is too
scarce to partition into two. So a 'semi-permeable' membrane is
needed that partitions latency but not bandwidth. The Dual Queue
Coupled AQM <xref target="I-D.briscoe-aqm-dualq-coupled"/> is an
example of such a semi-permeable membrane.<vspace
blankLines="1"/>Per-flow queuing such as in <xref
target="I-D.ietf-aqm-fq-codel"/> could be used, but it partitions
both latency and bandwidth between every e2e flow. So it is rather
overkill, which brings disadvantages (see <xref
target="l4sps_why-not"/>), not least that thousands of queues are
needed when two are sufficient.</t>
<t hangText="1) Protocol:">A host needs to distinguish L4S and
Classic packets with an identifier so that the network can
classify them into their separate treatments. <xref
target="I-D.briscoe-tsvwg-ecn-l4s-id"/> considers various
alternative identifiers, and concludes that all alternatives
involve compromises, but the ECT(1) codepoint of the ECN field is
a workable solution.</t>
<t hangText="3) Host:">Scalable congestion controls already exist.
They solve the scaling problem with TCP first pointed out in <xref
target="RFC3649"/>. The one used most widely (in controlled
environments) is Data Centre TCP (DCTCP <xref
target="I-D.ietf-tcpm-dctcp"/>), which has been implemented and
deployed in Windows Server Editions (since 2012), in Linux and in
FreeBSD. Although DCTCP as-is 'works' well over the public
Internet, most implementations lack certain safety features that
will be necessary once it is used outside controlled environments
like data centres (see later). A similar scalable congestion
control will also need to be transplanted into protocols other
than TCP (SCTP, RTP/RTCP, RMCAT, etc.)</t>
</list></t>
<figure align="center" anchor="l4sps_fig_components"
title="Components of an L4S Solution: 1) Packet Identification Protocol; 2) Isolation in separate network queues; and 3) Scalable Sending Host">
<artwork align="center"><![CDATA[ (1) (2)
.-------^------. .--------------^-------------------.
,-(3)-----. ______
; ________ : L4S --------. | |
:|Scalable| : _\ ||___\_| mark |
:| sender | : __________ / / || / |______|\ _________
:|________|\; | |/ --------' ^ \1| |
`---------'\__| IP-ECN | Coupling : \|priority |_\
________ / |Classifier| : /|scheduler| /
|Classic |/ |__________|\ --------. ___:__ / |_________|
| sender | \_\ || | |||___\_| mark/|/
|________| / || | ||| / | drop |
Classic --------' |______|
]]></artwork>
</figure>
</section>
<section anchor="l4sps_Terminology" title="Terminology">
<t>The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT",
"SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this
document are to be interpreted as described in <xref
target="RFC2119"/>. In this document, these words will appear with
that interpretation only when in ALL CAPS. Lower case uses of these
words are not to be interpreted as carrying RFC-2119 significance.</t>
<t><list style="hanging">
<t hangText="Classic service:">The 'Classic' service is intended
for all the congestion control behaviours that currently co-exist
with TCP Reno (e.g. TCP Cubic, Compound, SCTP, etc).</t>
<t
hangText="Low-Latency, Low-Loss and Scalable (L4S) service:">The
'L4S' service is intended for traffic from scalable TCP algorithms
such as Data Centre TCP. But it is also more general—it will
allow a set of congestion controls with similar scaling properties
to DCTCP (e.g. Relentless <xref target="Mathis09"/>) to
evolve.<vspace blankLines="1"/>Both Classic and L4S services can
cope with a proportion of unresponsive or less-responsive traffic
as well (e.g. DNS, VoIP, etc).</t>
<t hangText="Scalable Congestion Control:">A congestion control
where flow rate is inversely proportional to the level of
congestion signals. Then, as flow rate scales, the number of
congestion signals per round trip remains invariant, maintaining
the same degree of control. For instance, DCTCP averages 2
congestion signals per round-trip whatever the flow rate.</t>
<t hangText="Classic Congestion Control:">A congestion control
with a flow rate compatible with standard TCP Reno <xref
target="RFC5681"/>. With Classic congestion controls, as capacity
increases enabling higher flow rates, the number of round trips
between congestion signals (losses or ECN marks) rises in
proportion to the flow rate. So control of queuing and/or
utilization becomes very slack. For instance, with 1500 B packets
and an RTT of 18 ms, as TCP Reno flow rate increases from 2 to 100
Mb/s the number of round trips between congestion signals rises
proportionately, from 2 to 100. <vspace blankLines="1"/>The
default congestion control in Linux (TCP Cubic) is Reno-compatible
for most scenarios expected for some years. For instance, with a
typical domestic round-trip time (RTT) of 18 ms, TCP Cubic only
switches out of Reno-compatibility mode once the flow rate
approaches 1 Gb/s. For a typical data centre RTT of 1 ms, the
switch-over point is theoretically 1.3 Tb/s. However, with a less
common transcontinental RTT of 100 ms, it only remains
Reno-compatible up to 13 Mb/s. All examples assume 1,500 B
packets.</t>
<t hangText="Classic ECN:">The original proposed standard Explicit
Congestion Notification (ECN) protocol <xref target="RFC3168"/>,
which requires ECN signals to be treated the same as drops, both
when generated in the network and when responded to by the
sender.</t>
<t hangText="Site:">A home, mobile device, small enterprise or
campus, where the network bottleneck is typically the access link
to the site. Not all network arrangements fit this model but it is
a useful, widely applicable generalisation.</t>
</list></t>
</section>
<section title="The Standardization Problem">
<t><list counter="0" style="hanging">
<t hangText="0) Architecture:">The first step will be to
articulate the structure and interworking requirements of the set
of parts that would satisfy the overall application performance
requirements.</t>
</list>Then specific interworking aspects of the following three
component parts will need to be defined:<list style="hanging">
<t hangText="1) Protocol:"><list style="letters">
<t><xref target="I-D.briscoe-tsvwg-ecn-l4s-id"/> recommends
ECT(1) is used as the identifier to classify L4S and Classic
packets into their separate treatments, as required by <xref
target="RFC4774"/>. The draft also points out that the
original experimental assignment of this codepoint as an ECN
nonce <xref target="RFC3540"/> needs to be made obsolete (it
was never deployed, and it offers no security benefit now that
deployment is optional).</t>
<t>An essential aspect of a scalable congestion control is the
use of explicit congestion signals rather than losses, because
the signals need to be sent immediately and
frequently—too often to use drops. 'Classic' ECN <xref
target="RFC3168"/> requires an ECN signal to be treated the
same as a drop, both when it is generated in the network and
when it is responded to by hosts. L4S allows networks and
hosts to support two separate meanings for ECN. So the
standards track <xref target="RFC3168"/> will need to be
updated to allow ECT(1) packets to depart from the 'same as
drop' constraint.</t>
</list></t>
<t hangText="2) Network:">The Dual Queue Coupled AQM has been
specified as generically as possible <xref
target="I-D.briscoe-aqm-dualq-coupled"/> as a 'semi-permeable'
membrane without specifying the particular AQMs to use in the two
queues. An informational appendix of the draft is provided for
pseudocode examples of different possible AQM approaches.
Initially a zero-config variant of RED called Curvy RED was
implemented, tested and documented. A variant of PIE has been
implemented and tested and is about to be documented. The aim is
for designers to be free to implement diverse ideas. So the brief
normative body of the draft only specifies the minimum constraints
an AQM needs to comply with to ensure that the L4S and Classic
services will coexist.</t>
<t hangText="3) Host:"><list style="letters">
<t>Data Centre TCP is the most widely used example of a
scalable congestion control. It is being documented in the
TCPM WG as an informational record of the protocol currently
in use <xref target="I-D.ietf-tcpm-dctcp"/>. It will be
necessary to define a number of safety features for a variant
usable on the public Internet. A draft list of these, known as
the TCP Prague requirements, has been drawn up (see <xref
target="l4sps_tcp-prague-reqs"/>).</t>
<t>Transport protocols other than TCP use various congestion
controls designed to be friendly with Classic TCP. It will be
necessary to implement scalable variants of each of these
transport behaviours before they can use the L4S service. The
following standards track RFCs currently define these
protocols, and they will need to be updated to allow a
different congestion response, which they will have to
indicate by using the ECT(1) codepoint: ECN in TCP <xref
target="RFC3168"/>, in SCTP <xref target="RFC4960"/>, in RTP
<xref target="RFC6679"/>, and in DCCP <xref
target="RFC4340"/>.</t>
<t>ECN feedback is sufficient for L4S in some transport
protocols (RTCP, DCCP) but not others:<list style="symbols">
<t>For the case of TCP, the feedback protocol for ECN
embeds the assumption from Classic ECN that it is the same
as drop, making it unusable for a scalable TCP. Therefore,
the implementation of TCP receivers will have to be
upgraded <xref target="RFC7560"/>. Work to standardize
more accurate ECN feedback for TCP (AccECN <xref
target="I-D.ietf-tcpm-accurate-ecn"/>) is already in
progress.</t>
<t>ECN feedback is only roughly sketched in an appendix of
the SCTP specification. A fuller specification has been
proposed <xref target="I-D.stewart-tsvwg-sctpecn"/>, which
would need to be implemented and deployed.</t>
</list></t>
</list></t>
</list></t>
<t>Currently, the new specification of the ECN protocol <xref
target="I-D.briscoe-tsvwg-ecn-l4s-id"/> has been written for the
experimental track. Perhaps a better approach would be to make this a
standards track protocol draft that updates the definition of ECT(1)
in all the above standards track RFCs and obsoletes its experimental
use for the ECN nonce. Then experimental specifications of example
network (AQM) and host (congestion control) algorithms can be
written.</t>
</section>
</section>
<section title="Rationale">
<t/>
<section title="Why These Primary Components?">
<t><list style="hanging">
<t hangText="Explicit congestion signalling (protocol):">Explicit
congestion signalling is a key part of the L4S approach. In
contrast, use of drop as a congestion signal creates a tension
because drop is both a useful signal (more would reduce delay) and
an impairment (less would reduce loss). Explicit congestion
signals can be used many times per round trip, to keep tight
control, without any impairment. Under heavy load, even more
explicit signals can be applied so the queue can be kept short
whatever the load. Whereas state-of-the-art AQMs have to introduce
very high packet drop at high load to keep the queue short.
Further, TCP's sawtooth reduction can be smaller, and therefore
return to the operating point more often, without worrying that
this causes more signals (one at the top of each smaller
sawtooth). The consequent smaller amplitude sawteeth fit between a
very shallow marking threshold and an empty queue, so delay
variation can be very low, without risk of under-utilization.
<vspace blankLines="1"/>All the above makes it clear that explicit
congestion signalling is only advantageous for latency if it does
not have to be considered 'the same as' drop (as required with
Classic ECN <xref target="RFC3168"/>). Before Classic ECN was
standardized, there were various proposals to give an ECN mark a
different meaning from drop. However, there was no particular
reason to agree on any one of the alternative meanings, so 'the
same as drop' was the only compromise that could be reached. RFC
3168 contains a statement that:<list style="empty">
<t>"An environment where all end nodes were ECN-Capable could
allow new criteria to be developed for setting the CE
codepoint, and new congestion control mechanisms for end-node
reaction to CE packets. However, this is a research issue, and
as such is not addressed in this document."</t>
</list></t>
<t
hangText="Latency isolation with coupled congestion notification (network):">Using
just two queues is not essential to L4S (more would be possible),
but it is the simplest way to isolate all the L4S traffic that
keeps latency low from all the legacy Classic traffic that does
not.<vspace blankLines="1"/>Similarly, coupling the congestion
notification between the queues is not necessarily essential, but
it is a clever and simple way to allow senders to determine their
rate, packet-by-packet, rather than be overridden by a network
scheduler. Because otherwise a network scheduler would have to
inspect at least transport layer headers, and it would have to
continually assign a rate to each flow without any easy way to
understand application intent.</t>
<t hangText="L4S packet identifier (protocol):">Once there are at
least two separate treatments in the network, hosts need an
identifier at the IP layer to distinguish which treatment they
intend to use.</t>
<t hangText="Scalable congestion notification (host):">A scalable
congestion control keeps the signalling frequency high so that
rate variations can be small when signalling is stable, and rate
can track variations in available capacity as rapidly as possible
otherwise.</t>
</list></t>
</section>
<section anchor="l4sps_why-not" title="Why Not Alternative Approaches?">
<t>All the following approaches address some part of the same problem
space as L4S. In each case, it is shown that L4S complements them or
improves on them, rather than being a mutually exclusive
alternative:<list style="hanging">
<t hangText="Diffserv:">Diffserv addresses the problem of
bandwidth apportionment for important traffic as well as queuing
latency for delay-sensitive traffic. L4S solely addresses the
problem of queuing latency. Diffserv will still be necessary where
important traffic requires priority (e.g. for commercial reasons,
or for protection of critical infrastructure traffic).
Nonetheless, if there are Diffserv classes for important traffic,
the L4S approach can provide low latency for <spanx style="emph">all</spanx>
traffic within each Diffserv class (including the case where there
is only one Diffserv class).<vspace blankLines="1"/>Also, as
already explained, Diffserv only works for a small subset of the
traffic on a link. It is not applicable when all the applications
in use at one time at a single site (home, small business or
mobile device) require low latency. Also, because L4S is for all
traffic, it needs none of the management baggage (traffic
policing, traffic contracts) associated with favouring some
packets over others. This baggage has held Diffserv back from
widespread end-to-end deployment.</t>
<t hangText="State-of-the-art AQMs:">AQMs such as PIE and fq_CoDel
give a significant reduction in queuing delay relative to no AQM
at all. The L4S work is intended to complement these AQMs, and we
definitely do not want to distract from the need to deploy them as
widely as possible. Nonetheless, without addressing the large
saw-toothing rate variations of Classic congestion controls, AQMs
alone cannot reduce queuing delay too far without significantly
reducing link utilization. The L4S approach resolves this tension
by ensuring hosts can minimize the size of their sawteeth without
appearing so aggressive to legacy flows that they starve.</t>
<t hangText="Per-flow queuing:">Similarly per-flow queuing is not
incompatible with the L4S approach. However, one queue for every
flow can be thought of as overkill compared to the minimum of two
queues for all traffic needed for the L4S approach. The overkill
of per-flow queuing has side-effects:<list style="letters">
<t>fq makes high performance networking equipment costly
(processing and memory) - in contrast dual queue code can be
very simple;</t>
<t>fq requires packet inspection into the end-to-end transport
layer, which doesn't sit well alongside encryption for privacy
- in contrast a dual queue only operates at the IP layer;</t>
<t>fq decides packet-by-packet which flow to schedule without
knowing application intent. In contrast, in the L4S approach
the sender still controls the relative rate of each flow
dependent on the needs of each application.</t>
</list></t>
<t hangText="Alternative Back-off ECN (ABE):">Yet again, L4S is
not an alternative to ABE but a complement that introduces much
lower queuing delay. ABE <xref
target="I-D.khademi-tcpm-alternativebackoff-ecn"/> alters the host
behaviour in response to ECN marking to utilize a link better and
give ECN flows a faster throughput, but it assumes the network
still treats ECN and drop the same. Therefore ABE exploits any
lower queuing delay that AQMs can provide. But as explained above,
AQMs still cannot reduce queuing delay too far without losing link
utilization (for other non-ABE flows).</t>
</list></t>
</section>
</section>
<section title="Opportunities">
<t>A transport layer that solves the current latency issues will provide
new service, product and application opportunities.</t>
<t>With the L4S approach, the following existing applications will
immediately experience significantly better quality of experience under
load in the best effort class: <list style="symbols">
<t>Gaming</t>
<t>VoIP</t>
<t>Video conferencing</t>
<t>Web browsing</t>
<t>(Adaptive) video streaming</t>
<t>Instant messaging</t>
</list></t>
<t>The significantly lower queuing latency also enables some interactive
application functions to be offloaded to the cloud that would hardly
even be usable today: <list style="symbols">
<t>Cloud based interactive video</t>
<t>Cloud based virtual and augmented reality</t>
</list></t>
<t>The above two applications have been successfully demonstrated with
L4S, both running together over a 40 Mb/s broadband access link loaded
up with the numerous other latency sensitive applications in the
previous list as well as numerous downloads. A panoramic video of a
football stadium can be swiped and pinched so that on the fly a proxy in
the cloud generates a sub-window of the match video under the
finger-gesture control of each user. At the same time, a virtual reality
headset fed from a 360 degree camera in a racing car has been
demonstrated, where the user's head movements control the scene
generated in the cloud. In both cases, with 7 ms end-to-end base delay,
the additional queuing delay of roughly 1 ms is so low that it seems the
video is generated locally. See https://riteproject.eu/dctth/ for videos
of these demonstrations.</t>
<t>Using a swiping finger gesture or head movement to pan a video is
an extremely demanding application - far more demanding than VoIP -
because human vision can detect extremely low delays of the order of
single milliseconds when delay is translated into a visual lag between a
video and a reference point (the finger or the orientation of the
head).</t>
<t>If low network delay is not available, all fine interaction has to be
done locally and therefore much more redundant data has to be
downloaded. When all interactive processing can be done in the cloud,
only the data to be rendered for the end user needs to be sent. Whereas,
once applications can rely on minimal queues in the network, they can
focus on reducing their own latency by only minimizing the application
send queue.</t>
<!--{ToDo: This para needs to be explained better} Also lower network layers can finally be further optimized for low latency and stable throughput. Today it is not cost efficient, as the largest part of the traffic (classic best effort) needs to allow "big" queues anyway (up to several 100s of milliseconds) to make classic congestion control work correctly. While technology is known and feasible to support low latency with reliable throughput (even mobile), it is today not considered as economically relevant, as best effort can absorb any burst, delay or throughput variations without end-users experiencing any difference from the normal day-to-day operation due to congestion control limitations.-->
<section title="Use Cases">
<t>The following use-cases for L4S are being considered by various
interested parties:<list style="symbols">
<t>Where the bottleneck is one of various types of access network:
DSL, cable, mobile, satellite<list style="symbols">
<t>Radio links (cellular, WiFi) that are distant from the
source are particularly challenging. The radio link capacity
can vary rapidly by orders of magnitude, so it is often
desirable to hold a buffer to utilise sudden increases of
capacity;</t>
<t>Cellular networks are further complicated by a perceived
need to buffer in order to make hand-overs imperceptible;</t>
<t>Satellite networks generally have a very large base RTT, so
even with minimal queuing, overall delay can never be
extremely low;</t>
<t>Nonetheless, it is certainly desirable not to hold a buffer
purely because of the sawteeth of Classic TCP, when it is more
than is needed for all the above reasons.</t>
</list></t>
<t>Private networks of heterogeneous data centres, where there is
no single administrator that can arrange for all the simultaneous
changes to senders, receivers and network needed to deploy
DCTCP:<list style="symbols">
<t>a set of private data centres interconnected over a wide
area with separate administrations, but within the same
company</t>
<t>a set of data centres operated by separate companies
interconnected by a community of interest network (e.g. for
the finance sector)</t>
<t>multi-tenant (cloud) data centres where tenants choose
their operating system stack (Infrastructure as a Service -
IaaS)</t>
</list></t>
<t>Different types of transport (or application) congestion
control:<list>
<t>elastic (TCP/SCTP);</t>
<t>real-time (RTP, RMCAT);</t>
<t>query (DNS/LDAP).</t>
</list></t>
<t>Where low delay quality of service is required, but without
inspecting or intervening above the IP layer <xref
target="I-D.you-encrypted-traffic-management"/>:<list>
<t>mobile and other networks have tended to inspect higher
layers in order to guess application QoS requirements.
However, with growing demand for support of privacy and
encryption, L4S offers an alternative. There is no need to
select which traffic to favour for queuing, when L4S gives
favourable queuing to all traffic.</t>
</list></t>
</list></t>
</section>
</section>
<section anchor="l4sps_IANA" title="IANA Considerations">
<t>This specification contains no IANA considerations.</t>
</section>
<section anchor="l4sps_Security_Considerations"
title="Security Considerations">
<section title="Traffic (Non-)Policing">
<t>Because the L4S service can serve all traffic that is using the
capacity of a link, it should not be necessary to police access to the
L4S service. In contrast, Diffserv only works if some packets get less
favourable treatment than others. So it has to use traffic policers
to limit how much traffic can be favoured. In turn, traffic policers
require traffic contracts between users and networks as well as
pairwise between networks. Because L4S will lack all this management
complexity, it is more likely to work end-to-end.</t>
<t>During early deployment (and perhaps always), some networks will
not offer the L4S service. These networks do not need to police or
re-mark L4S traffic - they just forward it unchanged as best-effort
traffic, as they would already forward traffic with ECT(1) today. At a
bottleneck, such networks will introduce some queuing and dropping.
When a scalable congestion control detects a drop it will have to
respond as if it is a Classic congestion control (see item 3-1 in
<xref target="l4sps_tcp-prague-reqs"/>). This will ensure safe
interworking with other traffic at the 'legacy' bottleneck.</t>
<t>Certain network operators might choose to restrict access to the L4S
class, perhaps only to customers who have paid a premium. In the
packet classifier (item 2 in <xref target="l4sps_fig_components"/>),
they could identify such customers using some other field than ECN
(e.g. source address range), and just ignore the L4S identifier for
non-paying customers. This would ensure that the L4S identifier
survives end-to-end even though the service does not have to be
supported at every hop. Such arrangements would only require simple
registered/not-registered packet classification, rather than the
managed application-specific traffic policing against
customer-specific traffic contracts that Diffserv requires.</t>
</section>
<section title="'Latency Friendliness'">
<t>The L4S service does rely on self-constraint - not in terms of
limiting capacity usage, but in terms of limiting burstiness. It is
believed that standardisation of dynamic behaviour (cf. TCP
slow-start) and self-interest will be sufficient to prevent transports
from sending excessive bursts of L4S traffic, given the application's
own latency will suffer most from such behaviour.</t>
<t>Whether burst policing becomes necessary remains to be seen.
Without it, there will be potential for attacks on the low latency of
the L4S service. However it may only be necessary to apply such
policing reactively, e.g. punitively targeted at any deployments of
new bursty malware.</t>
</section>
<section title="ECN Integrity">
<t>Receiving hosts can fool a sender into downloading faster by
suppressing feedback of ECN marks (or of losses if retransmissions are
not necessary or available otherwise). <xref target="RFC3540"/>
proposes that a TCP sender could pseudorandomly set either of ECT(0)
or ECT(1) in each packet of a flow and remember the sequence it had
set, termed the ECN nonce. If the receiver supports the nonce, it can
prove that it is not suppressing feedback by reflecting its knowledge
of the sequence back to the sender. The nonce was proposed on the
assumption that receivers might be more likely to cheat congestion
control than senders (although senders also have a motive to
cheat).</t>
<t>If L4S uses the ECT(1) codepoint of ECN for packet classification,
it will have to obsolete the experimental nonce. As far as is known,
the ECN Nonce has never been deployed, and it was only implemented for
a couple of testbed evaluations. It would be nearly impossible to
deploy now, because any misbehaving receiver can simply opt-out, which
would be unremarkable given all receivers currently opt-out.</t>
<t>Other ways to protect TCP feedback integrity have since been
developed. For instance:<list style="symbols">
<t>the sender can test the integrity of the receiver's feedback by
occasionally setting the IP-ECN field to a value normally only set
by the network. Then it can test whether the receiver's feedback
faithfully reports what it expects <xref
target="I-D.moncaster-tcpm-rcv-cheat"/>. This method consumes no
extra codepoints. It works for loss and it will work for ECN
feedback in any transport protocol suitable for L4S. However, it
shares the same assumption as the nonce; that the sender is not
cheating and it is motivated to prevent the receiver cheating;</t>
<t>A network can enforce a congestion response to its ECN markings
(or packet losses) by auditing congestion exposure (ConEx) <xref
target="RFC7713"/>. Whether the receiver or a downstream network
is suppressing congestion feedback or the sender is unresponsive
to the feedback, or both, ConEx audit can neutralise any advantage
that any of these three parties would otherwise gain. ConEx is
only currently defined for IPv6 and consumes a destination option
header. It has been implemented, but not deployed as far as is
known.</t>
</list></t>
</section>
</section>
<section title="Acknowledgements">
<t/>
</section>
</middle>
<!-- *****BACK MATTER ***** -->
<back>
<references title="Normative References">
&RFC2119;
</references>
<references title="Informative References">
&RFC3168;
&RFC4774;
&RFC6679;
&RFC3540;
&RFC3246;
&RFC3649;
&RFC4340;
&RFC4960;
&RFC5681;
&RFC7560;
&I-D.ietf-tcpm-accurate-ecn;
&I-D.ietf-aqm-pie;
&I-D.ietf-aqm-fq-codel;
&I-D.moncaster-tcpm-rcv-cheat;
&RFC7713;
&I-D.briscoe-aqm-dualq-coupled;
&I-D.briscoe-tsvwg-ecn-l4s-id;
&I-D.stewart-tsvwg-sctpecn;
&I-D.ietf-tcpm-dctcp;
&I-D.ietf-tcpm-cubic;
&I-D.khademi-tcpm-alternativebackoff-ecn;
&I-D.you-encrypted-traffic-management;
<reference anchor="Hohlfeld14">
<front>
<title>A QoE Perspective on Sizing Network Buffers</title>
<author fullname="Oliver Hohlfeld" initials="O." surname="Hohlfeld">
<organization/>
</author>
<author fullname="Enric Pujol" initials="E." surname="Pujol">
<organization/>
<address>
<postal>
<street/>
<city/>
<region/>
<code/>
<country/>
</postal>
<phone/>
<facsimile/>
<email/>
<uri/>
</address>
</author>
<author fullname="Florin Ciucu" initials="F." surname="Ciucu">
<organization/>
<address>
<postal>
<street/>
<city/>
<region/>
<code/>
<country/>
</postal>
<phone/>
<facsimile/>
<email/>
<uri/>
</address>
</author>
<author fullname="Anja Feldmann" initials="A." surname="Feldmann">
<organization/>
<address>
<postal>
<street/>
<city/>
<region/>
<code/>
<country/>
</postal>
<phone/>
<facsimile/>
<email/>
<uri/>
</address>
</author>
<author fullname="Paul Barford" initials="P." surname="Barford">
<organization/>
<address>
<postal>
<street/>
<city/>
<region/>
<code/>
<country/>
</postal>
<phone/>
<facsimile/>
<email/>
<uri/>
</address>
</author>
<date month="November" year="2014"/>
</front>
<seriesInfo name="Proc. ACM Internet Measurement Conf (IMC'14)"
value="hmm"/>
<format target="http://doi.acm.org/10.1145/2663716.2663730" type="PDF"/>
</reference>
<reference anchor="Mathis09"
target="http://www.hpcc.jp/pfldnet2009/Program_files/1569198525.pdf">
<front>
<title>Relentless Congestion Control</title>
<author fullname="Matt Mathis" initials="M." surname="Mathis">
<organization>PSC</organization>
</author>
<date month="May" year="2009"/>
</front>
<seriesInfo name="PFLDNeT'09" value=""/>
<format target="http://www.hpcc.jp/pfldnet2009/Program_files/1569198525.pdf"
type="PDF"/>
</reference>
<!--{ToDo: DCttH ref will need to be updated, once stable}-->
<reference anchor="DCttH15"
target="http://www.bobbriscoe.net/projects/latency/dctth_preprint.pdf">
<front>
<title>'Data Centre to the Home': Ultra-Low Latency for All</title>
<author fullname="Koen De Schepper" initials="K."
surname="De Schepper">
<organization>Bell Labs</organization>
</author>
<author fullname="Olga Bondarenko" initials="O."
surname="Bondarenko">
<organization>Simula Research Lab</organization>
</author>
<author fullname="Bob Briscoe" initials="B." surname="Briscoe">
<organization>BT</organization>
</author>
<author fullname="Ing-jyh Tsang" initials="I." surname="Tsang">
<organization>Bell Labs</organization>
</author>
<date year="2015"/>
</front>
<annotation>(Under submission)</annotation>
</reference>
<reference anchor="TCP-sub-mss-w"
target="http://www.bobbriscoe.net/projects/latency/sub-mss-w.pdf">
<front>
<title>Scaling TCP's Congestion Window for Small Round Trip
Times</title>
<author fullname="Bob Briscoe" initials="B." surname="Briscoe">
<organization>BT</organization>
</author>
<author fullname="Koen De Schepper" initials="K."
surname="De Schepper">
<organization>Bell Labs</organization>
</author>
<date month="May" year="2015"/>
</front>
<seriesInfo name="BT Technical Report" value="TR-TUB8-2015-002"/>
<format target="http://www.bobbriscoe.net/projects/latency/sub-mss-w.pdf"
type="PDF"/>
</reference>
<reference anchor="TCPPrague">
<front>
<title>Notes: DCTCP evolution 'bar BoF': Tue 21 Jul 2015, 17:40,
Prague</title>
<author fullname="Bob Briscoe" initials="B." surname="Briscoe">
<organization>Simula</organization>
</author>
<date month="July" year="2015"/>
</front>
<seriesInfo name="tcpprague mailing list archive" value=""/>
<format target="https://mailarchive.ietf.org/arch/msg/tcpprague/mwWncQg3egPd15FItYWiEvRDrvA"
type="HTML"/>
</reference>
<reference anchor="NewCC_Proc">
<front>
<title>Experimental Specification of New Congestion Control
Algorithms</title>
<author fullname="Lars Eggert" initials="L." surname="Eggert">
<organization>Nokia Research Centre</organization>
</author>
<date month="July" year="2007"/>
</front>
<seriesInfo name="IETF Operational Note" value="ion-tsv-alt-cc"/>
<format target="https://www.ietf.org/iesg/statement/congestion-control.html"
type="HTML"/>
</reference>
</references>
<section anchor="l4sps_tcp-prague-reqs"
title="The "TCP Prague Requirements"">
<t>This list of requirements was produced at an ad hoc meeting during
IETF-94 in Prague <xref target="TCPPrague"/>. The list prioritised
features that would need to be added to DCTCP to make it safe for use on
the public Internet alongside existing non-DCTCP traffic (up to #3-7).
After that, it also includes features to improve the performance of
DCTCP in the wider range of conditions found on the public Internet.</t>
<t>The table is too wide for the ASCII draft format, so it has been
split into two, with a common column of row index numbers on the
left.</t>
<t>The references should be consulted for why each requirement is
considered necessary for safety. There follow brief reasons for those
that are not self-explanatory and have not yet been written up:<list
style="hanging">
<t hangText="#3-5 Reduce RTT-dependence:">Classic TCP's throughput
is known to be inversely proportional to RTT. One would expect flows
over very low RTT paths to nearly starve flows over larger RTTs.
However, because Classic TCP induces a large queue, it has never
allowed a very low RTT path to exist, so far. For instance, consider
two paths with base RTT 1ms and 100ms. If Classic TCP induces a 20ms
queue, it turns these RTTs into 21ms and 120ms leading to a
throughput ratio of about 1:6. Whereas if a Scalable TCP induces
only a 1ms queue, the ratio is 2:101. Therefore, with small queues,
long RTT flows will essentially starve.</t>
<t hangText="Smooth ECN feedback over own RTT:">DCTCP currently
smooths feedback over a hard-coded number of segments, with the
value optimized for data centres. For the wider range of round-trip
times on the public Internet, it needs to smooth over roughly one
window of packets. Otherwise it could respond too rapidly (or too
sluggishly) and become unstable (or unresponsive).</t>
</list></t>
<t>The columns in the second part of the table have the following
meanings:<list style="hanging">
<t hangText="WG:">The IETF WG most relevant to this requirement. The
"tcpm/iccrg" combination refers to the procedure typically used for
congestion control changes, where tcpm owns the approval decision,
but uses the iccrg for expert review <xref
target="NewCC_Proc"/>;</t>
<t hangText="TCP:">Applicable to all forms of TCP congestion
control;</t>
<t hangText="DCTCP:">Applicable to Data Centre TCP as currently used
(in controlled environments);</t>
<t hangText="DCTCP bis:">Applicable to a future Data Centre TCP
congestion control intended for controlled environments;</t>
<t hangText="XXX Prague:">Applicable to a Scalable variant of XXX
(TCP/SCTP/RMCAT) congestion control.</t>
</list></t>
<texttable>
<ttcol>Req #</ttcol>
<ttcol>Requirement</ttcol>
<ttcol>Reference</ttcol>
<c>0</c>
<c>ARCHITECTURE</c>
<c/>
<c>1</c>
<c>L4S IDENTIFIER</c>
<c><xref target="I-D.briscoe-tsvwg-ecn-l4s-id"/></c>
<c>2</c>
<c>DUAL QUEUE AQM</c>
<c><xref target="I-D.briscoe-aqm-dualq-coupled"/></c>
<c/>
<c/>
<c/>
<c/>
<c>SCALABLE TRANSPORT - SAFETY ADDITIONS</c>
<c/>
<c>3-1</c>
<c>Fall back to Reno/Cubic on loss</c>
<c><xref target="I-D.ietf-tcpm-dctcp"/></c>
<c>3-2</c>
<c>Suitable ECN Feedback</c>
<c><xref target="I-D.ietf-tcpm-accurate-ecn"/>, <xref
target="I-D.stewart-tsvwg-sctpecn"/>.</c>
<c>3-4</c>
<c>Scaling TCP's Congestion Window for Small Round Trip Times</c>
<c><xref target="TCP-sub-mss-w"/></c>
<c>3-5</c>
<c>Reduce RTT-dependence</c>
<c/>
<c/>
<c/>
<c/>
<c>3-6</c>
<c>Smooth ECN feedback over own RTT</c>
<c/>
<c>3-7</c>
<c>Fall back to Reno/Cubic if classic ECN bottleneck detected</c>
<c/>
<c/>
<c/>
<c/>
<c/>
<c>SCALABLE TRANSPORT - PERFORMANCE ENHANCEMENTS</c>
<c/>
<c>3-8</c>
<c>Faster-than-additive increase</c>
<c/>
<c>3-9</c>
<c>Less drastic exit from slow-start</c>
<c/>
</texttable>
<texttable>
<ttcol>#</ttcol>
<ttcol>WG</ttcol>
<ttcol>TCP</ttcol>
<ttcol>DCTCP</ttcol>
<ttcol>DCTCP-bis</ttcol>
<ttcol>TCP Prague</ttcol>
<ttcol>SCTP Prague</ttcol>
<ttcol>RMCAT Prague</ttcol>
<c>0</c>
<c>tsvwg?</c>
<c>Y</c>
<c>Y</c>
<c>Y</c>
<c>Y</c>
<c>Y</c>
<c>Y</c>
<c>1</c>
<c>tsvwg?</c>
<c/>
<c/>
<c>Y</c>
<c>Y</c>
<c>Y</c>
<c>Y</c>
<c>2</c>
<c>aqm?</c>
<c>n/a</c>
<c>n/a</c>
<c>n/a</c>
<c>n/a</c>
<c>n/a</c>
<c>n/a</c>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c>3-1</c>
<c>tcpm</c>
<c/>
<c>Y</c>
<c>Y</c>
<c>Y</c>
<c>Y</c>
<c>Y</c>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c>3-2</c>
<c>tcpm</c>
<c>Y</c>
<c>Y</c>
<c>Y</c>
<c>Y</c>
<c>n/a</c>
<c>n/a</c>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c>3-4</c>
<c>tcpm</c>
<c>Y</c>
<c>Y</c>
<c>Y</c>
<c>Y</c>
<c>Y</c>
<c>?</c>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c>3-5</c>
<c>tcpm/ iccrg?</c>
<c/>
<c/>
<c>Y</c>
<c>Y</c>
<c>Y</c>
<c>?</c>
<c>3-6</c>
<c>tcpm/ iccrg?</c>
<c/>
<c>?</c>
<c>Y</c>
<c>Y</c>
<c>Y</c>
<c>?</c>
<c>3-7</c>
<c>tcpm/ iccrg?</c>
<c/>
<c/>
<c/>
<c>Y</c>
<c>Y</c>
<c>?</c>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c/>
<c>3-8</c>
<c>tcpm/ iccrg?</c>
<c/>
<c/>
<c>Y</c>
<c>Y</c>
<c>Y</c>
<c>?</c>
<c>3-9</c>
<c>tcpm/ iccrg?</c>
<c/>
<c/>
<c>Y</c>
<c>Y</c>
<c>Y</c>
<c>?</c>
</texttable>
</section>
<!-- <section title="Change Log (to be Deleted before Publication)">
<t>A detailed version history can be accessed at
<http://datatracker.ietf.org/doc/draft-briscoe-aqm-ecn-roadmap/history/></t>
<t><list style="hanging">
<t hangText="From briscoe-...-00 to briscoe-...-01:">Technical
changes:<list style="symbols">
<t/>
</list>Editorial changes:<list style="symbols">
<t/>
</list></t>
</list></t>
</section>
-->
</back>
</rfc>
| PAFTECH AB 2003-2026 | 2026-04-22 16:44:30 |