init
This commit is contained in:
156
java/org/apache/coyote/http2/AbstractStream.java
Normal file
156
java/org/apache/coyote/http2/AbstractStream.java
Normal file
@@ -0,0 +1,156 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.coyote.http2;
|
||||
|
||||
import java.util.Collections;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
|
||||
import org.apache.juli.logging.Log;
|
||||
import org.apache.juli.logging.LogFactory;
|
||||
import org.apache.tomcat.util.res.StringManager;
|
||||
|
||||
/**
|
||||
 * Used to manage prioritisation.
|
||||
*/
|
||||
abstract class AbstractStream {
|
||||
|
||||
private static final Log log = LogFactory.getLog(AbstractStream.class);
|
||||
private static final StringManager sm = StringManager.getManager(AbstractStream.class);
|
||||
|
||||
private final Integer identifier;
|
||||
|
||||
private volatile AbstractStream parentStream = null;
|
||||
private final Set<Stream> childStreams =
|
||||
Collections.newSetFromMap(new ConcurrentHashMap<Stream,Boolean>());
|
||||
private long windowSize = ConnectionSettingsBase.DEFAULT_INITIAL_WINDOW_SIZE;
|
||||
|
||||
|
||||
public AbstractStream(Integer identifier) {
|
||||
this.identifier = identifier;
|
||||
}
|
||||
|
||||
|
||||
public Integer getIdentifier() {
|
||||
return identifier;
|
||||
}
|
||||
|
||||
|
||||
public int getIdAsInt() {
|
||||
return identifier.intValue();
|
||||
}
|
||||
|
||||
|
||||
void detachFromParent() {
|
||||
if (parentStream != null) {
|
||||
parentStream.getChildStreams().remove(this);
|
||||
parentStream = null;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
final void addChild(Stream child) {
|
||||
child.setParentStream(this);
|
||||
childStreams.add(child);
|
||||
}
|
||||
|
||||
|
||||
boolean isDescendant(AbstractStream stream) {
|
||||
// Is the passed in Stream a descendant of this Stream?
|
||||
// Start at the passed in Stream and work up
|
||||
AbstractStream parent = stream.getParentStream();
|
||||
while (parent != null && parent != this) {
|
||||
parent = parent.getParentStream();
|
||||
}
|
||||
return parent != null;
|
||||
}
|
||||
|
||||
|
||||
AbstractStream getParentStream() {
|
||||
return parentStream;
|
||||
}
|
||||
|
||||
|
||||
void setParentStream(AbstractStream parentStream) {
|
||||
this.parentStream = parentStream;
|
||||
}
|
||||
|
||||
|
||||
final Set<Stream> getChildStreams() {
|
||||
return childStreams;
|
||||
}
|
||||
|
||||
|
||||
protected synchronized void setWindowSize(long windowSize) {
|
||||
this.windowSize = windowSize;
|
||||
}
|
||||
|
||||
|
||||
protected synchronized long getWindowSize() {
|
||||
return windowSize;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Increment window size.
|
||||
* @param increment The amount by which the window size should be increased
|
||||
* @throws Http2Exception If the window size is now higher than
|
||||
* the maximum allowed
|
||||
*/
|
||||
protected synchronized void incrementWindowSize(int increment) throws Http2Exception {
|
||||
// No need for overflow protection here.
|
||||
// Increment can't be more than Integer.MAX_VALUE and once windowSize
|
||||
// goes beyond 2^31-1 an error is triggered.
|
||||
windowSize += increment;
|
||||
|
||||
if (log.isDebugEnabled()) {
|
||||
log.debug(sm.getString("abstractStream.windowSizeInc", getConnectionId(),
|
||||
getIdentifier(), Integer.toString(increment), Long.toString(windowSize)));
|
||||
}
|
||||
|
||||
if (windowSize > ConnectionSettingsBase.MAX_WINDOW_SIZE) {
|
||||
String msg = sm.getString("abstractStream.windowSizeTooBig", getConnectionId(), identifier,
|
||||
Integer.toString(increment), Long.toString(windowSize));
|
||||
if (identifier.intValue() == 0) {
|
||||
throw new ConnectionException(msg, Http2Error.FLOW_CONTROL_ERROR);
|
||||
} else {
|
||||
throw new StreamException(
|
||||
msg, Http2Error.FLOW_CONTROL_ERROR, identifier.intValue());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
protected synchronized void decrementWindowSize(int decrement) {
|
||||
// No need for overflow protection here. Decrement can never be larger
|
||||
// the Integer.MAX_VALUE and once windowSize goes negative no further
|
||||
// decrements are permitted
|
||||
windowSize -= decrement;
|
||||
if (log.isDebugEnabled()) {
|
||||
log.debug(sm.getString("abstractStream.windowSizeDec", getConnectionId(),
|
||||
getIdentifier(), Integer.toString(decrement), Long.toString(windowSize)));
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
protected abstract String getConnectionId();
|
||||
|
||||
protected abstract int getWeight();
|
||||
|
||||
@Deprecated // Unused
|
||||
protected abstract void doNotifyAll();
|
||||
}
|
||||
94
java/org/apache/coyote/http2/ByteUtil.java
Normal file
94
java/org/apache/coyote/http2/ByteUtil.java
Normal file
@@ -0,0 +1,94 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.coyote.http2;
|
||||
|
||||
/**
|
||||
* Utility class for extracting values from byte arrays.
|
||||
*/
|
||||
/**
 * Utility class for reading and writing unsigned integer values held in
 * network byte order (big-endian) within byte arrays.
 */
public class ByteUtil {

    private ByteUtil() {
        // Hide default constructor
    }


    /**
     * Returns {@code true} if the most significant bit of the given byte is
     * set.
     */
    public static boolean isBit7Set(byte input) {
        return (input & 0x80) != 0;
    }


    /**
     * Reads a 31-bit unsigned integer starting at {@code firstByte}. Bit 7 of
     * the first byte is ignored (it is reserved in several HTTP/2 fields).
     */
    public static int get31Bits(byte[] input, int firstByte) {
        return ((input[firstByte] & 0x7F) << 24) + ((input[firstByte + 1] & 0xFF) << 16) +
                ((input[firstByte + 2] & 0xFF) << 8) + (input[firstByte + 3] & 0xFF);
    }


    /**
     * Writes the low 31 bits of {@code value} as four bytes starting at
     * {@code firstByte}, leaving bit 7 of the first byte clear.
     */
    public static void set31Bits(byte[] output, int firstByte, int value) {
        output[firstByte] = (byte) ((value >> 24) & 0x7F);
        output[firstByte + 1] = (byte) ((value >> 16) & 0xFF);
        output[firstByte + 2] = (byte) ((value >> 8) & 0xFF);
        output[firstByte + 3] = (byte) (value & 0xFF);
    }


    /**
     * Reads a single byte as an unsigned value (0-255).
     */
    public static int getOneByte(byte[] input, int pos) {
        return (input[pos] & 0xFF);
    }


    /**
     * Reads a 16-bit unsigned integer starting at {@code firstByte}.
     */
    public static int getTwoBytes(byte[] input, int firstByte) {
        return ((input[firstByte] & 0xFF) << 8) + (input[firstByte + 1] & 0xFF);
    }


    /**
     * Reads a 24-bit unsigned integer starting at {@code firstByte}.
     */
    public static int getThreeBytes(byte[] input, int firstByte) {
        return ((input[firstByte] & 0xFF) << 16) + ((input[firstByte + 1] & 0xFF) << 8) +
                (input[firstByte + 2] & 0xFF);
    }


    /**
     * Writes the low 8 bits of {@code value} at {@code firstByte}.
     */
    public static void setOneBytes(byte[] output, int firstByte, int value) {
        output[firstByte] = (byte) (value & 0xFF);
    }


    /**
     * Writes the low 16 bits of {@code value} as two bytes starting at
     * {@code firstByte}.
     */
    public static void setTwoBytes(byte[] output, int firstByte, int value) {
        output[firstByte] = (byte) ((value >> 8) & 0xFF);
        output[firstByte + 1] = (byte) (value & 0xFF);
    }


    /**
     * Writes the low 24 bits of {@code value} as three bytes starting at
     * {@code firstByte}.
     */
    public static void setThreeBytes(byte[] output, int firstByte, int value) {
        output[firstByte] = (byte) ((value >> 16) & 0xFF);
        output[firstByte + 1] = (byte) ((value >> 8) & 0xFF);
        output[firstByte + 2] = (byte) (value & 0xFF);
    }


    /**
     * Reads a 32-bit unsigned integer starting at {@code firstByte}. The
     * result is a {@code long} since the value may exceed
     * {@code Integer.MAX_VALUE}.
     */
    public static long getFourBytes(byte[] input, int firstByte) {
        return ((long) (input[firstByte] & 0xFF) << 24) + ((input[firstByte + 1] & 0xFF) << 16) +
                ((input[firstByte + 2] & 0xFF) << 8) + (input[firstByte + 3] & 0xFF);
    }


    /**
     * Writes the low 32 bits of {@code value} as four bytes starting at
     * {@code firstByte}.
     * <p>
     * The bytes are extracted by shifting before masking. The previous form,
     * {@code value & 0xFF000000}, used an int literal that sign-extends to
     * {@code 0xFFFFFFFFFF000000L} when promoted to long, so a value with any
     * bit above bit 31 set produced a corrupted first byte. For in-range
     * values (0 to 2^32-1) the result is unchanged.
     */
    public static void setFourBytes(byte[] output, int firstByte, long value) {
        output[firstByte] = (byte) ((value >> 24) & 0xFF);
        output[firstByte + 1] = (byte) ((value >> 16) & 0xFF);
        output[firstByte + 2] = (byte) ((value >> 8) & 0xFF);
        output[firstByte + 3] = (byte) (value & 0xFF);
    }
}
|
||||
34
java/org/apache/coyote/http2/ConnectionException.java
Normal file
34
java/org/apache/coyote/http2/ConnectionException.java
Normal file
@@ -0,0 +1,34 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.coyote.http2;
|
||||
|
||||
/**
|
||||
* Thrown when an HTTP/2 connection error occurs.
|
||||
*/
|
||||
public class ConnectionException extends Http2Exception {
|
||||
|
||||
private static final long serialVersionUID = 1L;
|
||||
|
||||
ConnectionException(String msg, Http2Error error) {
|
||||
super(msg, error);
|
||||
}
|
||||
|
||||
|
||||
ConnectionException(String msg, Http2Error error, Throwable cause) {
|
||||
super(msg, error, cause);
|
||||
}
|
||||
}
|
||||
220
java/org/apache/coyote/http2/ConnectionSettingsBase.java
Normal file
220
java/org/apache/coyote/http2/ConnectionSettingsBase.java
Normal file
@@ -0,0 +1,220 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.coyote.http2;
|
||||
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.juli.logging.Log;
|
||||
import org.apache.juli.logging.LogFactory;
|
||||
import org.apache.tomcat.util.res.StringManager;
|
||||
|
||||
public abstract class ConnectionSettingsBase<T extends Throwable> {
|
||||
|
||||
private final Log log = LogFactory.getLog(ConnectionSettingsBase.class); // must not be static
|
||||
private final StringManager sm = StringManager.getManager(ConnectionSettingsBase.class);
|
||||
|
||||
private final String connectionId;
|
||||
|
||||
// Limits
|
||||
protected static final int MAX_WINDOW_SIZE = (1 << 31) - 1;
|
||||
protected static final int MIN_MAX_FRAME_SIZE = 1 << 14;
|
||||
protected static final int MAX_MAX_FRAME_SIZE = (1 << 24) - 1;
|
||||
protected static final long UNLIMITED = ((long)1 << 32); // Use the maximum possible
|
||||
protected static final int MAX_HEADER_TABLE_SIZE = 1 << 16;
|
||||
|
||||
// Defaults (defined by the specification)
|
||||
protected static final int DEFAULT_HEADER_TABLE_SIZE = Hpack.DEFAULT_TABLE_SIZE;
|
||||
protected static final boolean DEFAULT_ENABLE_PUSH = true;
|
||||
protected static final long DEFAULT_MAX_CONCURRENT_STREAMS = UNLIMITED;
|
||||
protected static final int DEFAULT_INITIAL_WINDOW_SIZE = (1 << 16) - 1;
|
||||
protected static final int DEFAULT_MAX_FRAME_SIZE = MIN_MAX_FRAME_SIZE;
|
||||
protected static final long DEFAULT_MAX_HEADER_LIST_SIZE = 1 << 15;
|
||||
|
||||
protected Map<Setting,Long> current = new HashMap<>();
|
||||
protected Map<Setting,Long> pending = new HashMap<>();
|
||||
|
||||
|
||||
public ConnectionSettingsBase(String connectionId) {
|
||||
this.connectionId = connectionId;
|
||||
// Set up the defaults
|
||||
current.put(Setting.HEADER_TABLE_SIZE, Long.valueOf(DEFAULT_HEADER_TABLE_SIZE));
|
||||
current.put(Setting.ENABLE_PUSH, Long.valueOf(DEFAULT_ENABLE_PUSH ? 1 : 0));
|
||||
current.put(Setting.MAX_CONCURRENT_STREAMS, Long.valueOf(DEFAULT_MAX_CONCURRENT_STREAMS));
|
||||
current.put(Setting.INITIAL_WINDOW_SIZE, Long.valueOf(DEFAULT_INITIAL_WINDOW_SIZE));
|
||||
current.put(Setting.MAX_FRAME_SIZE, Long.valueOf(DEFAULT_MAX_FRAME_SIZE));
|
||||
current.put(Setting.MAX_HEADER_LIST_SIZE, Long.valueOf(DEFAULT_MAX_HEADER_LIST_SIZE));
|
||||
}
|
||||
|
||||
|
||||
public void set(Setting setting, long value) throws T {
|
||||
if (log.isDebugEnabled()) {
|
||||
log.debug(sm.getString("connectionSettings.debug",
|
||||
connectionId, getEndpointName(), setting, Long.toString(value)));
|
||||
}
|
||||
|
||||
switch(setting) {
|
||||
case HEADER_TABLE_SIZE:
|
||||
validateHeaderTableSize(value);
|
||||
break;
|
||||
case ENABLE_PUSH:
|
||||
validateEnablePush(value);
|
||||
break;
|
||||
case MAX_CONCURRENT_STREAMS:
|
||||
// No further validation required
|
||||
break;
|
||||
case INITIAL_WINDOW_SIZE:
|
||||
validateInitialWindowSize(value);
|
||||
break;
|
||||
case MAX_FRAME_SIZE:
|
||||
validateMaxFrameSize(value);
|
||||
break;
|
||||
case MAX_HEADER_LIST_SIZE:
|
||||
// No further validation required
|
||||
break;
|
||||
case UNKNOWN:
|
||||
// Unrecognised. Ignore it.
|
||||
log.warn(sm.getString("connectionSettings.unknown",
|
||||
connectionId, setting, Long.toString(value)));
|
||||
return;
|
||||
}
|
||||
|
||||
set(setting, Long.valueOf(value));
|
||||
}
|
||||
|
||||
|
||||
synchronized void set(Setting setting, Long value) {
|
||||
current.put(setting, value);
|
||||
}
|
||||
|
||||
|
||||
public int getHeaderTableSize() {
|
||||
return getMinInt(Setting.HEADER_TABLE_SIZE);
|
||||
}
|
||||
|
||||
|
||||
public boolean getEnablePush() {
|
||||
long result = getMin(Setting.ENABLE_PUSH);
|
||||
return result != 0;
|
||||
}
|
||||
|
||||
|
||||
public long getMaxConcurrentStreams() {
|
||||
return getMax(Setting.MAX_CONCURRENT_STREAMS);
|
||||
}
|
||||
|
||||
|
||||
public int getInitialWindowSize() {
|
||||
return getMaxInt(Setting.INITIAL_WINDOW_SIZE);
|
||||
}
|
||||
|
||||
|
||||
public int getMaxFrameSize() {
|
||||
return getMaxInt(Setting.MAX_FRAME_SIZE);
|
||||
}
|
||||
|
||||
|
||||
public long getMaxHeaderListSize() {
|
||||
return getMax(Setting.MAX_HEADER_LIST_SIZE);
|
||||
}
|
||||
|
||||
|
||||
private synchronized long getMin(Setting setting) {
|
||||
Long pendingValue = pending.get(setting);
|
||||
long currentValue = current.get(setting).longValue();
|
||||
if (pendingValue == null) {
|
||||
return currentValue;
|
||||
} else {
|
||||
return Math.min(pendingValue.longValue(), currentValue);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
private synchronized int getMinInt(Setting setting) {
|
||||
long result = getMin(setting);
|
||||
if (result > Integer.MAX_VALUE) {
|
||||
return Integer.MAX_VALUE;
|
||||
} else {
|
||||
return (int) result;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
private synchronized long getMax(Setting setting) {
|
||||
Long pendingValue = pending.get(setting);
|
||||
long currentValue = current.get(setting).longValue();
|
||||
if (pendingValue == null) {
|
||||
return currentValue;
|
||||
} else {
|
||||
return Math.max(pendingValue.longValue(), currentValue);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
private synchronized int getMaxInt(Setting setting) {
|
||||
long result = getMax(setting);
|
||||
if (result > Integer.MAX_VALUE) {
|
||||
return Integer.MAX_VALUE;
|
||||
} else {
|
||||
return (int) result;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
private void validateHeaderTableSize(long headerTableSize) throws T {
|
||||
if (headerTableSize > MAX_HEADER_TABLE_SIZE) {
|
||||
String msg = sm.getString("connectionSettings.headerTableSizeLimit",
|
||||
connectionId, Long.toString(headerTableSize));
|
||||
throwException(msg, Http2Error.PROTOCOL_ERROR);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
private void validateEnablePush(long enablePush) throws T {
|
||||
// Can't be less than zero since the result of the byte->long conversion
|
||||
// will never be negative
|
||||
if (enablePush > 1) {
|
||||
String msg = sm.getString("connectionSettings.enablePushInvalid",
|
||||
connectionId, Long.toString(enablePush));
|
||||
throwException(msg, Http2Error.PROTOCOL_ERROR);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
private void validateInitialWindowSize(long initialWindowSize) throws T {
|
||||
if (initialWindowSize > MAX_WINDOW_SIZE) {
|
||||
String msg = sm.getString("connectionSettings.windowSizeTooBig",
|
||||
connectionId, Long.toString(initialWindowSize), Long.toString(MAX_WINDOW_SIZE));
|
||||
throwException(msg, Http2Error.FLOW_CONTROL_ERROR);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
private void validateMaxFrameSize(long maxFrameSize) throws T {
|
||||
if (maxFrameSize < MIN_MAX_FRAME_SIZE || maxFrameSize > MAX_MAX_FRAME_SIZE) {
|
||||
String msg = sm.getString("connectionSettings.maxFrameSizeInvalid",
|
||||
connectionId, Long.toString(maxFrameSize), Integer.toString(MIN_MAX_FRAME_SIZE),
|
||||
Integer.toString(MAX_MAX_FRAME_SIZE));
|
||||
throwException(msg, Http2Error.PROTOCOL_ERROR);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
abstract void throwException(String msg, Http2Error error) throws T;
|
||||
|
||||
abstract String getEndpointName();
|
||||
}
|
||||
108
java/org/apache/coyote/http2/ConnectionSettingsLocal.java
Normal file
108
java/org/apache/coyote/http2/ConnectionSettingsLocal.java
Normal file
@@ -0,0 +1,108 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.coyote.http2;
|
||||
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* Represents the local connection settings i.e. the settings the client is
|
||||
* expected to use when communicating with the server. There will be a delay
|
||||
* between calling a setter and the setting taking effect at the client. When a
|
||||
* setter is called, the new value is added to the set of pending settings. Once
|
||||
* the ACK is received, the new value is moved to the current settings. While
|
||||
* waiting for the ACK, the getters will return the most lenient / generous /
|
||||
* relaxed of the current setting and the pending setting. This class does not
|
||||
* validate the values passed to the setters. If an invalid value is used the
|
||||
* client will respond (almost certainly by closing the connection) as defined
|
||||
* in the HTTP/2 specification.
|
||||
*/
|
||||
public class ConnectionSettingsLocal extends ConnectionSettingsBase<IllegalArgumentException> {
|
||||
|
||||
private static final String ENDPOINT_NAME = "Local(client->server)";
|
||||
|
||||
private boolean sendInProgress = false;
|
||||
|
||||
|
||||
public ConnectionSettingsLocal(String connectionId) {
|
||||
super(connectionId);
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
protected synchronized void set(Setting setting, Long value) {
|
||||
checkSend();
|
||||
if (current.get(setting).longValue() == value.longValue()) {
|
||||
pending.remove(setting);
|
||||
} else {
|
||||
pending.put(setting, value);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
synchronized byte[] getSettingsFrameForPending() {
|
||||
checkSend();
|
||||
int payloadSize = pending.size() * 6;
|
||||
byte[] result = new byte[9 + payloadSize];
|
||||
|
||||
ByteUtil.setThreeBytes(result, 0, payloadSize);
|
||||
result[3] = FrameType.SETTINGS.getIdByte();
|
||||
// No flags
|
||||
// Stream is zero
|
||||
// Payload
|
||||
int pos = 9;
|
||||
for (Map.Entry<Setting,Long> setting : pending.entrySet()) {
|
||||
ByteUtil.setTwoBytes(result, pos, setting.getKey().getId());
|
||||
pos += 2;
|
||||
ByteUtil.setFourBytes(result, pos, setting.getValue().longValue());
|
||||
pos += 4;
|
||||
}
|
||||
sendInProgress = true;
|
||||
return result;
|
||||
}
|
||||
|
||||
|
||||
synchronized boolean ack() {
|
||||
if (sendInProgress) {
|
||||
sendInProgress = false;
|
||||
current.putAll(pending);
|
||||
pending.clear();
|
||||
return true;
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
private void checkSend() {
|
||||
if (sendInProgress) {
|
||||
// Coding error. No need for i18n
|
||||
throw new IllegalStateException();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
void throwException(String msg, Http2Error error) throws IllegalArgumentException {
|
||||
throw new IllegalArgumentException(msg);
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
final String getEndpointName() {
|
||||
return ENDPOINT_NAME;
|
||||
}
|
||||
}
|
||||
42
java/org/apache/coyote/http2/ConnectionSettingsRemote.java
Normal file
42
java/org/apache/coyote/http2/ConnectionSettingsRemote.java
Normal file
@@ -0,0 +1,42 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.coyote.http2;
|
||||
|
||||
/**
|
||||
* Represents the remote connection settings: i.e. the settings the server must
|
||||
* use when communicating with the client.
|
||||
*/
|
||||
public class ConnectionSettingsRemote extends ConnectionSettingsBase<ConnectionException> {
|
||||
|
||||
private static final String ENDPOINT_NAME = "Remote(server->client)";
|
||||
|
||||
public ConnectionSettingsRemote(String connectionId) {
|
||||
super(connectionId);
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
void throwException(String msg, Http2Error error) throws ConnectionException {
|
||||
throw new ConnectionException(msg, error);
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
final String getEndpointName() {
|
||||
return ENDPOINT_NAME;
|
||||
}
|
||||
}
|
||||
39
java/org/apache/coyote/http2/Constants.java
Normal file
39
java/org/apache/coyote/http2/Constants.java
Normal file
@@ -0,0 +1,39 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.coyote.http2;
|
||||
|
||||
/**
 * Constants shared across the HTTP/2 implementation.
 */
public class Constants {

    // Prioritisation
    public static final int DEFAULT_WEIGHT = 16;

    // Parsing
    static final int DEFAULT_HEADER_READ_BUFFER_SIZE = 1024;

    // Header frame size
    // TODO: Is 1k the optimal value?
    static final int DEFAULT_HEADERS_FRAME_SIZE = 1024;
    // TODO: Is 64 too big? Just the status header with compression
    static final int DEFAULT_HEADERS_ACK_FRAME_SIZE = 64;

    // Limits
    static final int DEFAULT_MAX_COOKIE_COUNT = 200;
    static final int DEFAULT_MAX_HEADER_COUNT = 100;
    static final int DEFAULT_MAX_HEADER_SIZE = 8 * 1024;
    static final int DEFAULT_MAX_TRAILER_COUNT = 100;
    static final int DEFAULT_MAX_TRAILER_SIZE = 8 * 1024;


    private Constants() {
        // Constants holder. Hide the implicit public default constructor so
        // the class cannot be instantiated.
    }
}
|
||||
49
java/org/apache/coyote/http2/Flags.java
Normal file
49
java/org/apache/coyote/http2/Flags.java
Normal file
@@ -0,0 +1,49 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.coyote.http2;
|
||||
|
||||
/**
 * Helpers for reading the flags field of an HTTP/2 frame header. The meaning
 * of each bit depends on the frame type, hence the separate accessors for
 * bit 0.
 */
public class Flags {

    private Flags() {
        // Utility class. Hide default constructor
    }


    // Bit 0 on DATA / HEADERS frames
    public static boolean isEndOfStream(int flags) {
        return (flags & 0x01) == 0x01;
    }


    // Bit 0 on SETTINGS / PING frames
    public static boolean isAck(int flags) {
        return (flags & 0x01) == 0x01;
    }


    // Bit 2
    public static boolean isEndOfHeaders(int flags) {
        return (flags & 0x04) == 0x04;
    }


    // Bit 3
    public static boolean hasPadding(int flags) {
        return (flags & 0x08) == 0x08;
    }


    // Bit 5
    public static boolean hasPriority(int flags) {
        return (flags & 0x20) == 0x20;
    }
}
|
||||
138
java/org/apache/coyote/http2/FrameType.java
Normal file
138
java/org/apache/coyote/http2/FrameType.java
Normal file
@@ -0,0 +1,138 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.coyote.http2;
|
||||
|
||||
import org.apache.tomcat.util.res.StringManager;
|
||||
|
||||
public enum FrameType {
|
||||
|
||||
DATA (0, false, true, null, false),
|
||||
HEADERS (1, false, true, null, true),
|
||||
PRIORITY (2, false, true, equals(5), false),
|
||||
RST (3, false, true, equals(4), false),
|
||||
SETTINGS (4, true, false, dividableBy(6), true),
|
||||
PUSH_PROMISE (5, false, true, greaterOrEquals(4), true),
|
||||
PING (6, true, false, equals(8), false),
|
||||
GOAWAY (7, true, false, greaterOrEquals(8), false),
|
||||
WINDOW_UPDATE (8, true, true, equals(4), true),
|
||||
CONTINUATION (9, false, true, null, true),
|
||||
UNKNOWN (256, true, true, null, false);
|
||||
|
||||
private static final StringManager sm = StringManager.getManager(FrameType.class);
|
||||
|
||||
private final int id;
|
||||
private final boolean streamZero;
|
||||
private final boolean streamNonZero;
|
||||
private final IntPredicate payloadSizeValidator;
|
||||
private final boolean payloadErrorFatal;
|
||||
|
||||
|
||||
private FrameType(int id, boolean streamZero, boolean streamNonZero,
|
||||
IntPredicate payloadSizeValidator, boolean payloadErrorFatal) {
|
||||
this.id = id;
|
||||
this.streamZero = streamZero;
|
||||
this.streamNonZero = streamNonZero;
|
||||
this.payloadSizeValidator = payloadSizeValidator;
|
||||
this.payloadErrorFatal = payloadErrorFatal;
|
||||
}
|
||||
|
||||
|
||||
public byte getIdByte() {
|
||||
return (byte) id;
|
||||
}
|
||||
|
||||
|
||||
public void check(int streamId, int payloadSize) throws Http2Exception {
|
||||
// Is FrameType valid for the given stream?
|
||||
if (streamId == 0 && !streamZero || streamId != 0 && !streamNonZero) {
|
||||
throw new ConnectionException(sm.getString("frameType.checkStream", this),
|
||||
Http2Error.PROTOCOL_ERROR);
|
||||
}
|
||||
|
||||
// Is the payload size valid for the given FrameType
|
||||
if (payloadSizeValidator != null && !payloadSizeValidator.test(payloadSize)) {
|
||||
if (payloadErrorFatal || streamId == 0) {
|
||||
throw new ConnectionException(sm.getString("frameType.checkPayloadSize",
|
||||
Integer.toString(payloadSize), this),
|
||||
Http2Error.FRAME_SIZE_ERROR);
|
||||
} else {
|
||||
throw new StreamException(sm.getString("frameType.checkPayloadSize",
|
||||
Integer.toString(payloadSize), this),
|
||||
Http2Error.FRAME_SIZE_ERROR, streamId);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
    /**
     * Obtains the FrameType for a frame type identifier read from the wire.
     * Identifiers without a defined FrameType map to {@link #UNKNOWN}.
     *
     * @param i The frame type identifier
     *
     * @return The FrameType associated with the given identifier
     */
    public static FrameType valueOf(int i) {
        switch(i) {
        case 0:
            return DATA;
        case 1:
            return HEADERS;
        case 2:
            return PRIORITY;
        case 3:
            return RST;
        case 4:
            return SETTINGS;
        case 5:
            return PUSH_PROMISE;
        case 6:
            return PING;
        case 7:
            return GOAWAY;
        case 8:
            return WINDOW_UPDATE;
        case 9:
            return CONTINUATION;
        default:
            return UNKNOWN;
        }
    }
|
||||
|
||||
    /**
     * Simple predicate over a primitive int. Used to validate frame payload
     * sizes without boxing.
     */
    private interface IntPredicate {
        boolean test(int x);
    }
|
||||
|
||||
private static IntPredicate greaterOrEquals(final int y) {
|
||||
return new IntPredicate() {
|
||||
@Override
|
||||
public boolean test(int x) {
|
||||
return x >= y;
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
private static IntPredicate equals(final int y) {
|
||||
return new IntPredicate() {
|
||||
@Override
|
||||
public boolean test(int x) {
|
||||
return x == y;
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
private static IntPredicate dividableBy(final int y) {
|
||||
return new IntPredicate() {
|
||||
@Override
|
||||
public boolean test(int x) {
|
||||
return x % y == 0;
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
585
java/org/apache/coyote/http2/HPackHuffman.java
Normal file
585
java/org/apache/coyote/http2/HPackHuffman.java
Normal file
@@ -0,0 +1,585 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.coyote.http2;
|
||||
|
||||
import java.nio.ByteBuffer;
|
||||
import java.util.Arrays;
|
||||
import java.util.HashSet;
|
||||
import java.util.Set;
|
||||
|
||||
import org.apache.tomcat.util.res.StringManager;
|
||||
|
||||
public class HPackHuffman {
|
||||
|
||||
protected static final StringManager sm = StringManager.getManager(HPackHuffman.class);
|
||||
|
||||
private static final HuffmanCode[] HUFFMAN_CODES;
|
||||
|
||||
/**
|
||||
* array based tree representation of a huffman code.
|
||||
* <p/>
|
||||
* the high two bytes corresponds to the tree node if the bit is set, and the low two bytes for if it is clear
|
||||
* if the high bit is set it is a terminal node, otherwise it contains the next node position.
|
||||
*/
|
||||
private static final int[] DECODING_TABLE;
|
||||
|
||||
private static final int LOW_TERMINAL_BIT = (0b10000000) << 8;
|
||||
private static final int HIGH_TERMINAL_BIT = (0b10000000) << 24;
|
||||
private static final int LOW_MASK = 0b0111111111111111;
|
||||
|
||||
|
||||
    static {

        // One code per octet value (0-255) plus the EOS symbol at index 256.
        // Values and bit lengths are the HPACK Huffman table from RFC 7541
        // Appendix B.
        HuffmanCode[] codes = new HuffmanCode[257];

        codes[0] = new HuffmanCode(0x1ff8, 13);
        codes[1] = new HuffmanCode(0x7fffd8, 23);
        codes[2] = new HuffmanCode(0xfffffe2, 28);
        codes[3] = new HuffmanCode(0xfffffe3, 28);
        codes[4] = new HuffmanCode(0xfffffe4, 28);
        codes[5] = new HuffmanCode(0xfffffe5, 28);
        codes[6] = new HuffmanCode(0xfffffe6, 28);
        codes[7] = new HuffmanCode(0xfffffe7, 28);
        codes[8] = new HuffmanCode(0xfffffe8, 28);
        codes[9] = new HuffmanCode(0xffffea, 24);
        codes[10] = new HuffmanCode(0x3ffffffc, 30);
        codes[11] = new HuffmanCode(0xfffffe9, 28);
        codes[12] = new HuffmanCode(0xfffffea, 28);
        codes[13] = new HuffmanCode(0x3ffffffd, 30);
        codes[14] = new HuffmanCode(0xfffffeb, 28);
        codes[15] = new HuffmanCode(0xfffffec, 28);
        codes[16] = new HuffmanCode(0xfffffed, 28);
        codes[17] = new HuffmanCode(0xfffffee, 28);
        codes[18] = new HuffmanCode(0xfffffef, 28);
        codes[19] = new HuffmanCode(0xffffff0, 28);
        codes[20] = new HuffmanCode(0xffffff1, 28);
        codes[21] = new HuffmanCode(0xffffff2, 28);
        codes[22] = new HuffmanCode(0x3ffffffe, 30);
        codes[23] = new HuffmanCode(0xffffff3, 28);
        codes[24] = new HuffmanCode(0xffffff4, 28);
        codes[25] = new HuffmanCode(0xffffff5, 28);
        codes[26] = new HuffmanCode(0xffffff6, 28);
        codes[27] = new HuffmanCode(0xffffff7, 28);
        codes[28] = new HuffmanCode(0xffffff8, 28);
        codes[29] = new HuffmanCode(0xffffff9, 28);
        codes[30] = new HuffmanCode(0xffffffa, 28);
        codes[31] = new HuffmanCode(0xffffffb, 28);
        codes[32] = new HuffmanCode(0x14, 6);
        codes[33] = new HuffmanCode(0x3f8, 10);
        codes[34] = new HuffmanCode(0x3f9, 10);
        codes[35] = new HuffmanCode(0xffa, 12);
        codes[36] = new HuffmanCode(0x1ff9, 13);
        codes[37] = new HuffmanCode(0x15, 6);
        codes[38] = new HuffmanCode(0xf8, 8);
        codes[39] = new HuffmanCode(0x7fa, 11);
        codes[40] = new HuffmanCode(0x3fa, 10);
        codes[41] = new HuffmanCode(0x3fb, 10);
        codes[42] = new HuffmanCode(0xf9, 8);
        codes[43] = new HuffmanCode(0x7fb, 11);
        codes[44] = new HuffmanCode(0xfa, 8);
        codes[45] = new HuffmanCode(0x16, 6);
        codes[46] = new HuffmanCode(0x17, 6);
        codes[47] = new HuffmanCode(0x18, 6);
        codes[48] = new HuffmanCode(0x0, 5);
        codes[49] = new HuffmanCode(0x1, 5);
        codes[50] = new HuffmanCode(0x2, 5);
        codes[51] = new HuffmanCode(0x19, 6);
        codes[52] = new HuffmanCode(0x1a, 6);
        codes[53] = new HuffmanCode(0x1b, 6);
        codes[54] = new HuffmanCode(0x1c, 6);
        codes[55] = new HuffmanCode(0x1d, 6);
        codes[56] = new HuffmanCode(0x1e, 6);
        codes[57] = new HuffmanCode(0x1f, 6);
        codes[58] = new HuffmanCode(0x5c, 7);
        codes[59] = new HuffmanCode(0xfb, 8);
        codes[60] = new HuffmanCode(0x7ffc, 15);
        codes[61] = new HuffmanCode(0x20, 6);
        codes[62] = new HuffmanCode(0xffb, 12);
        codes[63] = new HuffmanCode(0x3fc, 10);
        codes[64] = new HuffmanCode(0x1ffa, 13);
        codes[65] = new HuffmanCode(0x21, 6);
        codes[66] = new HuffmanCode(0x5d, 7);
        codes[67] = new HuffmanCode(0x5e, 7);
        codes[68] = new HuffmanCode(0x5f, 7);
        codes[69] = new HuffmanCode(0x60, 7);
        codes[70] = new HuffmanCode(0x61, 7);
        codes[71] = new HuffmanCode(0x62, 7);
        codes[72] = new HuffmanCode(0x63, 7);
        codes[73] = new HuffmanCode(0x64, 7);
        codes[74] = new HuffmanCode(0x65, 7);
        codes[75] = new HuffmanCode(0x66, 7);
        codes[76] = new HuffmanCode(0x67, 7);
        codes[77] = new HuffmanCode(0x68, 7);
        codes[78] = new HuffmanCode(0x69, 7);
        codes[79] = new HuffmanCode(0x6a, 7);
        codes[80] = new HuffmanCode(0x6b, 7);
        codes[81] = new HuffmanCode(0x6c, 7);
        codes[82] = new HuffmanCode(0x6d, 7);
        codes[83] = new HuffmanCode(0x6e, 7);
        codes[84] = new HuffmanCode(0x6f, 7);
        codes[85] = new HuffmanCode(0x70, 7);
        codes[86] = new HuffmanCode(0x71, 7);
        codes[87] = new HuffmanCode(0x72, 7);
        codes[88] = new HuffmanCode(0xfc, 8);
        codes[89] = new HuffmanCode(0x73, 7);
        codes[90] = new HuffmanCode(0xfd, 8);
        codes[91] = new HuffmanCode(0x1ffb, 13);
        codes[92] = new HuffmanCode(0x7fff0, 19);
        codes[93] = new HuffmanCode(0x1ffc, 13);
        codes[94] = new HuffmanCode(0x3ffc, 14);
        codes[95] = new HuffmanCode(0x22, 6);
        codes[96] = new HuffmanCode(0x7ffd, 15);
        codes[97] = new HuffmanCode(0x3, 5);
        codes[98] = new HuffmanCode(0x23, 6);
        codes[99] = new HuffmanCode(0x4, 5);
        codes[100] = new HuffmanCode(0x24, 6);
        codes[101] = new HuffmanCode(0x5, 5);
        codes[102] = new HuffmanCode(0x25, 6);
        codes[103] = new HuffmanCode(0x26, 6);
        codes[104] = new HuffmanCode(0x27, 6);
        codes[105] = new HuffmanCode(0x6, 5);
        codes[106] = new HuffmanCode(0x74, 7);
        codes[107] = new HuffmanCode(0x75, 7);
        codes[108] = new HuffmanCode(0x28, 6);
        codes[109] = new HuffmanCode(0x29, 6);
        codes[110] = new HuffmanCode(0x2a, 6);
        codes[111] = new HuffmanCode(0x7, 5);
        codes[112] = new HuffmanCode(0x2b, 6);
        codes[113] = new HuffmanCode(0x76, 7);
        codes[114] = new HuffmanCode(0x2c, 6);
        codes[115] = new HuffmanCode(0x8, 5);
        codes[116] = new HuffmanCode(0x9, 5);
        codes[117] = new HuffmanCode(0x2d, 6);
        codes[118] = new HuffmanCode(0x77, 7);
        codes[119] = new HuffmanCode(0x78, 7);
        codes[120] = new HuffmanCode(0x79, 7);
        codes[121] = new HuffmanCode(0x7a, 7);
        codes[122] = new HuffmanCode(0x7b, 7);
        codes[123] = new HuffmanCode(0x7ffe, 15);
        codes[124] = new HuffmanCode(0x7fc, 11);
        codes[125] = new HuffmanCode(0x3ffd, 14);
        codes[126] = new HuffmanCode(0x1ffd, 13);
        codes[127] = new HuffmanCode(0xffffffc, 28);
        codes[128] = new HuffmanCode(0xfffe6, 20);
        codes[129] = new HuffmanCode(0x3fffd2, 22);
        codes[130] = new HuffmanCode(0xfffe7, 20);
        codes[131] = new HuffmanCode(0xfffe8, 20);
        codes[132] = new HuffmanCode(0x3fffd3, 22);
        codes[133] = new HuffmanCode(0x3fffd4, 22);
        codes[134] = new HuffmanCode(0x3fffd5, 22);
        codes[135] = new HuffmanCode(0x7fffd9, 23);
        codes[136] = new HuffmanCode(0x3fffd6, 22);
        codes[137] = new HuffmanCode(0x7fffda, 23);
        codes[138] = new HuffmanCode(0x7fffdb, 23);
        codes[139] = new HuffmanCode(0x7fffdc, 23);
        codes[140] = new HuffmanCode(0x7fffdd, 23);
        codes[141] = new HuffmanCode(0x7fffde, 23);
        codes[142] = new HuffmanCode(0xffffeb, 24);
        codes[143] = new HuffmanCode(0x7fffdf, 23);
        codes[144] = new HuffmanCode(0xffffec, 24);
        codes[145] = new HuffmanCode(0xffffed, 24);
        codes[146] = new HuffmanCode(0x3fffd7, 22);
        codes[147] = new HuffmanCode(0x7fffe0, 23);
        codes[148] = new HuffmanCode(0xffffee, 24);
        codes[149] = new HuffmanCode(0x7fffe1, 23);
        codes[150] = new HuffmanCode(0x7fffe2, 23);
        codes[151] = new HuffmanCode(0x7fffe3, 23);
        codes[152] = new HuffmanCode(0x7fffe4, 23);
        codes[153] = new HuffmanCode(0x1fffdc, 21);
        codes[154] = new HuffmanCode(0x3fffd8, 22);
        codes[155] = new HuffmanCode(0x7fffe5, 23);
        codes[156] = new HuffmanCode(0x3fffd9, 22);
        codes[157] = new HuffmanCode(0x7fffe6, 23);
        codes[158] = new HuffmanCode(0x7fffe7, 23);
        codes[159] = new HuffmanCode(0xffffef, 24);
        codes[160] = new HuffmanCode(0x3fffda, 22);
        codes[161] = new HuffmanCode(0x1fffdd, 21);
        codes[162] = new HuffmanCode(0xfffe9, 20);
        codes[163] = new HuffmanCode(0x3fffdb, 22);
        codes[164] = new HuffmanCode(0x3fffdc, 22);
        codes[165] = new HuffmanCode(0x7fffe8, 23);
        codes[166] = new HuffmanCode(0x7fffe9, 23);
        codes[167] = new HuffmanCode(0x1fffde, 21);
        codes[168] = new HuffmanCode(0x7fffea, 23);
        codes[169] = new HuffmanCode(0x3fffdd, 22);
        codes[170] = new HuffmanCode(0x3fffde, 22);
        codes[171] = new HuffmanCode(0xfffff0, 24);
        codes[172] = new HuffmanCode(0x1fffdf, 21);
        codes[173] = new HuffmanCode(0x3fffdf, 22);
        codes[174] = new HuffmanCode(0x7fffeb, 23);
        codes[175] = new HuffmanCode(0x7fffec, 23);
        codes[176] = new HuffmanCode(0x1fffe0, 21);
        codes[177] = new HuffmanCode(0x1fffe1, 21);
        codes[178] = new HuffmanCode(0x3fffe0, 22);
        codes[179] = new HuffmanCode(0x1fffe2, 21);
        codes[180] = new HuffmanCode(0x7fffed, 23);
        codes[181] = new HuffmanCode(0x3fffe1, 22);
        codes[182] = new HuffmanCode(0x7fffee, 23);
        codes[183] = new HuffmanCode(0x7fffef, 23);
        codes[184] = new HuffmanCode(0xfffea, 20);
        codes[185] = new HuffmanCode(0x3fffe2, 22);
        codes[186] = new HuffmanCode(0x3fffe3, 22);
        codes[187] = new HuffmanCode(0x3fffe4, 22);
        codes[188] = new HuffmanCode(0x7ffff0, 23);
        codes[189] = new HuffmanCode(0x3fffe5, 22);
        codes[190] = new HuffmanCode(0x3fffe6, 22);
        codes[191] = new HuffmanCode(0x7ffff1, 23);
        codes[192] = new HuffmanCode(0x3ffffe0, 26);
        codes[193] = new HuffmanCode(0x3ffffe1, 26);
        codes[194] = new HuffmanCode(0xfffeb, 20);
        codes[195] = new HuffmanCode(0x7fff1, 19);
        codes[196] = new HuffmanCode(0x3fffe7, 22);
        codes[197] = new HuffmanCode(0x7ffff2, 23);
        codes[198] = new HuffmanCode(0x3fffe8, 22);
        codes[199] = new HuffmanCode(0x1ffffec, 25);
        codes[200] = new HuffmanCode(0x3ffffe2, 26);
        codes[201] = new HuffmanCode(0x3ffffe3, 26);
        codes[202] = new HuffmanCode(0x3ffffe4, 26);
        codes[203] = new HuffmanCode(0x7ffffde, 27);
        codes[204] = new HuffmanCode(0x7ffffdf, 27);
        codes[205] = new HuffmanCode(0x3ffffe5, 26);
        codes[206] = new HuffmanCode(0xfffff1, 24);
        codes[207] = new HuffmanCode(0x1ffffed, 25);
        codes[208] = new HuffmanCode(0x7fff2, 19);
        codes[209] = new HuffmanCode(0x1fffe3, 21);
        codes[210] = new HuffmanCode(0x3ffffe6, 26);
        codes[211] = new HuffmanCode(0x7ffffe0, 27);
        codes[212] = new HuffmanCode(0x7ffffe1, 27);
        codes[213] = new HuffmanCode(0x3ffffe7, 26);
        codes[214] = new HuffmanCode(0x7ffffe2, 27);
        codes[215] = new HuffmanCode(0xfffff2, 24);
        codes[216] = new HuffmanCode(0x1fffe4, 21);
        codes[217] = new HuffmanCode(0x1fffe5, 21);
        codes[218] = new HuffmanCode(0x3ffffe8, 26);
        codes[219] = new HuffmanCode(0x3ffffe9, 26);
        codes[220] = new HuffmanCode(0xffffffd, 28);
        codes[221] = new HuffmanCode(0x7ffffe3, 27);
        codes[222] = new HuffmanCode(0x7ffffe4, 27);
        codes[223] = new HuffmanCode(0x7ffffe5, 27);
        codes[224] = new HuffmanCode(0xfffec, 20);
        codes[225] = new HuffmanCode(0xfffff3, 24);
        codes[226] = new HuffmanCode(0xfffed, 20);
        codes[227] = new HuffmanCode(0x1fffe6, 21);
        codes[228] = new HuffmanCode(0x3fffe9, 22);
        codes[229] = new HuffmanCode(0x1fffe7, 21);
        codes[230] = new HuffmanCode(0x1fffe8, 21);
        codes[231] = new HuffmanCode(0x7ffff3, 23);
        codes[232] = new HuffmanCode(0x3fffea, 22);
        codes[233] = new HuffmanCode(0x3fffeb, 22);
        codes[234] = new HuffmanCode(0x1ffffee, 25);
        codes[235] = new HuffmanCode(0x1ffffef, 25);
        codes[236] = new HuffmanCode(0xfffff4, 24);
        codes[237] = new HuffmanCode(0xfffff5, 24);
        codes[238] = new HuffmanCode(0x3ffffea, 26);
        codes[239] = new HuffmanCode(0x7ffff4, 23);
        codes[240] = new HuffmanCode(0x3ffffeb, 26);
        codes[241] = new HuffmanCode(0x7ffffe6, 27);
        codes[242] = new HuffmanCode(0x3ffffec, 26);
        codes[243] = new HuffmanCode(0x3ffffed, 26);
        codes[244] = new HuffmanCode(0x7ffffe7, 27);
        codes[245] = new HuffmanCode(0x7ffffe8, 27);
        codes[246] = new HuffmanCode(0x7ffffe9, 27);
        codes[247] = new HuffmanCode(0x7ffffea, 27);
        codes[248] = new HuffmanCode(0x7ffffeb, 27);
        codes[249] = new HuffmanCode(0xffffffe, 28);
        codes[250] = new HuffmanCode(0x7ffffec, 27);
        codes[251] = new HuffmanCode(0x7ffffed, 27);
        codes[252] = new HuffmanCode(0x7ffffee, 27);
        codes[253] = new HuffmanCode(0x7ffffef, 27);
        codes[254] = new HuffmanCode(0x7fffff0, 27);
        codes[255] = new HuffmanCode(0x3ffffee, 26);
        codes[256] = new HuffmanCode(0x3fffffff, 30);
        HUFFMAN_CODES = codes;

        // Build the array-based decoding tree (see the DECODING_TABLE javadoc
        // for the node encoding).
        //lengths determined by experimentation, just set it to something large then see how large it actually ends up
        int[] codingTree = new int[256];
        //the current position in the tree
        int pos = 0;
        int allocated = 1; //the next position to allocate to
        //map of the current state at a given position
        //only used while building the tree
        HuffmanCode[] currentCode = new HuffmanCode[256];
        currentCode[0] = new HuffmanCode(0, 0);

        // Codes still to be placed in the tree. Building stops once every
        // code has been assigned a terminal node.
        final Set<HuffmanCode> allCodes = new HashSet<>();
        allCodes.addAll(Arrays.asList(HUFFMAN_CODES));

        while (!allCodes.isEmpty()) {
            // The (partial) bit string that leads to the node at pos
            int length = currentCode[pos].length;
            int code = currentCode[pos].value;

            // Extend the bit string by one bit in each direction. If the
            // extended string is a complete Huffman code the branch is a
            // terminal; otherwise allocate a new interior node for it.
            int newLength = length + 1;
            HuffmanCode high = new HuffmanCode(code << 1 | 1, newLength);
            HuffmanCode low = new HuffmanCode(code << 1, newLength);
            int newVal = 0;
            boolean highTerminal = allCodes.remove(high);
            if (highTerminal) {
                //bah, linear search
                int i = 0;
                for (i = 0; i < codes.length; ++i) {
                    if (codes[i].equals(high)) {
                        break;
                    }
                }
                newVal = LOW_TERMINAL_BIT | i;
            } else {
                int highPos = allocated++;
                currentCode[highPos] = high;
                newVal = highPos;
            }
            // High (set bit) branch occupies the upper 16 bits of the node
            newVal <<= 16;
            boolean lowTerminal = allCodes.remove(low);
            if (lowTerminal) {
                //bah, linear search
                int i = 0;
                for (i = 0; i < codes.length; ++i) {
                    if (codes[i].equals(low)) {
                        break;
                    }
                }
                newVal |= LOW_TERMINAL_BIT | i;
            } else {
                int lowPos = allocated++;
                currentCode[lowPos] = low;
                newVal |= lowPos;
            }
            codingTree[pos] = newVal;
            pos++;
        }
        DECODING_TABLE = codingTree;
    }
|
||||
|
||||
    /**
     * Decodes a huffman encoded string into the target StringBuilder. There
     * must be enough space left in the buffer for this method to succeed.
     *
     * @param data The byte buffer
     * @param length The length of data from the buffer to decode
     * @param target The target for the decompressed data
     *
     * @throws HpackException If the Huffman encoded value in HPACK headers did
     *                        not end with EOS padding
     */
    public static void decode(ByteBuffer data, int length, StringBuilder target)
            throws HpackException {
        assert data.remaining() >= length;
        // Index of the current node in DECODING_TABLE; zero is the root and
        // is also the state after a complete symbol has been emitted
        int treePos = 0;
        // True while every bit consumed since the last emitted symbol has
        // been a one, i.e. the trailing bits could be valid EOS padding
        boolean eosBits = true;
        // Number of consecutive one-bits consumed since the last symbol
        int eosBitCount = 0;
        for (int i = 0; i < length; ++i) {
            byte b = data.get();
            // Consume the octet most significant bit first
            int bitPos = 7;
            while (bitPos >= 0) {
                int val = DECODING_TABLE[treePos];
                if (((1 << bitPos) & b) == 0) {
                    //bit not set, we want the lower part of the tree
                    if ((val & LOW_TERMINAL_BIT) == 0) {
                        treePos = val & LOW_MASK;
                        eosBits = false;
                        eosBitCount = 0;
                    } else {
                        target.append((char) (val & LOW_MASK));
                        treePos = 0;
                        eosBits = true;
                    }
                } else {
                    if (eosBits) {
                        eosBitCount++;
                    }
                    //bit set, we want the higher part of the tree
                    if ((val & HIGH_TERMINAL_BIT) == 0) {
                        treePos = (val >> 16) & LOW_MASK;
                    } else {
                        if (eosBitCount != 0) {
                            // This must be the EOS symbol which MUST be treated
                            // as an error
                            throw new HpackException(sm.getString("hpackhuffman.stringLiteralEOS"));
                        }
                        target.append((char) ((val >> 16) & LOW_MASK));
                        treePos = 0;
                        eosBits = true;
                    }
                }
                bitPos--;
            }
        }
        // More than 7 one-bits of padding means at least one whole padding
        // octet was used, which is an error
        if (eosBitCount > 7) {
            throw new HpackException(sm.getString(
                    "hpackhuffman.stringLiteralTooMuchPadding"));
        }
        // If the final bits were not all ones they cannot be a prefix of the
        // EOS symbol, so the value did not end with valid padding
        if (!eosBits) {
            throw new HpackException(sm.getString(
                    "hpackhuffman.huffmanEncodedHpackValueDidNotEndWithEOS"));
        }
    }
|
||||
|
||||
|
||||
    /**
     * Encodes the given string into the buffer. If there is not enough space in
     * the buffer, or the encoded version is bigger than the original it will
     * return false and not modify the buffers position.
     *
     * @param buffer The buffer to encode into
     * @param toEncode The string to encode
     * @param forceLowercase If the string should be encoded in lower case
     * @return true if encoding succeeded
     */
    public static boolean encode(ByteBuffer buffer, String toEncode, boolean forceLowercase) {
        if (buffer.remaining() <= toEncode.length()) {
            return false;
        }
        // Remember the start so the buffer can be restored on any failure
        int start = buffer.position();
        //this sucks, but we need to put the length first
        //and we don't really have any option but to calculate it in advance to make sure we have left enough room
        //so we end up iterating twice
        int length = 0;
        for (int i = 0; i < toEncode.length(); ++i) {
            char c = toEncode.charAt(i);
            if (c > 255) {
                // The Huffman table only has codes for single octet values
                throw new IllegalArgumentException(sm.getString("hpack.invalidCharacter",
                        Character.toString(c), Integer.valueOf(c)));
            }
            if(forceLowercase) {
                c = Hpack.toLower(c);
            }
            HuffmanCode code = HUFFMAN_CODES[c];
            length += code.length;
        }
        // Round the total bit count up to whole octets
        int byteLength = length / 8 + (length % 8 == 0 ? 0 : 1);

        // Write the Huffman flag (high bit set) then the octet length as an
        // integer with a 7-bit prefix
        buffer.put((byte) (1 << 7));
        Hpack.encodeInteger(buffer, byteLength, 7);


        // bytePos is the number of bits already used in currentBufferByte
        int bytePos = 0;
        byte currentBufferByte = 0;
        for (int i = 0; i < toEncode.length(); ++i) {
            char c = toEncode.charAt(i);
            if(forceLowercase) {
                c = Hpack.toLower(c);
            }
            HuffmanCode code = HUFFMAN_CODES[c];
            if (code.length + bytePos <= 8) {
                //it fits in the current byte
                currentBufferByte |= ((code.value & 0xFF) << 8 - (code.length + bytePos));
                bytePos += code.length;
            } else {
                //it does not fit, it may need up to 4 bytes
                int val = code.value;
                int rem = code.length;
                while (rem > 0) {
                    if (!buffer.hasRemaining()) {
                        buffer.position(start);
                        return false;
                    }
                    int remainingInByte = 8 - bytePos;
                    // Take the top remainingInByte bits of the remaining
                    // rem-bit value, or left-align the whole remainder if it
                    // fits within the current byte
                    if (rem > remainingInByte) {
                        currentBufferByte |= (val >> (rem - remainingInByte));
                    } else {
                        currentBufferByte |= (val << (remainingInByte - rem));
                    }
                    if (rem > remainingInByte) {
                        // Current byte is now full; flush it
                        buffer.put(currentBufferByte);
                        currentBufferByte = 0;
                        bytePos = 0;
                    } else {
                        bytePos = rem;
                    }
                    rem -= remainingInByte;
                }
            }
            if (bytePos == 8) {
                if (!buffer.hasRemaining()) {
                    buffer.position(start);
                    return false;
                }
                buffer.put(currentBufferByte);
                currentBufferByte = 0;
                bytePos = 0;
            }
            if (buffer.position() - start > toEncode.length()) {
                //the encoded version is longer than the original
                //just return false
                buffer.position(start);
                return false;
            }
        }
        if (bytePos > 0) {
            //add the EOS bytes if we have not finished on a single byte
            if (!buffer.hasRemaining()) {
                buffer.position(start);
                return false;
            }
            // Pad the final partial octet with the most significant bits of
            // the EOS symbol (all ones)
            buffer.put((byte) (currentBufferByte | ((0xFF) >> bytePos)));
        }
        return true;
    }
|
||||
|
||||
protected static class HuffmanCode {
|
||||
/**
|
||||
* The value of the least significant bits of the code
|
||||
*/
|
||||
int value;
|
||||
/**
|
||||
* length of the code, in bits
|
||||
*/
|
||||
int length;
|
||||
|
||||
public HuffmanCode(int value, int length) {
|
||||
this.value = value;
|
||||
this.length = length;
|
||||
}
|
||||
|
||||
public int getValue() {
|
||||
return value;
|
||||
}
|
||||
|
||||
public int getLength() {
|
||||
return length;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
|
||||
|
||||
if (this == o) return true;
|
||||
if (o == null || getClass() != o.getClass()) return false;
|
||||
|
||||
HuffmanCode that = (HuffmanCode) o;
|
||||
|
||||
if (length != that.length) return false;
|
||||
if (value != that.value) return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
int result = value;
|
||||
result = 31 * result + length;
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "HuffmanCode{" +
|
||||
"value=" + value +
|
||||
", length=" + length +
|
||||
'}';
|
||||
}
|
||||
}
|
||||
}
|
||||
44
java/org/apache/coyote/http2/HeaderSink.java
Normal file
44
java/org/apache/coyote/http2/HeaderSink.java
Normal file
@@ -0,0 +1,44 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.coyote.http2;
|
||||
|
||||
import org.apache.coyote.http2.HpackDecoder.HeaderEmitter;
|
||||
|
||||
/**
 * Purpose of this class is to silently swallow any headers. It is used once
 * the connection close process has started if headers for new streams are
 * received.
 */
public class HeaderSink implements HeaderEmitter {

    @Override
    public void emitHeader(String name, String value) {
        // NO-OP
        // The header is deliberately discarded
    }

    @Override
    public void validateHeaders() throws StreamException {
        // NO-OP
        // Nothing was retained so there is nothing to validate
    }

    @Override
    public void setHeaderException(StreamException streamException) {
        // NO-OP
        // The connection is already closing so no need to process additional
        // errors
    }
}
|
||||
216
java/org/apache/coyote/http2/Hpack.java
Normal file
216
java/org/apache/coyote/http2/Hpack.java
Normal file
@@ -0,0 +1,216 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.coyote.http2;
|
||||
|
||||
import java.nio.ByteBuffer;
|
||||
|
||||
import org.apache.tomcat.util.res.StringManager;
|
||||
|
||||
final class Hpack {
|
||||
|
||||
private static final StringManager sm = StringManager.getManager(Hpack.class);
|
||||
|
||||
private static final byte LOWER_DIFF = 'a' - 'A';
|
||||
static final int DEFAULT_TABLE_SIZE = 4096;
|
||||
private static final int MAX_INTEGER_OCTETS = 8; //not sure what a good value for this is, but the spec says we need to provide an upper bound
|
||||
|
||||
/**
|
||||
* table that contains powers of two,
|
||||
* used as both bitmask and to quickly calculate 2^n
|
||||
*/
|
||||
private static final int[] PREFIX_TABLE;
|
||||
|
||||
|
||||
static final HeaderField[] STATIC_TABLE;
|
||||
static final int STATIC_TABLE_LENGTH;
|
||||
|
||||
    static {
        // PREFIX_TABLE[i] is a mask of i one-bits, i.e. (2^i) - 1
        PREFIX_TABLE = new int[32];
        for (int i = 0; i < 32; ++i) {
            int n = 0;
            for (int j = 0; j < i; ++j) {
                n = n << 1;
                n |= 1;
            }
            PREFIX_TABLE[i] = n;
        }

        // The HPACK static header table (RFC 7541 Appendix A). A null value
        // means the entry defines a name only.
        HeaderField[] fields = new HeaderField[62];
        //note that zero is not used
        fields[1] = new HeaderField(":authority", null);
        fields[2] = new HeaderField(":method", "GET");
        fields[3] = new HeaderField(":method", "POST");
        fields[4] = new HeaderField(":path", "/");
        fields[5] = new HeaderField(":path", "/index.html");
        fields[6] = new HeaderField(":scheme", "http");
        fields[7] = new HeaderField(":scheme", "https");
        fields[8] = new HeaderField(":status", "200");
        fields[9] = new HeaderField(":status", "204");
        fields[10] = new HeaderField(":status", "206");
        fields[11] = new HeaderField(":status", "304");
        fields[12] = new HeaderField(":status", "400");
        fields[13] = new HeaderField(":status", "404");
        fields[14] = new HeaderField(":status", "500");
        fields[15] = new HeaderField("accept-charset", null);
        fields[16] = new HeaderField("accept-encoding", "gzip, deflate");
        fields[17] = new HeaderField("accept-language", null);
        fields[18] = new HeaderField("accept-ranges", null);
        fields[19] = new HeaderField("accept", null);
        fields[20] = new HeaderField("access-control-allow-origin", null);
        fields[21] = new HeaderField("age", null);
        fields[22] = new HeaderField("allow", null);
        fields[23] = new HeaderField("authorization", null);
        fields[24] = new HeaderField("cache-control", null);
        fields[25] = new HeaderField("content-disposition", null);
        fields[26] = new HeaderField("content-encoding", null);
        fields[27] = new HeaderField("content-language", null);
        fields[28] = new HeaderField("content-length", null);
        fields[29] = new HeaderField("content-location", null);
        fields[30] = new HeaderField("content-range", null);
        fields[31] = new HeaderField("content-type", null);
        fields[32] = new HeaderField("cookie", null);
        fields[33] = new HeaderField("date", null);
        fields[34] = new HeaderField("etag", null);
        fields[35] = new HeaderField("expect", null);
        fields[36] = new HeaderField("expires", null);
        fields[37] = new HeaderField("from", null);
        fields[38] = new HeaderField("host", null);
        fields[39] = new HeaderField("if-match", null);
        fields[40] = new HeaderField("if-modified-since", null);
        fields[41] = new HeaderField("if-none-match", null);
        fields[42] = new HeaderField("if-range", null);
        fields[43] = new HeaderField("if-unmodified-since", null);
        fields[44] = new HeaderField("last-modified", null);
        fields[45] = new HeaderField("link", null);
        fields[46] = new HeaderField("location", null);
        fields[47] = new HeaderField("max-forwards", null);
        fields[48] = new HeaderField("proxy-authenticate", null);
        fields[49] = new HeaderField("proxy-authorization", null);
        fields[50] = new HeaderField("range", null);
        fields[51] = new HeaderField("referer", null);
        fields[52] = new HeaderField("refresh", null);
        fields[53] = new HeaderField("retry-after", null);
        fields[54] = new HeaderField("server", null);
        fields[55] = new HeaderField("set-cookie", null);
        fields[56] = new HeaderField("strict-transport-security", null);
        fields[57] = new HeaderField("transfer-encoding", null);
        fields[58] = new HeaderField("user-agent", null);
        fields[59] = new HeaderField("vary", null);
        fields[60] = new HeaderField("via", null);
        fields[61] = new HeaderField("www-authenticate", null);
        STATIC_TABLE = fields;
        // Index zero is unused so the number of usable entries is one less
        // than the array length
        STATIC_TABLE_LENGTH = STATIC_TABLE.length - 1;
    }
|
||||
|
||||
static class HeaderField {
|
||||
final String name;
|
||||
final String value;
|
||||
final int size;
|
||||
|
||||
HeaderField(String name, String value) {
|
||||
this.name = name;
|
||||
this.value = value;
|
||||
if (value != null) {
|
||||
this.size = 32 + name.length() + value.length();
|
||||
} else {
|
||||
this.size = -1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
    /**
     * Decodes an integer in the HPACK prefix format. If the return value is -1
     * it means that there was not enough data in the buffer to complete the decoding
     * sequence.
     * <p/>
     * If this method returns -1 then the source buffer will not have been modified.
     *
     * @param source The buffer that contains the integer
     * @param n The encoding prefix length
     * @return The encoded integer, or -1 if there was not enough data
     * @throws HpackException If the integer is encoded over more octets than
     *         permitted
     */
    static int decodeInteger(ByteBuffer source, int n) throws HpackException {
        if (source.remaining() == 0) {
            return -1;
        }
        int count = 1;
        // Remember the start position so it can be restored on underflow
        int sp = source.position();
        // PREFIX_TABLE[n] is the all-ones n-bit prefix mask (2^n - 1)
        int mask = PREFIX_TABLE[n];

        int i = mask & source.get();
        int b;
        if (i < PREFIX_TABLE[n]) {
            // Prefix not saturated - the value fits entirely in the prefix
            return i;
        } else {
            // Prefix saturated - remainder follows as little-endian 7-bit
            // groups with the continuation (high) bit set on all but the last
            int m = 0;
            do {
                if(count++ > MAX_INTEGER_OCTETS) {
                    throw new HpackException(sm.getString("hpack.integerEncodedOverTooManyOctets",
                            Integer.valueOf(MAX_INTEGER_OCTETS)));
                }
                if (source.remaining() == 0) {
                    //we have run out of data
                    //reset
                    source.position(sp);
                    return -1;
                }
                b = source.get();
                // PREFIX_TABLE[m] + 1 == 2^m, so this adds (b & 0x7F) << m
                i = i + (b & 127) * (PREFIX_TABLE[m] + 1);
                m += 7;
            } while ((b & 128) == 128);
        }
        return i;
    }
|
||||
|
||||
    /**
     * Encodes an integer in the HPACK prefix format.
     * <p/>
     * This method assumes that the buffer has already had the first 8-n bits filled.
     * As such it will modify the last byte that is already present in the buffer, and
     * potentially add more if required
     *
     * @param source The buffer that contains the integer
     * @param value The integer to encode
     * @param n The encoding prefix length
     */
    static void encodeInteger(ByteBuffer source, int value, int n) {
        // Largest value that fits in the n-bit prefix (2^n - 1)
        int twoNminus1 = PREFIX_TABLE[n];
        // Index of the partially written byte that holds the prefix
        int pos = source.position() - 1;
        if (value < twoNminus1) {
            // Value fits entirely within the prefix bits
            source.put(pos, (byte) (source.get(pos) | value));
        } else {
            // Saturate the prefix, then emit the remainder as 7-bit groups
            // with the continuation bit set on all but the last
            source.put(pos, (byte) (source.get(pos) | twoNminus1));
            value = value - twoNminus1;
            while (value >= 128) {
                source.put((byte) (value % 128 + 128));
                value = value / 128;
            }
            source.put((byte) value);
        }
    }
|
||||
|
||||
|
||||
static char toLower(char c) {
|
||||
if (c >= 'A' && c <= 'Z') {
|
||||
return (char) (c + LOWER_DIFF);
|
||||
}
|
||||
return c;
|
||||
}
|
||||
|
||||
    // Utility class - hide the constructor to prevent instantiation.
    private Hpack() {}
|
||||
|
||||
}
|
||||
482
java/org/apache/coyote/http2/HpackDecoder.java
Normal file
482
java/org/apache/coyote/http2/HpackDecoder.java
Normal file
@@ -0,0 +1,482 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.coyote.http2;
|
||||
|
||||
import java.nio.ByteBuffer;
|
||||
|
||||
import org.apache.tomcat.util.res.StringManager;
|
||||
|
||||
/**
|
||||
* A decoder for HPACK.
|
||||
*/
|
||||
public class HpackDecoder {
|
||||
|
||||
    protected static final StringManager sm = StringManager.getManager(HpackDecoder.class);

    // Initial capacity of the dynamic table ring buffer; grown on demand
    private static final int DEFAULT_RING_BUFFER_SIZE = 10;

    /**
     * The object that receives the headers that are emitted from this decoder
     */
    private HeaderEmitter headerEmitter;

    /**
     * The header table
     */
    private Hpack.HeaderField[] headerTable;

    /**
     * The current HEAD position of the header table. We use a ring buffer type
     * construct as it would be silly to actually shuffle the items around in the
     * array.
     */
    private int firstSlotPosition = 0;

    /**
     * The current table size by index (aka the number of index positions that are filled up)
     */
    private int filledTableSlots = 0;

    /**
     * The currently calculated memory size, as per the HPACK algorithm
     */
    private int currentMemorySize = 0;

    /**
     * The maximum allowed memory size set by the container.
     */
    private int maxMemorySizeHard;
    /**
     * The maximum memory size currently in use. May be less than the hard limit.
     */
    private int maxMemorySizeSoft;

    // Limits on the emitted header set; negative values mean "unlimited"
    private int maxHeaderCount = Constants.DEFAULT_MAX_HEADER_COUNT;
    private int maxHeaderSize = Constants.DEFAULT_MAX_HEADER_SIZE;

    // Accounting for the current header block against the limits above
    private volatile int headerCount = 0;
    private volatile boolean countedCookie;
    private volatile int headerSize = 0;

    // Reused scratch buffer for assembling decoded strings
    private final StringBuilder stringBuilder = new StringBuilder();
|
||||
|
||||
public HpackDecoder(int maxMemorySize) {
|
||||
this.maxMemorySizeHard = maxMemorySize;
|
||||
this.maxMemorySizeSoft = maxMemorySize;
|
||||
headerTable = new Hpack.HeaderField[DEFAULT_RING_BUFFER_SIZE];
|
||||
}
|
||||
|
||||
public HpackDecoder() {
|
||||
this(Hpack.DEFAULT_TABLE_SIZE);
|
||||
}
|
||||
|
||||
    /**
     * Decodes the provided frame data. If this method leaves data in the buffer
     * then this buffer should be compacted so this data is preserved, unless
     * there is no more data in which case this should be considered a protocol error.
     *
     * @param buffer The buffer
     *
     * @throws HpackException If the packed data is not valid
     */
    public void decode(ByteBuffer buffer) throws HpackException {
        while (buffer.hasRemaining()) {
            // Remember the start of the instruction so the position can be
            // rewound if the instruction turns out to be incomplete
            int originalPos = buffer.position();
            byte b = buffer.get();
            if ((b & 0b10000000) != 0) {
                //if the first bit is set it is an indexed header field
                buffer.position(buffer.position() - 1); //unget the byte
                int index = Hpack.decodeInteger(buffer, 7); //prefix is 7
                if (index == -1) {
                    // Incomplete - wait for more data
                    buffer.position(originalPos);
                    return;
                } else if(index == 0) {
                    // Index zero is never a valid table reference
                    throw new HpackException(
                            sm.getString("hpackdecoder.zeroNotValidHeaderTableIndex"));
                }
                handleIndex(index);
            } else if ((b & 0b01000000) != 0) {
                //Literal Header Field with Incremental Indexing
                String headerName = readHeaderName(buffer, 6);
                if (headerName == null) {
                    buffer.position(originalPos);
                    return;
                }
                String headerValue = readHpackString(buffer);
                if (headerValue == null) {
                    buffer.position(originalPos);
                    return;
                }
                emitHeader(headerName, headerValue);
                addEntryToHeaderTable(new Hpack.HeaderField(headerName, headerValue));
            } else if ((b & 0b11110000) == 0) {
                //Literal Header Field without Indexing
                String headerName = readHeaderName(buffer, 4);
                if (headerName == null) {
                    buffer.position(originalPos);
                    return;
                }
                String headerValue = readHpackString(buffer);
                if (headerValue == null) {
                    buffer.position(originalPos);
                    return;
                }
                emitHeader(headerName, headerValue);
            } else if ((b & 0b11110000) == 0b00010000) {
                //Literal Header Field never indexed
                String headerName = readHeaderName(buffer, 4);
                if (headerName == null) {
                    buffer.position(originalPos);
                    return;
                }
                String headerValue = readHpackString(buffer);
                if (headerValue == null) {
                    buffer.position(originalPos);
                    return;
                }
                emitHeader(headerName, headerValue);
            } else if ((b & 0b11100000) == 0b00100000) {
                //context update max table size change
                if (!handleMaxMemorySizeChange(buffer, originalPos)) {
                    return;
                }
            } else {
                throw new RuntimeException(sm.getString("hpackdecoder.notImplemented"));
            }
        }
    }
|
||||
|
||||
    /**
     * Process a dynamic table size update, shrinking the table if the new
     * (soft) limit is below the current size.
     *
     * @param buffer      The buffer, positioned just after the instruction byte
     * @param originalPos The position to restore if the instruction is incomplete
     * @return {@code true} if the update was fully processed, {@code false} if
     *         more data is required
     * @throws HpackException If the update is mis-placed or exceeds the hard limit
     */
    private boolean handleMaxMemorySizeChange(ByteBuffer buffer, int originalPos) throws HpackException {
        if (headerCount != 0) {
            // Size updates are only permitted at the start of a header block
            throw new HpackException(sm.getString("hpackdecoder.tableSizeUpdateNotAtStart"));
        }
        buffer.position(buffer.position() - 1); //unget the byte
        int size = Hpack.decodeInteger(buffer, 5);
        if (size == -1) {
            // Incomplete - rewind and wait for more data
            buffer.position(originalPos);
            return false;
        }
        if (size > maxMemorySizeHard) {
            throw new HpackException(sm.getString("hpackdecoder.maxMemorySizeExceeded",
                    Integer.valueOf(size), Integer.valueOf(maxMemorySizeHard)));
        }
        maxMemorySizeSoft = size;
        if (currentMemorySize > maxMemorySizeSoft) {
            // Evict the oldest ring buffer entries until the table fits
            // within the new soft limit
            int newTableSlots = filledTableSlots;
            int tableLength = headerTable.length;
            int newSize = currentMemorySize;
            while (newSize > maxMemorySizeSoft) {
                int clearIndex = firstSlotPosition;
                firstSlotPosition++;
                if (firstSlotPosition == tableLength) {
                    firstSlotPosition = 0;
                }
                Hpack.HeaderField oldData = headerTable[clearIndex];
                headerTable[clearIndex] = null;
                newSize -= oldData.size;
                newTableSlots--;
            }
            this.filledTableSlots = newTableSlots;
            currentMemorySize = newSize;
        }
        return true;
    }
|
||||
|
||||
private String readHeaderName(ByteBuffer buffer, int prefixLength) throws HpackException {
|
||||
buffer.position(buffer.position() - 1); //unget the byte
|
||||
int index = Hpack.decodeInteger(buffer, prefixLength);
|
||||
if (index == -1) {
|
||||
return null;
|
||||
} else if (index != 0) {
|
||||
return handleIndexedHeaderName(index);
|
||||
} else {
|
||||
return readHpackString(buffer);
|
||||
}
|
||||
}
|
||||
|
||||
private String readHpackString(ByteBuffer buffer) throws HpackException {
|
||||
if (!buffer.hasRemaining()) {
|
||||
return null;
|
||||
}
|
||||
byte data = buffer.get(buffer.position());
|
||||
|
||||
int length = Hpack.decodeInteger(buffer, 7);
|
||||
if (buffer.remaining() < length) {
|
||||
return null;
|
||||
}
|
||||
boolean huffman = (data & 0b10000000) != 0;
|
||||
if (huffman) {
|
||||
return readHuffmanString(length, buffer);
|
||||
}
|
||||
for (int i = 0; i < length; ++i) {
|
||||
stringBuilder.append((char) buffer.get());
|
||||
}
|
||||
String ret = stringBuilder.toString();
|
||||
stringBuilder.setLength(0);
|
||||
return ret;
|
||||
}
|
||||
|
||||
private String readHuffmanString(int length, ByteBuffer buffer) throws HpackException {
|
||||
HPackHuffman.decode(buffer, length, stringBuilder);
|
||||
String ret = stringBuilder.toString();
|
||||
stringBuilder.setLength(0);
|
||||
return ret;
|
||||
}
|
||||
|
||||
    /**
     * Resolve a header name from a table index: static table first, then the
     * dynamic table.
     *
     * @param index The 1-based index into the combined table space
     * @return The header name at that index
     * @throws HpackException If the index is out of range or refers to an
     *         empty slot
     */
    private String handleIndexedHeaderName(int index) throws HpackException {
        if (index <= Hpack.STATIC_TABLE_LENGTH) {
            return Hpack.STATIC_TABLE[index].name;
        } else {
            // index is 1 based
            if (index > Hpack.STATIC_TABLE_LENGTH + filledTableSlots) {
                throw new HpackException(sm.getString("hpackdecoder.headerTableIndexInvalid",
                        Integer.valueOf(index), Integer.valueOf(Hpack.STATIC_TABLE_LENGTH),
                        Integer.valueOf(filledTableSlots)));
            }
            int adjustedIndex = getRealIndex(index - Hpack.STATIC_TABLE_LENGTH);
            Hpack.HeaderField res = headerTable[adjustedIndex];
            if (res == null) {
                throw new HpackException(sm.getString("hpackdecoder.nullHeader", Integer.valueOf(index)));
            }
            return res.name;
        }
    }
|
||||
|
||||
/**
|
||||
* Handle an indexed header representation
|
||||
*
|
||||
* @param index The index
|
||||
* @throws HpackException
|
||||
*/
|
||||
private void handleIndex(int index) throws HpackException {
|
||||
if (index <= Hpack.STATIC_TABLE_LENGTH) {
|
||||
addStaticTableEntry(index);
|
||||
} else {
|
||||
int adjustedIndex = getRealIndex(index - Hpack.STATIC_TABLE_LENGTH);
|
||||
Hpack.HeaderField headerField = headerTable[adjustedIndex];
|
||||
emitHeader(headerField.name, headerField.value);
|
||||
}
|
||||
}
|
||||
|
||||
    /**
     * because we use a ring buffer type construct, and don't actually shuffle
     * items in the array, we need to figure out the real index to use.
     * <p/>
     * package private for unit tests
     *
     * @param index The index from the hpack
     * @return the real index into the array
     * @throws HpackException If the computed slot index is negative
     */
    int getRealIndex(int index) throws HpackException {
        //the index is one based, but our table is zero based, hence -1
        //also because of our ring buffer setup the indexes are reversed
        //index = 1 is at position firstSlotPosition + filledSlots
        int realIndex = (firstSlotPosition + (filledTableSlots - index)) % headerTable.length;
        if (realIndex < 0) {
            // NOTE(review): this only rejects indexes whose modulo result is
            // negative; some out-of-range indexes wrap back into range, so
            // callers should bounds-check against filledTableSlots first
            throw new HpackException(sm.getString("hpackdecoder.headerTableIndexInvalid",
                    Integer.valueOf(index), Integer.valueOf(Hpack.STATIC_TABLE_LENGTH),
                    Integer.valueOf(filledTableSlots)));
        }
        return realIndex;
    }
|
||||
|
||||
private void addStaticTableEntry(int index) throws HpackException {
|
||||
//adds an entry from the static table.
|
||||
Hpack.HeaderField entry = Hpack.STATIC_TABLE[index];
|
||||
emitHeader(entry.name, (entry.value == null) ? "" : entry.value);
|
||||
}
|
||||
|
||||
    /**
     * Add an entry to the dynamic table, evicting the oldest entries (or
     * clearing the table entirely) as required to stay within the soft limit.
     *
     * @param entry The entry to add
     */
    private void addEntryToHeaderTable(Hpack.HeaderField entry) {
        if (entry.size > maxMemorySizeSoft) {
            //it is too big to fit, so we just completely clear the table.
            while (filledTableSlots > 0) {
                headerTable[firstSlotPosition] = null;
                firstSlotPosition++;
                if (firstSlotPosition == headerTable.length) {
                    firstSlotPosition = 0;
                }
                filledTableSlots--;
            }
            currentMemorySize = 0;
            return;
        }
        // Grow the ring buffer if every slot is already in use
        resizeIfRequired();
        int newTableSlots = filledTableSlots + 1;
        int tableLength = headerTable.length;
        // New entries are appended after the most recent entry
        int index = (firstSlotPosition + filledTableSlots) % tableLength;
        headerTable[index] = entry;
        int newSize = currentMemorySize + entry.size;
        // Evict the oldest entries until the new entry fits
        while (newSize > maxMemorySizeSoft) {
            int clearIndex = firstSlotPosition;
            firstSlotPosition++;
            if (firstSlotPosition == tableLength) {
                firstSlotPosition = 0;
            }
            Hpack.HeaderField oldData = headerTable[clearIndex];
            headerTable[clearIndex] = null;
            newSize -= oldData.size;
            newTableSlots--;
        }
        this.filledTableSlots = newTableSlots;
        currentMemorySize = newSize;
    }
|
||||
|
||||
private void resizeIfRequired() {
|
||||
if(filledTableSlots == headerTable.length) {
|
||||
Hpack.HeaderField[] newArray = new Hpack.HeaderField[headerTable.length + 10]; //we only grow slowly
|
||||
for(int i = 0; i < headerTable.length; ++i) {
|
||||
newArray[i] = headerTable[(firstSlotPosition + i) % headerTable.length];
|
||||
}
|
||||
firstSlotPosition = 0;
|
||||
headerTable = newArray;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
    /**
     * Interface implemented by the intended recipient of the headers.
     */
    interface HeaderEmitter {
        /**
         * Pass a single header to the recipient.
         *
         * @param name  Header name
         * @param value Header value
         * @throws HpackException If a header is received that is not compliant
         *                        with the HTTP/2 specification
         */
        void emitHeader(String name, String value) throws HpackException;

        /**
         * Inform the recipient of the headers that a stream error needs to be
         * triggered using the given message when {@link #validateHeaders()} is
         * called. This is used when the Parser becomes aware of an error that
         * is not visible to the recipient.
         *
         * @param streamException The exception to use when resetting the stream
         */
        void setHeaderException(StreamException streamException);

        /**
         * Are the headers passed to the recipient so far valid? The decoder
         * needs to process all the headers to maintain state even if there is
         * a problem. In addition, it is easy for the intended recipient to
         * track if the complete set of headers is valid since to do that state
         * needs to be maintained between the parsing of the initial headers
         * and the parsing of any trailer headers. The recipient is the best
         * place to maintain that state.
         *
         * @throws StreamException If the headers received to date are not valid
         */
        void validateHeaders() throws StreamException;
    }
|
||||
|
||||
|
||||
    /**
     * @return The current recipient of decoded headers
     */
    public HeaderEmitter getHeaderEmitter() {
        return headerEmitter;
    }
|
||||
|
||||
|
||||
    /**
     * Set the recipient for subsequently decoded headers and reset the
     * per-header-block limit tracking.
     *
     * @param headerEmitter The new recipient
     */
    void setHeaderEmitter(HeaderEmitter headerEmitter) {
        this.headerEmitter = headerEmitter;
        // Reset limit tracking
        headerCount = 0;
        countedCookie = false;
        headerSize = 0;
    }
|
||||
|
||||
|
||||
    /**
     * @param maxHeaderCount Maximum number of headers to emit; a negative
     *                       value means unlimited
     */
    void setMaxHeaderCount(int maxHeaderCount) {
        this.maxHeaderCount = maxHeaderCount;
    }
|
||||
|
||||
|
||||
    /**
     * @param maxHeaderSize Maximum total header size to emit; a negative
     *                      value means unlimited
     */
    void setMaxHeaderSize(int maxHeaderSize) {
        this.maxHeaderSize = maxHeaderSize;
    }
|
||||
|
||||
|
||||
    /**
     * Forward a decoded header to the emitter while maintaining the count and
     * size accounting. Headers over the configured limits are not forwarded
     * (the limits themselves are checked by the callers of the
     * isHeader*Exceeded() methods).
     *
     * @param name  Header name (already lower case)
     * @param value Header value
     * @throws HpackException If the emitter rejects the header
     */
    private void emitHeader(String name, String value) throws HpackException {
        // Header names are forced to lower case
        if ("cookie".equals(name)) {
            // Only count the cookie header once since HTTP/2 splits it into
            // multiple headers to aid compression
            if (!countedCookie) {
                headerCount ++;
                countedCookie = true;
            }
        } else {
            headerCount ++;
        }
        // Overhead will vary. The main concern is that lots of small headers
        // trigger the limiting mechanism correctly. Therefore, use an overhead
        // estimate of 3 which is the worst case for small headers.
        int inc = 3 + name.length() + value.length();
        headerSize += inc;
        if (!isHeaderCountExceeded() && !isHeaderSizeExceeded(0)) {
            headerEmitter.emitHeader(name, value);
        }
    }
|
||||
|
||||
|
||||
boolean isHeaderCountExceeded() {
|
||||
if (maxHeaderCount < 0) {
|
||||
return false;
|
||||
}
|
||||
return headerCount > maxHeaderCount;
|
||||
}
|
||||
|
||||
|
||||
boolean isHeaderSizeExceeded(int unreadSize) {
|
||||
if (maxHeaderSize < 0) {
|
||||
return false;
|
||||
}
|
||||
return (headerSize + unreadSize) > maxHeaderSize;
|
||||
}
|
||||
|
||||
|
||||
boolean isHeaderSwallowSizeExceeded(int unreadSize) {
|
||||
if (maxHeaderSize < 0) {
|
||||
return false;
|
||||
}
|
||||
// Swallow the same again before closing the connection.
|
||||
return (headerSize + unreadSize) > (2 * maxHeaderSize);
|
||||
}
|
||||
|
||||
|
||||
    // Package private accessors exposing internal state for unit tests

    int getFirstSlotPosition() {
        return firstSlotPosition;
    }

    Hpack.HeaderField[] getHeaderTable() {
        return headerTable;
    }

    int getFilledTableSlots() {
        return filledTableSlots;
    }

    int getCurrentMemorySize() {
        return currentMemorySize;
    }

    int getMaxMemorySizeSoft() {
        return maxMemorySizeSoft;
    }
|
||||
}
|
||||
393
java/org/apache/coyote/http2/HpackEncoder.java
Normal file
393
java/org/apache/coyote/http2/HpackEncoder.java
Normal file
@@ -0,0 +1,393 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.coyote.http2;
|
||||
|
||||
import java.nio.ByteBuffer;
|
||||
import java.util.ArrayDeque;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.Deque;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.juli.logging.Log;
|
||||
import org.apache.juli.logging.LogFactory;
|
||||
import org.apache.tomcat.util.http.MimeHeaders;
|
||||
import org.apache.tomcat.util.res.StringManager;
|
||||
|
||||
/**
|
||||
* Encoder for HPACK frames.
|
||||
*/
|
||||
public class HpackEncoder {
|
||||
|
||||
private static final Log log = LogFactory.getLog(HpackEncoder.class);
|
||||
private static final StringManager sm = StringManager.getManager(HpackEncoder.class);
|
||||
|
||||
    /**
     * Default policy controlling when indexing and Huffman coding are used
     * during encoding.
     */
    public static final HpackHeaderFunction DEFAULT_HEADER_FUNCTION = new HpackHeaderFunction() {
        @Override
        public boolean shouldUseIndexing(String headerName, String value) {
            //content length and date change all the time
            //no need to index them, or they will churn the table
            return !headerName.equals("content-length") && !headerName.equals("date");
        }

        @Override
        public boolean shouldUseHuffman(String header, String value) {
            return value.length() > 5; //TODO: figure out a good value for this
        }

        @Override
        public boolean shouldUseHuffman(String header) {
            return header.length() > 5; //TODO: figure out a good value for this
        }

    };
|
||||
|
||||
private int headersIterator = -1;
|
||||
private boolean firstPass = true;
|
||||
|
||||
private MimeHeaders currentHeaders;
|
||||
|
||||
private int entryPositionCounter;
|
||||
|
||||
private int newMaxHeaderSize = -1; //if the max header size has been changed
|
||||
private int minNewMaxHeaderSize = -1; //records the smallest value of newMaxHeaderSize, as per section 4.1
|
||||
|
||||
private static final Map<String, TableEntry[]> ENCODING_STATIC_TABLE;
|
||||
|
||||
private final Deque<TableEntry> evictionQueue = new ArrayDeque<>();
|
||||
private final Map<String, List<TableEntry>> dynamicTable = new HashMap<>(); //TODO: use a custom data structure to reduce allocations
|
||||
|
||||
static {
|
||||
Map<String, TableEntry[]> map = new HashMap<>();
|
||||
for (int i = 1; i < Hpack.STATIC_TABLE.length; ++i) {
|
||||
Hpack.HeaderField m = Hpack.STATIC_TABLE[i];
|
||||
TableEntry[] existing = map.get(m.name);
|
||||
if (existing == null) {
|
||||
map.put(m.name, new TableEntry[]{new TableEntry(m.name, m.value, i)});
|
||||
} else {
|
||||
TableEntry[] newEntry = new TableEntry[existing.length + 1];
|
||||
System.arraycopy(existing, 0, newEntry, 0, existing.length);
|
||||
newEntry[existing.length] = new TableEntry(m.name, m.value, i);
|
||||
map.put(m.name, newEntry);
|
||||
}
|
||||
}
|
||||
ENCODING_STATIC_TABLE = Collections.unmodifiableMap(map);
|
||||
}
|
||||
|
||||
/**
|
||||
* The maximum table size
|
||||
*/
|
||||
private int maxTableSize = Hpack.DEFAULT_TABLE_SIZE;
|
||||
|
||||
/**
|
||||
* The current table size
|
||||
*/
|
||||
private int currentTableSize;
|
||||
|
||||
private final HpackHeaderFunction hpackHeaderFunction;
|
||||
|
||||
    /**
     * Create an encoder using the default indexing/Huffman policy.
     */
    HpackEncoder() {
        this.hpackHeaderFunction = DEFAULT_HEADER_FUNCTION;
    }
|
||||
|
||||
    /**
     * Encodes the headers into a buffer.
     * <p>
     * Pseudo-headers (names starting with ':') are written on a first pass and
     * regular headers on a second pass so the pseudo-headers come first. If
     * the target buffer fills up, progress is recorded so a later call with
     * the same headers can resume.
     *
     * @param headers The headers to encode
     * @param target The buffer to which to write the encoded headers
     *
     * @return The state of the encoding process
     */
    public State encode(MimeHeaders headers, ByteBuffer target) {
        int it = headersIterator;
        if (headersIterator == -1) {
            // Starting a new header block - emit any pending dynamic table
            // size change first
            handleTableSizeChange(target);
            //new headers map
            it = 0;
            currentHeaders = headers;
        } else {
            // Resuming after an UNDERFLOW - must be the same header block
            if (headers != currentHeaders) {
                throw new IllegalStateException();
            }
        }
        while (it < currentHeaders.size()) {
            // FIXME: Review lowercase policy
            String headerName = headers.getName(it).toString().toLowerCase(Locale.US);
            boolean skip = false;
            if (firstPass) {
                // First pass: pseudo-headers only
                if (headerName.charAt(0) != ':') {
                    skip = true;
                }
            } else {
                // Second pass: regular headers only
                if (headerName.charAt(0) == ':') {
                    skip = true;
                }
            }
            if (!skip) {
                String val = headers.getValue(it).toString();

                if (log.isDebugEnabled()) {
                    log.debug(sm.getString("hpackEncoder.encodeHeader", headerName, val));
                }
                TableEntry tableEntry = findInTable(headerName, val);

                // We use 11 to make sure we have enough room for the
                // variable length integers
                int required = 11 + headerName.length() + 1 + val.length();

                if (target.remaining() < required) {
                    // Record progress so encoding can resume later
                    this.headersIterator = it;
                    return State.UNDERFLOW;
                }
                // Only index if it will fit
                boolean canIndex = hpackHeaderFunction.shouldUseIndexing(headerName, val) &&
                        (headerName.length() + val.length() + 32) < maxTableSize;
                if (tableEntry == null && canIndex) {
                    //add the entry to the dynamic table
                    target.put((byte) (1 << 6));
                    writeHuffmanEncodableName(target, headerName);
                    writeHuffmanEncodableValue(target, headerName, val);
                    addToDynamicTable(headerName, val);
                } else if (tableEntry == null) {
                    //literal never indexed
                    target.put((byte) (1 << 4));
                    writeHuffmanEncodableName(target, headerName);
                    writeHuffmanEncodableValue(target, headerName, val);
                } else {
                    //so we know something is already in the table
                    if (val.equals(tableEntry.value)) {
                        //the whole thing is in the table
                        target.put((byte) (1 << 7));
                        Hpack.encodeInteger(target, tableEntry.getPosition(), 7);
                    } else {
                        if (canIndex) {
                            //add the entry to the dynamic table
                            target.put((byte) (1 << 6));
                            Hpack.encodeInteger(target, tableEntry.getPosition(), 6);
                            writeHuffmanEncodableValue(target, headerName, val);
                            addToDynamicTable(headerName, val);

                        } else {
                            target.put((byte) (1 << 4));
                            Hpack.encodeInteger(target, tableEntry.getPosition(), 4);
                            writeHuffmanEncodableValue(target, headerName, val);
                        }
                    }
                }

            }
            if (++it == currentHeaders.size() && firstPass) {
                // End of the pseudo-header pass - restart for regular headers
                firstPass = false;
                it = 0;
            }
        }
        headersIterator = -1;
        firstPass = true;
        return State.COMPLETE;
    }
|
||||
|
||||
private void writeHuffmanEncodableName(ByteBuffer target, String headerName) {
|
||||
if (hpackHeaderFunction.shouldUseHuffman(headerName)) {
|
||||
if(HPackHuffman.encode(target, headerName, true)) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
target.put((byte) 0); //to use encodeInteger we need to place the first byte in the buffer.
|
||||
Hpack.encodeInteger(target, headerName.length(), 7);
|
||||
for (int j = 0; j < headerName.length(); ++j) {
|
||||
target.put((byte) Hpack.toLower(headerName.charAt(j)));
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
private void writeHuffmanEncodableValue(ByteBuffer target, String headerName, String val) {
|
||||
if (hpackHeaderFunction.shouldUseHuffman(headerName, val)) {
|
||||
if (!HPackHuffman.encode(target, val, false)) {
|
||||
writeValueString(target, val);
|
||||
}
|
||||
} else {
|
||||
writeValueString(target, val);
|
||||
}
|
||||
}
|
||||
|
||||
private void writeValueString(ByteBuffer target, String val) {
|
||||
target.put((byte) 0); //to use encodeInteger we need to place the first byte in the buffer.
|
||||
Hpack.encodeInteger(target, val.length(), 7);
|
||||
for (int j = 0; j < val.length(); ++j) {
|
||||
target.put((byte) val.charAt(j));
|
||||
}
|
||||
}
|
||||
|
||||
    /**
     * Record a newly indexed name/value pair in the encoder's view of the
     * dynamic table and run eviction if the table is now over-size.
     *
     * @param headerName The header name
     * @param val The header value
     */
    private void addToDynamicTable(String headerName, String val) {
        int pos = entryPositionCounter++;
        // Positions are stored negated relative to the insertion counter; see
        // DynamicTableEntry.getPosition()
        DynamicTableEntry d = new DynamicTableEntry(headerName, val, -pos);
        List<TableEntry> existing = dynamicTable.get(headerName);
        if (existing == null) {
            dynamicTable.put(headerName, existing = new ArrayList<>(1));
        }
        existing.add(d);
        evictionQueue.add(d);
        currentTableSize += d.size;
        runEvictionIfRequired();
        if (entryPositionCounter == Integer.MAX_VALUE) {
            //prevent rollover
            preventPositionRollover();
        }

    }
|
||||
|
||||
|
||||
    /**
     * Re-base stored entry positions so the position counter can be reset to
     * zero without (intentionally) changing the values later returned by
     * {@code getPosition()}.
     */
    private void preventPositionRollover() {
        //if the position counter is about to roll over we iterate all the table entries
        //and set their position to their actual position
        // NOTE(review): DynamicTableEntry.getPosition() adds
        // STATIC_TABLE_LENGTH; storing its result back into position and then
        // re-applying getPosition() appears to double-count that offset.
        // Only reachable after Integer.MAX_VALUE insertions - confirm before
        // relying on post-rollover positions.
        for (Map.Entry<String, List<TableEntry>> entry : dynamicTable.entrySet()) {
            for (TableEntry t : entry.getValue()) {
                t.position = t.getPosition();
            }
        }
        entryPositionCounter = 0;
    }
|
||||
|
||||
private void runEvictionIfRequired() {
|
||||
|
||||
while (currentTableSize > maxTableSize) {
|
||||
TableEntry next = evictionQueue.poll();
|
||||
if (next == null) {
|
||||
return;
|
||||
}
|
||||
currentTableSize -= next.size;
|
||||
List<TableEntry> list = dynamicTable.get(next.name);
|
||||
list.remove(next);
|
||||
if (list.isEmpty()) {
|
||||
dynamicTable.remove(next.name);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private TableEntry findInTable(String headerName, String value) {
|
||||
TableEntry[] staticTable = ENCODING_STATIC_TABLE.get(headerName);
|
||||
if (staticTable != null) {
|
||||
for (TableEntry st : staticTable) {
|
||||
if (st.value != null && st.value.equals(value)) { //todo: some form of lookup?
|
||||
return st;
|
||||
}
|
||||
}
|
||||
}
|
||||
List<TableEntry> dynamic = dynamicTable.get(headerName);
|
||||
if (dynamic != null) {
|
||||
for (TableEntry st : dynamic) {
|
||||
if (st.value.equals(value)) { //todo: some form of lookup?
|
||||
return st;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (staticTable != null) {
|
||||
return staticTable[0];
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
public void setMaxTableSize(int newSize) {
|
||||
this.newMaxHeaderSize = newSize;
|
||||
if (minNewMaxHeaderSize == -1) {
|
||||
minNewMaxHeaderSize = newSize;
|
||||
} else {
|
||||
minNewMaxHeaderSize = Math.min(newSize, minNewMaxHeaderSize);
|
||||
}
|
||||
}
|
||||
|
||||
    /**
     * Emit pending dynamic table size update instruction(s) at the start of a
     * header block. If the size dipped below the final value since the last
     * block, the intermediate minimum is signalled first.
     *
     * @param target The buffer to write the update(s) to
     */
    private void handleTableSizeChange(ByteBuffer target) {
        if (newMaxHeaderSize == -1) {
            // No pending change
            return;
        }
        if (minNewMaxHeaderSize != newMaxHeaderSize) {
            target.put((byte) (1 << 5));
            Hpack.encodeInteger(target, minNewMaxHeaderSize, 5);
        }
        target.put((byte) (1 << 5));
        Hpack.encodeInteger(target, newMaxHeaderSize, 5);
        maxTableSize = newMaxHeaderSize;
        runEvictionIfRequired();
        // Clear the pending change
        newMaxHeaderSize = -1;
        minNewMaxHeaderSize = -1;
    }
|
||||
|
||||
/**
 * Result state of an encoding pass.
 * NOTE(review): the names suggest COMPLETE = all headers written and
 * UNDERFLOW = target buffer ran out of space - confirm against the
 * encoder's encode() implementation (not visible here).
 */
public enum State {

    COMPLETE,
    UNDERFLOW,

}
|
||||
|
||||
static class TableEntry {
|
||||
final String name;
|
||||
final String value;
|
||||
final int size;
|
||||
int position;
|
||||
|
||||
TableEntry(String name, String value, int position) {
|
||||
this.name = name;
|
||||
this.value = value;
|
||||
this.position = position;
|
||||
if (value != null) {
|
||||
this.size = 32 + name.length() + value.length();
|
||||
} else {
|
||||
this.size = -1;
|
||||
}
|
||||
}
|
||||
|
||||
public int getPosition() {
|
||||
return position;
|
||||
}
|
||||
}
|
||||
|
||||
class DynamicTableEntry extends TableEntry {
|
||||
|
||||
DynamicTableEntry(String name, String value, int position) {
|
||||
super(name, value, position);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int getPosition() {
|
||||
return super.getPosition() + entryPositionCounter + Hpack.STATIC_TABLE_LENGTH;
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Strategy callbacks that let the caller control, per header, whether the
 * encoder uses HPACK indexing and/or Huffman encoding.
 */
public interface HpackHeaderFunction {

    /**
     * Returns true if the given header should be added to the dynamic
     * table / referenced by index rather than sent literally.
     *
     * @param header The header name
     * @param value  The header value
     * @return <code>true</code> if indexing should be used
     */
    boolean shouldUseIndexing(String header, String value);


    /**
     * Returns true if huffman encoding should be used on the header value
     *
     * @param header The header name
     * @param value The header value to be encoded
     * @return <code>true</code> if the value should be encoded
     */
    boolean shouldUseHuffman(String header, String value);


    /**
     * Returns true if huffman encoding should be used on the header name
     *
     * @param header The header name to be encoded
     * @return <code>true</code> if the value should be encoded
     */
    boolean shouldUseHuffman(String header);
}
|
||||
}
|
||||
36
java/org/apache/coyote/http2/HpackException.java
Normal file
36
java/org/apache/coyote/http2/HpackException.java
Normal file
@@ -0,0 +1,36 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.coyote.http2;
|
||||
|
||||
/**
|
||||
* Exception that is thrown when the HPACK compress context is broken. In this
|
||||
* case the connection must be closed.
|
||||
*/
|
||||
/**
 * Exception that is thrown when the HPACK compression context is broken.
 * In this case the connection must be closed.
 */
public class HpackException extends Exception {

    private static final long serialVersionUID = 1L;

    /** Creates an exception with no detail message. */
    public HpackException() {
        super();
    }

    /**
     * Creates an exception with a detail message.
     *
     * @param message description of the HPACK failure
     */
    public HpackException(String message) {
        super(message);
    }

    /**
     * Creates an exception with a detail message and root cause.
     *
     * @param message description of the HPACK failure
     * @param cause   the underlying error
     */
    public HpackException(String message, Throwable cause) {
        super(message, cause);
    }
}
|
||||
53
java/org/apache/coyote/http2/Http2Error.java
Normal file
53
java/org/apache/coyote/http2/Http2Error.java
Normal file
@@ -0,0 +1,53 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.coyote.http2;
|
||||
|
||||
public enum Http2Error {
|
||||
|
||||
NO_ERROR (0x00),
|
||||
PROTOCOL_ERROR (0x01),
|
||||
INTERNAL_ERROR (0x02),
|
||||
FLOW_CONTROL_ERROR (0x03),
|
||||
SETTINGS_TIMEOUT (0x04),
|
||||
STREAM_CLOSED (0x05),
|
||||
FRAME_SIZE_ERROR (0x06),
|
||||
REFUSED_STREAM (0x07),
|
||||
CANCEL (0x08),
|
||||
COMPRESSION_ERROR (0x09),
|
||||
CONNECT_ERROR (0x0a),
|
||||
ENHANCE_YOUR_CALM (0x0b),
|
||||
INADEQUATE_SECURITY (0x0c),
|
||||
HTTP_1_1_REQUIRED (0x0d);
|
||||
|
||||
private final long code;
|
||||
|
||||
private Http2Error(long code) {
|
||||
this.code = code;
|
||||
}
|
||||
|
||||
|
||||
public long getCode() {
|
||||
return code;
|
||||
}
|
||||
|
||||
|
||||
public byte[] getCodeBytes() {
|
||||
byte[] codeByte = new byte[4];
|
||||
ByteUtil.setFourBytes(codeByte, 0, code);
|
||||
return codeByte;
|
||||
}
|
||||
}
|
||||
41
java/org/apache/coyote/http2/Http2Exception.java
Normal file
41
java/org/apache/coyote/http2/Http2Exception.java
Normal file
@@ -0,0 +1,41 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.coyote.http2;
|
||||
|
||||
/**
 * Base class for HTTP/2 protocol errors. Each instance carries the
 * {@link Http2Error} code that should be reported to the peer. Concrete
 * subclasses distinguish connection-level from stream-level errors.
 */
public abstract class Http2Exception extends Exception {

    private static final long serialVersionUID = 1L;

    // HTTP/2 error code to send to the peer
    private final Http2Error error;


    Http2Exception(String msg, Http2Error error) {
        super(msg);
        this.error = error;
    }


    Http2Exception(String msg, Http2Error error, Throwable cause) {
        super(msg, cause);
        this.error = error;
    }


    /**
     * @return the HTTP/2 error code associated with this exception
     */
    Http2Error getError() {
        return error;
    }
}
|
||||
87
java/org/apache/coyote/http2/Http2OutputBuffer.java
Normal file
87
java/org/apache/coyote/http2/Http2OutputBuffer.java
Normal file
@@ -0,0 +1,87 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.coyote.http2;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.nio.ByteBuffer;
|
||||
|
||||
import org.apache.coyote.Response;
|
||||
import org.apache.coyote.http11.HttpOutputBuffer;
|
||||
import org.apache.coyote.http11.OutputFilter;
|
||||
import org.apache.coyote.http2.Stream.StreamOutputBuffer;
|
||||
import org.apache.tomcat.util.buf.ByteChunk;
|
||||
|
||||
public class Http2OutputBuffer implements HttpOutputBuffer {
|
||||
|
||||
private final Response coyoteResponse;
|
||||
private HttpOutputBuffer next;
|
||||
|
||||
|
||||
/**
|
||||
* Add a filter at the start of the existing processing chain. Subsequent
|
||||
* calls to the {@link HttpOutputBuffer} methods of this object will be
|
||||
* passed to the filter. If appropriate, the filter will then call the same
|
||||
* method on the next HttpOutputBuffer in the chain until the call reaches
|
||||
* the StreamOutputBuffer.
|
||||
*
|
||||
* @param filter The filter to add to the start of the processing chain
|
||||
*/
|
||||
public void addFilter(OutputFilter filter) {
|
||||
filter.setBuffer(next);
|
||||
next = filter;
|
||||
}
|
||||
|
||||
|
||||
public Http2OutputBuffer(Response coyoteResponse, StreamOutputBuffer streamOutputBuffer) {
|
||||
this.coyoteResponse = coyoteResponse;
|
||||
this.next = streamOutputBuffer;
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public int doWrite(ByteBuffer chunk) throws IOException {
|
||||
if (!coyoteResponse.isCommitted()) {
|
||||
coyoteResponse.sendHeaders();
|
||||
}
|
||||
return next.doWrite(chunk);
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public long getBytesWritten() {
|
||||
return next.getBytesWritten();
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public void end() throws IOException {
|
||||
next.end();
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public void flush() throws IOException {
|
||||
next.flush();
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
@Deprecated
|
||||
public int doWrite(ByteChunk chunk) throws IOException {
|
||||
return next.doWrite(chunk);
|
||||
}
|
||||
}
|
||||
675
java/org/apache/coyote/http2/Http2Parser.java
Normal file
675
java/org/apache/coyote/http2/Http2Parser.java
Normal file
@@ -0,0 +1,675 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.coyote.http2;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.nio.ByteBuffer;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
|
||||
import org.apache.coyote.ProtocolException;
|
||||
import org.apache.coyote.http2.HpackDecoder.HeaderEmitter;
|
||||
import org.apache.juli.logging.Log;
|
||||
import org.apache.juli.logging.LogFactory;
|
||||
import org.apache.tomcat.util.buf.ByteBufferUtils;
|
||||
import org.apache.tomcat.util.res.StringManager;
|
||||
|
||||
class Http2Parser {
|
||||
|
||||
private static final Log log = LogFactory.getLog(Http2Parser.class);
|
||||
private static final StringManager sm = StringManager.getManager(Http2Parser.class);
|
||||
|
||||
static final byte[] CLIENT_PREFACE_START =
|
||||
"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n".getBytes(StandardCharsets.ISO_8859_1);
|
||||
|
||||
private final String connectionId;
|
||||
private final Input input;
|
||||
private final Output output;
|
||||
private final byte[] frameHeaderBuffer = new byte[9];
|
||||
|
||||
private volatile HpackDecoder hpackDecoder;
|
||||
private volatile ByteBuffer headerReadBuffer =
|
||||
ByteBuffer.allocate(Constants.DEFAULT_HEADER_READ_BUFFER_SIZE);
|
||||
private volatile int headersCurrentStream = -1;
|
||||
private volatile boolean headersEndStream = false;
|
||||
|
||||
/**
 * Creates a parser for a single HTTP/2 connection.
 *
 * @param connectionId identifier used only in log and error messages
 * @param input        source of raw frame bytes
 * @param output       sink notified as frames are parsed
 */
Http2Parser(String connectionId, Input input, Output output) {
    this.connectionId = connectionId;
    this.input = input;
    this.output = output;
}
|
||||
|
||||
|
||||
/**
 * Read and process a single frame. Once the start of a frame is read, the
 * remainder will be read using blocking IO.
 *
 * @param block Should this method block until a frame is available if no
 *              frame is available immediately?
 *
 * @return <code>true</code> if a frame was read otherwise
 *         <code>false</code>
 *
 * @throws IOException If an IO error occurs while trying to read a frame
 * @throws Http2Exception If the frame is invalid for the current
 *         connection state
 */
boolean readFrame(boolean block) throws Http2Exception, IOException {
    // null: no particular frame type is expected
    return readFrame(block, null);
}
|
||||
|
||||
|
||||
/*
 * Reads one frame: the 9 byte frame header first, then dispatches to the
 * type-specific reader. When expected is non-null (connection preface
 * handling) the type is enforced by validateFrame().
 */
private boolean readFrame(boolean block, FrameType expected)
        throws IOException, Http2Exception {

    // Frame header: 3 bytes length, 1 byte type, 1 byte flags,
    // 31-bit stream identifier
    if (!input.fill(block, frameHeaderBuffer)) {
        // Non-blocking read requested and no data available
        return false;
    }

    int payloadSize = ByteUtil.getThreeBytes(frameHeaderBuffer, 0);
    FrameType frameType = FrameType.valueOf(ByteUtil.getOneByte(frameHeaderBuffer, 3));
    int flags = ByteUtil.getOneByte(frameHeaderBuffer, 4);
    int streamId = ByteUtil.get31Bits(frameHeaderBuffer, 5);

    try {
        validateFrame(expected, frameType, streamId, flags, payloadSize);
    } catch (StreamException se) {
        // A stream-level error still requires the payload to be consumed
        // so the connection can continue to be used
        swallow(streamId, payloadSize, false);
        throw se;
    }

    switch (frameType) {
    case DATA:
        readDataFrame(streamId, flags, payloadSize);
        break;
    case HEADERS:
        readHeadersFrame(streamId, flags, payloadSize);
        break;
    case PRIORITY:
        readPriorityFrame(streamId);
        break;
    case RST:
        readRstFrame(streamId);
        break;
    case SETTINGS:
        readSettingsFrame(flags, payloadSize);
        break;
    case PUSH_PROMISE:
        readPushPromiseFrame(streamId);
        break;
    case PING:
        readPingFrame(flags);
        break;
    case GOAWAY:
        readGoawayFrame(payloadSize);
        break;
    case WINDOW_UPDATE:
        readWindowUpdateFrame(streamId);
        break;
    case CONTINUATION:
        readContinuationFrame(streamId, flags, payloadSize);
        break;
    case UNKNOWN:
        readUnknownFrame(streamId, frameType, flags, payloadSize);
    }

    return true;
}
|
||||
|
||||
|
||||
/*
 * Reads a DATA frame, copying the body bytes into the buffer supplied by
 * the output, or discarding them if the output returns no buffer.
 */
private void readDataFrame(int streamId, int flags, int payloadSize)
        throws Http2Exception, IOException {
    // Process the Stream
    int padLength = 0;

    boolean endOfStream = Flags.isEndOfStream(flags);

    int dataLength;
    if (Flags.hasPadding(flags)) {
        // First payload byte is the pad length
        byte[] b = new byte[1];
        input.fill(true, b);
        padLength = b[0] & 0xFF;

        // Padding (plus its length byte) must fit inside the payload
        if (padLength >= payloadSize) {
            throw new ConnectionException(
                    sm.getString("http2Parser.processFrame.tooMuchPadding", connectionId,
                            Integer.toString(streamId), Integer.toString(padLength),
                            Integer.toString(payloadSize)), Http2Error.PROTOCOL_ERROR);
        }
        // +1 is for the padding length byte we just read above
        dataLength = payloadSize - (padLength + 1);
    } else {
        dataLength = payloadSize;
    }

    if (log.isDebugEnabled()) {
        String padding;
        if (Flags.hasPadding(flags)) {
            padding = Integer.toString(padLength);
        } else {
            padding = "none";
        }
        log.debug(sm.getString("http2Parser.processFrameData.lengths", connectionId,
                Integer.toString(streamId), Integer.toString(dataLength), padding));
    }

    ByteBuffer dest = output.startRequestBodyFrame(streamId, payloadSize, endOfStream);
    if (dest == null) {
        // Stream has no interest in the body - discard it
        swallow(streamId, dataLength, false);
        // Process padding before sending any notifications in case padding
        // is invalid.
        if (padLength > 0) {
            swallow(streamId, padLength, true);
        }
        if (endOfStream) {
            output.receivedEndOfStream(streamId);
        }
    } else {
        // The destination buffer is shared with the consuming stream -
        // guard all access to it
        synchronized (dest) {
            if (dest.remaining() < dataLength) {
                swallow(streamId, dataLength, false);
                // Client has sent more data than permitted by Window size
                throw new StreamException(sm.getString("http2Parser.processFrameData.window", connectionId),
                        Http2Error.FLOW_CONTROL_ERROR, streamId);
            }
            input.fill(true, dest, dataLength);
            // Process padding before sending any notifications in case
            // padding is invalid.
            if (padLength > 0) {
                swallow(streamId, padLength, true);
            }
            if (endOfStream) {
                output.receivedEndOfStream(streamId);
            }
            output.endRequestBodyFrame(streamId);
        }
    }
    if (padLength > 0) {
        // Padding counts against the flow control window - notify so it
        // can be replenished
        output.swallowedPadding(streamId, padLength);
    }
}
|
||||
|
||||
|
||||
/*
 * Reads a HEADERS frame: optional pad-length and priority fields first,
 * then the HPACK header block fragment. If END_OF_HEADERS is not set,
 * CONTINUATION frames for the same stream are expected next.
 */
private void readHeadersFrame(int streamId, int flags, int payloadSize)
        throws Http2Exception, IOException {

    headersEndStream = Flags.isEndOfStream(flags);

    // Decoder is obtained lazily from the output
    if (hpackDecoder == null) {
        hpackDecoder = output.getHpackDecoder();
    }
    try {
        hpackDecoder.setHeaderEmitter(output.headersStart(streamId, headersEndStream));
    } catch (StreamException se) {
        // Stream was rejected but the payload must still be consumed so
        // the HPACK state stays consistent
        swallow(streamId, payloadSize, false);
        throw se;
    }

    int padLength = 0;
    boolean padding = Flags.hasPadding(flags);
    boolean priority = Flags.hasPriority(flags);
    int optionalLen = 0;
    if (padding) {
        // 1 byte pad length
        optionalLen = 1;
    }
    if (priority) {
        // 4 byte stream dependency + 1 byte weight
        optionalLen += 5;
    }
    if (optionalLen > 0) {
        byte[] optional = new byte[optionalLen];
        input.fill(true, optional);
        int optionalPos = 0;
        if (padding) {
            padLength = ByteUtil.getOneByte(optional, optionalPos++);
            if (padLength >= payloadSize) {
                throw new ConnectionException(
                        sm.getString("http2Parser.processFrame.tooMuchPadding", connectionId,
                                Integer.toString(streamId), Integer.toString(padLength),
                                Integer.toString(payloadSize)), Http2Error.PROTOCOL_ERROR);
            }
        }
        if (priority) {
            boolean exclusive = ByteUtil.isBit7Set(optional[optionalPos]);
            int parentStreamId = ByteUtil.get31Bits(optional, optionalPos);
            // Weight is transmitted as (value - 1)
            int weight = ByteUtil.getOneByte(optional, optionalPos + 4) + 1;
            output.reprioritise(streamId, parentStreamId, exclusive, weight);
        }

        // What remains is the header block fragment
        payloadSize -= optionalLen;
        payloadSize -= padLength;
    }

    readHeaderPayload(streamId, payloadSize);

    // Padding bytes must all be zero
    swallow(streamId, padLength, true);

    if (Flags.isEndOfHeaders(flags)) {
        onHeadersComplete(streamId);
    } else {
        // Remember the stream so CONTINUATION frames can be validated
        headersCurrentStream = streamId;
    }
}
|
||||
|
||||
|
||||
private void readPriorityFrame(int streamId) throws Http2Exception, IOException {
|
||||
byte[] payload = new byte[5];
|
||||
input.fill(true, payload);
|
||||
|
||||
boolean exclusive = ByteUtil.isBit7Set(payload[0]);
|
||||
int parentStreamId = ByteUtil.get31Bits(payload, 0);
|
||||
int weight = ByteUtil.getOneByte(payload, 4) + 1;
|
||||
|
||||
if (streamId == parentStreamId) {
|
||||
throw new StreamException(sm.getString("http2Parser.processFramePriority.invalidParent",
|
||||
connectionId, Integer.valueOf(streamId)), Http2Error.PROTOCOL_ERROR, streamId);
|
||||
}
|
||||
|
||||
output.reprioritise(streamId, parentStreamId, exclusive, weight);
|
||||
}
|
||||
|
||||
|
||||
private void readRstFrame(int streamId) throws Http2Exception, IOException {
|
||||
byte[] payload = new byte[4];
|
||||
input.fill(true, payload);
|
||||
|
||||
long errorCode = ByteUtil.getFourBytes(payload, 0);
|
||||
output.reset(streamId, errorCode);
|
||||
headersCurrentStream = -1;
|
||||
headersEndStream = false;
|
||||
}
|
||||
|
||||
|
||||
private void readSettingsFrame(int flags, int payloadSize) throws Http2Exception, IOException {
|
||||
boolean ack = Flags.isAck(flags);
|
||||
if (payloadSize > 0 && ack) {
|
||||
throw new ConnectionException(sm.getString(
|
||||
"http2Parser.processFrameSettings.ackWithNonZeroPayload"),
|
||||
Http2Error.FRAME_SIZE_ERROR);
|
||||
}
|
||||
|
||||
if (payloadSize == 0 && !ack) {
|
||||
// Ensure empty SETTINGS frame increments the overhead count
|
||||
output.setting(null, 0);
|
||||
} else {
|
||||
// Process the settings
|
||||
byte[] setting = new byte[6];
|
||||
for (int i = 0; i < payloadSize / 6; i++) {
|
||||
input.fill(true, setting);
|
||||
int id = ByteUtil.getTwoBytes(setting, 0);
|
||||
long value = ByteUtil.getFourBytes(setting, 2);
|
||||
output.setting(Setting.valueOf(id), value);
|
||||
}
|
||||
}
|
||||
output.settingsEnd(ack);
|
||||
}
|
||||
|
||||
|
||||
/*
 * PUSH_PROMISE frames are never accepted by this parser: receipt of one
 * is always treated as a connection-level protocol error.
 */
private void readPushPromiseFrame(int streamId) throws Http2Exception {
    throw new ConnectionException(sm.getString("http2Parser.processFramePushPromise",
            connectionId, Integer.valueOf(streamId)), Http2Error.PROTOCOL_ERROR);
}
|
||||
|
||||
|
||||
private void readPingFrame(int flags) throws IOException {
|
||||
// Read the payload
|
||||
byte[] payload = new byte[8];
|
||||
input.fill(true, payload);
|
||||
output.pingReceive(payload, Flags.isAck(flags));
|
||||
}
|
||||
|
||||
|
||||
private void readGoawayFrame(int payloadSize) throws IOException {
|
||||
byte[] payload = new byte[payloadSize];
|
||||
input.fill(true, payload);
|
||||
|
||||
int lastStreamId = ByteUtil.get31Bits(payload, 0);
|
||||
long errorCode = ByteUtil.getFourBytes(payload, 4);
|
||||
String debugData = null;
|
||||
if (payloadSize > 8) {
|
||||
debugData = new String(payload, 8, payloadSize - 8, StandardCharsets.UTF_8);
|
||||
}
|
||||
output.goaway(lastStreamId, errorCode, debugData);
|
||||
}
|
||||
|
||||
|
||||
private void readWindowUpdateFrame(int streamId) throws Http2Exception, IOException {
|
||||
byte[] payload = new byte[4];
|
||||
input.fill(true, payload);
|
||||
int windowSizeIncrement = ByteUtil.get31Bits(payload, 0);
|
||||
|
||||
if (log.isDebugEnabled()) {
|
||||
log.debug(sm.getString("http2Parser.processFrameWindowUpdate.debug", connectionId,
|
||||
Integer.toString(streamId), Integer.toString(windowSizeIncrement)));
|
||||
}
|
||||
|
||||
// Validate the data
|
||||
if (windowSizeIncrement == 0) {
|
||||
if (streamId == 0) {
|
||||
throw new ConnectionException(
|
||||
sm.getString("http2Parser.processFrameWindowUpdate.invalidIncrement"),
|
||||
Http2Error.PROTOCOL_ERROR);
|
||||
} else {
|
||||
throw new StreamException(
|
||||
sm.getString("http2Parser.processFrameWindowUpdate.invalidIncrement"),
|
||||
Http2Error.PROTOCOL_ERROR, streamId);
|
||||
}
|
||||
}
|
||||
|
||||
output.incrementWindowSize(streamId, windowSizeIncrement);
|
||||
}
|
||||
|
||||
|
||||
/*
 * Reads a CONTINUATION frame carrying further header block fragments for
 * the HEADERS sequence already in progress on headersCurrentStream.
 */
private void readContinuationFrame(int streamId, int flags, int payloadSize)
        throws Http2Exception, IOException {
    if (headersCurrentStream == -1) {
        // No headers to continue
        throw new ConnectionException(sm.getString(
                "http2Parser.processFrameContinuation.notExpected", connectionId,
                Integer.toString(streamId)), Http2Error.PROTOCOL_ERROR);
    }

    boolean endOfHeaders = Flags.isEndOfHeaders(flags);

    // Used to detect abusive clients sending large numbers of small
    // continuation frames
    output.headersContinue(payloadSize, endOfHeaders);

    readHeaderPayload(streamId, payloadSize);

    if (endOfHeaders) {
        headersCurrentStream = -1;
        onHeadersComplete(streamId);
    }
}
|
||||
|
||||
|
||||
/*
 * Feeds a header block fragment through the HPACK decoder, expanding the
 * shared read buffer as required and enforcing the header count/size
 * limits as decoding progresses.
 */
private void readHeaderPayload(int streamId, int payloadSize)
        throws Http2Exception, IOException {

    if (log.isDebugEnabled()) {
        log.debug(sm.getString("http2Parser.processFrameHeaders.payload", connectionId,
                Integer.valueOf(streamId), Integer.valueOf(payloadSize)));
    }

    int remaining = payloadSize;

    while (remaining > 0) {
        if (headerReadBuffer.remaining() == 0) {
            // Buffer needs expansion
            int newSize;
            if (headerReadBuffer.capacity() < payloadSize) {
                // First step, expand to the current payload. That should
                // cover most cases.
                newSize = payloadSize;
            } else {
                // Header must be spread over multiple frames. Keep doubling
                // buffer size until the header can be read.
                newSize = headerReadBuffer.capacity() * 2;
            }
            headerReadBuffer = ByteBufferUtils.expand(headerReadBuffer, newSize);
        }
        int toRead = Math.min(headerReadBuffer.remaining(), remaining);
        // headerReadBuffer in write mode
        input.fill(true, headerReadBuffer, toRead);
        // switch to read mode
        headerReadBuffer.flip();
        try {
            hpackDecoder.decode(headerReadBuffer);
        } catch (HpackException hpe) {
            // A broken compression context is fatal for the connection
            throw new ConnectionException(
                    sm.getString("http2Parser.processFrameHeaders.decodingFailed"),
                    Http2Error.COMPRESSION_ERROR, hpe);
        }

        // switches to write mode
        headerReadBuffer.compact();
        remaining -= toRead;

        // Limit violations are recorded on the emitter (as stream errors)
        // rather than thrown, so the remaining headers can still be
        // decoded and the shared HPACK state stays consistent
        if (hpackDecoder.isHeaderCountExceeded()) {
            StreamException headerException = new StreamException(sm.getString(
                    "http2Parser.headerLimitCount", connectionId, Integer.valueOf(streamId)),
                    Http2Error.ENHANCE_YOUR_CALM, streamId);
            hpackDecoder.getHeaderEmitter().setHeaderException(headerException);
        }

        if (hpackDecoder.isHeaderSizeExceeded(headerReadBuffer.position())) {
            StreamException headerException = new StreamException(sm.getString(
                    "http2Parser.headerLimitSize", connectionId, Integer.valueOf(streamId)),
                    Http2Error.ENHANCE_YOUR_CALM, streamId);
            hpackDecoder.getHeaderEmitter().setHeaderException(headerException);
        }

        // Beyond the swallow limit it becomes a connection error
        if (hpackDecoder.isHeaderSwallowSizeExceeded(headerReadBuffer.position())) {
            throw new ConnectionException(sm.getString("http2Parser.headerLimitSize",
                    connectionId, Integer.valueOf(streamId)), Http2Error.ENHANCE_YOUR_CALM);
        }
    }
}
|
||||
|
||||
|
||||
private void readUnknownFrame(int streamId, FrameType frameType, int flags, int payloadSize)
|
||||
throws IOException {
|
||||
try {
|
||||
swallow(streamId, payloadSize, false);
|
||||
} catch (ConnectionException e) {
|
||||
// Will never happen because swallow() is called with mustBeZero set
|
||||
// to false
|
||||
}
|
||||
output.swallowed(streamId, frameType, flags, payloadSize);
|
||||
}
|
||||
|
||||
|
||||
/**
 * Reads and discards {@code len} bytes from the input. When
 * {@code mustBeZero} is set (used for padding) each discarded byte is
 * validated to be zero.
 *
 * @throws ConnectionException if non-zero padding is detected
 */
private void swallow(int streamId, int len, boolean mustBeZero)
        throws IOException, ConnectionException {
    if (log.isDebugEnabled()) {
        log.debug(sm.getString("http2Parser.swallow.debug", connectionId,
                Integer.toString(streamId), Integer.toString(len)));
    }
    if (len == 0) {
        return;
    }
    int read = 0;
    byte[] buffer = new byte[1024];
    while (read < len) {
        int thisTime = Math.min(buffer.length, len - read);
        input.fill(true, buffer, 0, thisTime);
        if (mustBeZero) {
            // Validate the padding is zero since receiving non-zero padding
            // is a strong indication of either a faulty client or a server
            // side bug.
            for (int i = 0; i < thisTime; i++) {
                if (buffer[i] != 0) {
                    throw new ConnectionException(sm.getString("http2Parser.nonZeroPadding",
                            connectionId, Integer.toString(streamId)), Http2Error.PROTOCOL_ERROR);
                }
            }
        }
        read += thisTime;
    }
}
|
||||
|
||||
|
||||
/*
 * Called once a complete header block (HEADERS plus any CONTINUATIONs)
 * has been decoded. Validates, notifies the output and resets per-request
 * state.
 */
private void onHeadersComplete(int streamId) throws Http2Exception {
    // Any left over data is a compression error
    if (headerReadBuffer.position() > 0) {
        throw new ConnectionException(
                sm.getString("http2Parser.processFrameHeaders.decodingDataLeft"),
                Http2Error.COMPRESSION_ERROR);
    }

    // Delay validation (and triggering any exception) until this point
    // since all the headers still have to be read if a StreamException is
    // going to be thrown.
    hpackDecoder.getHeaderEmitter().validateHeaders();

    output.headersEnd(streamId);

    if (headersEndStream) {
        output.receivedEndOfStream(streamId);
        headersEndStream = false;
    }

    // Reset size for new request if the buffer was previously expanded
    if (headerReadBuffer.capacity() > Constants.DEFAULT_HEADER_READ_BUFFER_SIZE) {
        headerReadBuffer = ByteBuffer.allocate(Constants.DEFAULT_HEADER_READ_BUFFER_SIZE);
    }
}
|
||||
|
||||
|
||||
/*
 * Implementation note:
 * Validation applicable to all incoming frames should be implemented here.
 * Frame type specific validation should be performed in the appropriate
 * readXxxFrame() method.
 * For validation applicable to some but not all frame types, use your
 * judgement.
 */
private void validateFrame(FrameType expected, FrameType frameType, int streamId, int flags,
        int payloadSize) throws Http2Exception {

    if (log.isDebugEnabled()) {
        log.debug(sm.getString("http2Parser.processFrame", connectionId,
                Integer.toString(streamId), frameType, Integer.toString(flags),
                Integer.toString(payloadSize)));
    }

    // Enforce an expected frame type (used for the mandatory SETTINGS
    // frame after the connection preface)
    if (expected != null && frameType != expected) {
        throw new StreamException(sm.getString("http2Parser.processFrame.unexpectedType",
                expected, frameType), Http2Error.PROTOCOL_ERROR, streamId);
    }

    int maxFrameSize = input.getMaxFrameSize();
    if (payloadSize > maxFrameSize) {
        throw new ConnectionException(sm.getString("http2Parser.payloadTooBig",
                Integer.toString(payloadSize), Integer.toString(maxFrameSize)),
                Http2Error.FRAME_SIZE_ERROR);
    }

    // While a header block is in progress, only CONTINUATION frames for
    // the same stream (or an RST for it) are permitted
    if (headersCurrentStream != -1) {
        if (headersCurrentStream != streamId) {
            throw new ConnectionException(sm.getString("http2Parser.headers.wrongStream",
                    connectionId, Integer.toString(headersCurrentStream),
                    Integer.toString(streamId)), Http2Error.COMPRESSION_ERROR);
        }
        if (frameType == FrameType.RST) {
            // NO-OP: RST is OK here
        } else if (frameType != FrameType.CONTINUATION) {
            throw new ConnectionException(sm.getString("http2Parser.headers.wrongFrameType",
                    connectionId, Integer.toString(headersCurrentStream),
                    frameType), Http2Error.COMPRESSION_ERROR);
        }
    }

    // Frame-type specific stream/size constraints
    frameType.check(streamId, payloadSize);
}
|
||||
|
||||
|
||||
/**
 * Read and validate the connection preface from input using blocking IO.
 * The client preface magic bytes must be followed immediately by a
 * SETTINGS frame.
 *
 * @throws Http2Exception if the preface is invalid or an IO error occurs
 *         while reading it
 */
void readConnectionPreface() throws Http2Exception {
    byte[] data = new byte[CLIENT_PREFACE_START.length];
    try {
        input.fill(true, data);

        // Byte-for-byte comparison against the expected preface
        for (int i = 0; i < CLIENT_PREFACE_START.length; i++) {
            if (CLIENT_PREFACE_START[i] != data[i]) {
                throw new ProtocolException(sm.getString("http2Parser.preface.invalid"));
            }
        }

        // Must always be followed by a settings frame
        readFrame(true, FrameType.SETTINGS);
    } catch (IOException ioe) {
        throw new ProtocolException(sm.getString("http2Parser.preface.io"), ioe);
    }
}
|
||||
|
||||
|
||||
/**
 * Interface that must be implemented by the source of data for the parser.
 */
static interface Input {

    /**
     * Fill the given array with data unless non-blocking is requested and
     * no data is available. If any data is available then the buffer will
     * be filled using blocking I/O.
     *
     * @param block Should the first read into the provided buffer be a
     *              blocking read or not.
     * @param data Buffer to fill
     * @param offset Position in buffer to start writing
     * @param length Number of bytes to read
     *
     * @return <code>true</code> if the buffer was filled otherwise
     *         <code>false</code>
     *
     * @throws IOException If an I/O occurred while obtaining data with
     *                     which to fill the buffer
     */
    boolean fill(boolean block, byte[] data, int offset, int length) throws IOException;

    /**
     * Convenience variant that fills the whole array.
     *
     * @see #fill(boolean, byte[], int, int)
     */
    boolean fill(boolean block, byte[] data) throws IOException;

    /**
     * Variant that fills {@code len} bytes into a ByteBuffer.
     *
     * @see #fill(boolean, byte[], int, int)
     */
    boolean fill(boolean block, ByteBuffer data, int len) throws IOException;

    /**
     * @return the maximum permitted frame payload size for this connection
     */
    int getMaxFrameSize();
}
|
||||
|
||||
|
||||
/**
 * Interface that must be implemented to receive notifications from the
 * parser as it processes incoming frames.
 */
static interface Output {

    /**
     * @return the HPACK decoder to use when decoding incoming header blocks
     */
    HpackDecoder getHpackDecoder();

    // Data frames

    /**
     * Called at the start of a DATA frame.
     * NOTE(review): the returned buffer is presumably where the parser
     * writes the frame payload — confirm against the parser implementation.
     */
    ByteBuffer startRequestBodyFrame(int streamId, int payloadSize, boolean endOfStream) throws Http2Exception;
    void endRequestBodyFrame(int streamId) throws Http2Exception;
    void receivedEndOfStream(int streamId) throws ConnectionException;
    // Padding bytes are read by the parser but reported to the output so
    // they can be accounted for (e.g. flow control)
    void swallowedPadding(int streamId, int paddingLength) throws ConnectionException, IOException;

    // Header frames

    /**
     * Called at the start of a HEADERS frame; the returned emitter receives
     * the individual decoded headers.
     */
    HeaderEmitter headersStart(int streamId, boolean headersEndStream)
            throws Http2Exception, IOException;
    // Called for each CONTINUATION frame of the in-progress header block
    void headersContinue(int payloadSize, boolean endOfHeaders);
    void headersEnd(int streamId) throws ConnectionException;

    // Priority frames (also headers)
    void reprioritise(int streamId, int parentStreamId, boolean exclusive, int weight)
            throws Http2Exception;

    // Reset frames
    void reset(int streamId, long errorCode) throws Http2Exception;

    // Settings frames
    // Called once per setting in a SETTINGS frame, then settingsEnd() once
    // the frame is complete
    void setting(Setting setting, long value) throws ConnectionException;
    void settingsEnd(boolean ack) throws IOException;

    // Ping frames
    void pingReceive(byte[] payload, boolean ack) throws IOException;

    // Goaway
    void goaway(int lastStreamId, long errorCode, String debugData);

    // Window size
    void incrementWindowSize(int streamId, int increment) throws Http2Exception;

    // Testing
    // Called when the parser discards a frame (or part of one) so tests can
    // observe what was skipped
    void swallowed(int streamId, FrameType frameType, int flags, int size) throws IOException;
}
|
||||
}
|
||||
423
java/org/apache/coyote/http2/Http2Protocol.java
Normal file
423
java/org/apache/coyote/http2/Http2Protocol.java
Normal file
@@ -0,0 +1,423 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.coyote.http2;
|
||||
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.Enumeration;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Locale;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.regex.Pattern;
|
||||
|
||||
import org.apache.coyote.Adapter;
|
||||
import org.apache.coyote.CompressionConfig;
|
||||
import org.apache.coyote.Processor;
|
||||
import org.apache.coyote.Request;
|
||||
import org.apache.coyote.Response;
|
||||
import org.apache.coyote.UpgradeProtocol;
|
||||
import org.apache.coyote.UpgradeToken;
|
||||
import org.apache.coyote.http11.AbstractHttp11Protocol;
|
||||
import org.apache.coyote.http11.upgrade.InternalHttpUpgradeHandler;
|
||||
import org.apache.coyote.http11.upgrade.UpgradeProcessorInternal;
|
||||
import org.apache.tomcat.util.buf.StringUtils;
|
||||
import org.apache.tomcat.util.net.SocketWrapperBase;
|
||||
|
||||
public class Http2Protocol implements UpgradeProtocol {
|
||||
|
||||
static final long DEFAULT_READ_TIMEOUT = 5000;
|
||||
static final long DEFAULT_WRITE_TIMEOUT = 5000;
|
||||
static final long DEFAULT_KEEP_ALIVE_TIMEOUT = 20000;
|
||||
static final long DEFAULT_STREAM_READ_TIMEOUT = 20000;
|
||||
static final long DEFAULT_STREAM_WRITE_TIMEOUT = 20000;
|
||||
// The HTTP/2 specification recommends a minimum default of 100
|
||||
static final long DEFAULT_MAX_CONCURRENT_STREAMS = 100;
|
||||
// Maximum amount of streams which can be concurrently executed over
|
||||
// a single connection
|
||||
static final int DEFAULT_MAX_CONCURRENT_STREAM_EXECUTION = 20;
|
||||
|
||||
static final int DEFAULT_OVERHEAD_COUNT_FACTOR = 1;
|
||||
static final int DEFAULT_OVERHEAD_CONTINUATION_THRESHOLD = 1024;
|
||||
static final int DEFAULT_OVERHEAD_DATA_THRESHOLD = 1024;
|
||||
static final int DEFAULT_OVERHEAD_WINDOW_UPDATE_THRESHOLD = 1024;
|
||||
|
||||
private static final String HTTP_UPGRADE_NAME = "h2c";
|
||||
private static final String ALPN_NAME = "h2";
|
||||
private static final byte[] ALPN_IDENTIFIER = ALPN_NAME.getBytes(StandardCharsets.UTF_8);
|
||||
|
||||
// All timeouts in milliseconds
|
||||
// These are the socket level timeouts
|
||||
private long readTimeout = DEFAULT_READ_TIMEOUT;
|
||||
private long writeTimeout = DEFAULT_WRITE_TIMEOUT;
|
||||
private long keepAliveTimeout = DEFAULT_KEEP_ALIVE_TIMEOUT;
|
||||
// These are the stream level timeouts
|
||||
private long streamReadTimeout = DEFAULT_STREAM_READ_TIMEOUT;
|
||||
private long streamWriteTimeout = DEFAULT_STREAM_WRITE_TIMEOUT;
|
||||
|
||||
private long maxConcurrentStreams = DEFAULT_MAX_CONCURRENT_STREAMS;
|
||||
private int maxConcurrentStreamExecution = DEFAULT_MAX_CONCURRENT_STREAM_EXECUTION;
|
||||
// To advertise a different default to the client specify it here but DO NOT
|
||||
// change the default defined in ConnectionSettingsBase.
|
||||
private int initialWindowSize = ConnectionSettingsBase.DEFAULT_INITIAL_WINDOW_SIZE;
|
||||
// Limits
|
||||
private Set<String> allowedTrailerHeaders =
|
||||
Collections.newSetFromMap(new ConcurrentHashMap<String, Boolean>());
|
||||
private int maxHeaderCount = Constants.DEFAULT_MAX_HEADER_COUNT;
|
||||
private int maxHeaderSize = Constants.DEFAULT_MAX_HEADER_SIZE;
|
||||
private int maxTrailerCount = Constants.DEFAULT_MAX_TRAILER_COUNT;
|
||||
private int maxTrailerSize = Constants.DEFAULT_MAX_TRAILER_SIZE;
|
||||
private int overheadCountFactor = DEFAULT_OVERHEAD_COUNT_FACTOR;
|
||||
private int overheadContinuationThreshold = DEFAULT_OVERHEAD_CONTINUATION_THRESHOLD;
|
||||
private int overheadDataThreshold = DEFAULT_OVERHEAD_DATA_THRESHOLD;
|
||||
private int overheadWindowUpdateThreshold = DEFAULT_OVERHEAD_WINDOW_UPDATE_THRESHOLD;
|
||||
|
||||
private boolean initiatePingDisabled = false;
|
||||
// Compression
|
||||
private final CompressionConfig compressionConfig = new CompressionConfig();
|
||||
// Reference to HTTP/1.1 protocol that this instance is configured under
|
||||
private AbstractHttp11Protocol<?> http11Protocol = null;
|
||||
|
||||
@Override
|
||||
public String getHttpUpgradeName(boolean isSSLEnabled) {
|
||||
if (isSSLEnabled) {
|
||||
return null;
|
||||
} else {
|
||||
return HTTP_UPGRADE_NAME;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public byte[] getAlpnIdentifier() {
|
||||
return ALPN_IDENTIFIER;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getAlpnName() {
|
||||
return ALPN_NAME;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Processor getProcessor(SocketWrapperBase<?> socketWrapper, Adapter adapter) {
|
||||
UpgradeProcessorInternal processor = new UpgradeProcessorInternal(socketWrapper,
|
||||
new UpgradeToken(getInternalUpgradeHandler(adapter, null), null, null));
|
||||
return processor;
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public InternalHttpUpgradeHandler getInternalUpgradeHandler(Adapter adapter,
|
||||
Request coyoteRequest) {
|
||||
Http2UpgradeHandler result = new Http2UpgradeHandler(this, adapter, coyoteRequest);
|
||||
|
||||
result.setReadTimeout(getReadTimeout());
|
||||
result.setKeepAliveTimeout(getKeepAliveTimeout());
|
||||
result.setWriteTimeout(getWriteTimeout());
|
||||
result.setMaxConcurrentStreams(getMaxConcurrentStreams());
|
||||
result.setMaxConcurrentStreamExecution(getMaxConcurrentStreamExecution());
|
||||
result.setInitialWindowSize(getInitialWindowSize());
|
||||
result.setAllowedTrailerHeaders(allowedTrailerHeaders);
|
||||
result.setMaxHeaderCount(getMaxHeaderCount());
|
||||
result.setMaxHeaderSize(getMaxHeaderSize());
|
||||
result.setMaxTrailerCount(getMaxTrailerCount());
|
||||
result.setMaxTrailerSize(getMaxTrailerSize());
|
||||
result.setInitiatePingDisabled(initiatePingDisabled);
|
||||
return result;
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public boolean accept(Request request) {
|
||||
// Should only be one HTTP2-Settings header
|
||||
Enumeration<String> settings = request.getMimeHeaders().values("HTTP2-Settings");
|
||||
int count = 0;
|
||||
while (settings.hasMoreElements()) {
|
||||
count++;
|
||||
settings.nextElement();
|
||||
}
|
||||
if (count != 1) {
|
||||
return false;
|
||||
}
|
||||
|
||||
Enumeration<String> connection = request.getMimeHeaders().values("Connection");
|
||||
boolean found = false;
|
||||
while (connection.hasMoreElements() && !found) {
|
||||
found = connection.nextElement().contains("HTTP2-Settings");
|
||||
}
|
||||
return found;
|
||||
}
|
||||
|
||||
|
||||
public long getReadTimeout() {
|
||||
return readTimeout;
|
||||
}
|
||||
|
||||
|
||||
public void setReadTimeout(long readTimeout) {
|
||||
this.readTimeout = readTimeout;
|
||||
}
|
||||
|
||||
|
||||
public long getWriteTimeout() {
|
||||
return writeTimeout;
|
||||
}
|
||||
|
||||
|
||||
public void setWriteTimeout(long writeTimeout) {
|
||||
this.writeTimeout = writeTimeout;
|
||||
}
|
||||
|
||||
|
||||
public long getKeepAliveTimeout() {
|
||||
return keepAliveTimeout;
|
||||
}
|
||||
|
||||
|
||||
public void setKeepAliveTimeout(long keepAliveTimeout) {
|
||||
this.keepAliveTimeout = keepAliveTimeout;
|
||||
}
|
||||
|
||||
|
||||
public long getStreamReadTimeout() {
|
||||
return streamReadTimeout;
|
||||
}
|
||||
|
||||
|
||||
public void setStreamReadTimeout(long streamReadTimeout) {
|
||||
this.streamReadTimeout = streamReadTimeout;
|
||||
}
|
||||
|
||||
|
||||
public long getStreamWriteTimeout() {
|
||||
return streamWriteTimeout;
|
||||
}
|
||||
|
||||
|
||||
public void setStreamWriteTimeout(long streamWriteTimeout) {
|
||||
this.streamWriteTimeout = streamWriteTimeout;
|
||||
}
|
||||
|
||||
|
||||
public long getMaxConcurrentStreams() {
|
||||
return maxConcurrentStreams;
|
||||
}
|
||||
|
||||
|
||||
public void setMaxConcurrentStreams(long maxConcurrentStreams) {
|
||||
this.maxConcurrentStreams = maxConcurrentStreams;
|
||||
}
|
||||
|
||||
|
||||
public int getMaxConcurrentStreamExecution() {
|
||||
return maxConcurrentStreamExecution;
|
||||
}
|
||||
|
||||
|
||||
public void setMaxConcurrentStreamExecution(int maxConcurrentStreamExecution) {
|
||||
this.maxConcurrentStreamExecution = maxConcurrentStreamExecution;
|
||||
}
|
||||
|
||||
|
||||
public int getInitialWindowSize() {
|
||||
return initialWindowSize;
|
||||
}
|
||||
|
||||
|
||||
public void setInitialWindowSize(int initialWindowSize) {
|
||||
this.initialWindowSize = initialWindowSize;
|
||||
}
|
||||
|
||||
|
||||
public void setAllowedTrailerHeaders(String commaSeparatedHeaders) {
|
||||
// Jump through some hoops so we don't end up with an empty set while
|
||||
// doing updates.
|
||||
Set<String> toRemove = new HashSet<>();
|
||||
toRemove.addAll(allowedTrailerHeaders);
|
||||
if (commaSeparatedHeaders != null) {
|
||||
String[] headers = commaSeparatedHeaders.split(",");
|
||||
for (String header : headers) {
|
||||
String trimmedHeader = header.trim().toLowerCase(Locale.ENGLISH);
|
||||
if (toRemove.contains(trimmedHeader)) {
|
||||
toRemove.remove(trimmedHeader);
|
||||
} else {
|
||||
allowedTrailerHeaders.add(trimmedHeader);
|
||||
}
|
||||
}
|
||||
allowedTrailerHeaders.removeAll(toRemove);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
public String getAllowedTrailerHeaders() {
|
||||
// Chances of a size change between these lines are small enough that a
|
||||
// sync is unnecessary.
|
||||
List<String> copy = new ArrayList<>(allowedTrailerHeaders.size());
|
||||
copy.addAll(allowedTrailerHeaders);
|
||||
return StringUtils.join(copy);
|
||||
}
|
||||
|
||||
|
||||
public void setMaxHeaderCount(int maxHeaderCount) {
|
||||
this.maxHeaderCount = maxHeaderCount;
|
||||
}
|
||||
|
||||
|
||||
public int getMaxHeaderCount() {
|
||||
return maxHeaderCount;
|
||||
}
|
||||
|
||||
|
||||
public void setMaxHeaderSize(int maxHeaderSize) {
|
||||
this.maxHeaderSize = maxHeaderSize;
|
||||
}
|
||||
|
||||
|
||||
public int getMaxHeaderSize() {
|
||||
return maxHeaderSize;
|
||||
}
|
||||
|
||||
|
||||
public void setMaxTrailerCount(int maxTrailerCount) {
|
||||
this.maxTrailerCount = maxTrailerCount;
|
||||
}
|
||||
|
||||
|
||||
public int getMaxTrailerCount() {
|
||||
return maxTrailerCount;
|
||||
}
|
||||
|
||||
|
||||
public void setMaxTrailerSize(int maxTrailerSize) {
|
||||
this.maxTrailerSize = maxTrailerSize;
|
||||
}
|
||||
|
||||
|
||||
public int getMaxTrailerSize() {
|
||||
return maxTrailerSize;
|
||||
}
|
||||
|
||||
|
||||
public int getOverheadCountFactor() {
|
||||
return overheadCountFactor;
|
||||
}
|
||||
|
||||
|
||||
public void setOverheadCountFactor(int overheadCountFactor) {
|
||||
this.overheadCountFactor = overheadCountFactor;
|
||||
}
|
||||
|
||||
|
||||
public int getOverheadContinuationThreshold() {
|
||||
return overheadContinuationThreshold;
|
||||
}
|
||||
|
||||
|
||||
public void setOverheadContinuationThreshold(int overheadContinuationThreshold) {
|
||||
this.overheadContinuationThreshold = overheadContinuationThreshold;
|
||||
}
|
||||
|
||||
|
||||
public int getOverheadDataThreshold() {
|
||||
return overheadDataThreshold;
|
||||
}
|
||||
|
||||
|
||||
public void setOverheadDataThreshold(int overheadDataThreshold) {
|
||||
this.overheadDataThreshold = overheadDataThreshold;
|
||||
}
|
||||
|
||||
|
||||
public int getOverheadWindowUpdateThreshold() {
|
||||
return overheadWindowUpdateThreshold;
|
||||
}
|
||||
|
||||
|
||||
public void setOverheadWindowUpdateThreshold(int overheadWindowUpdateThreshold) {
|
||||
this.overheadWindowUpdateThreshold = overheadWindowUpdateThreshold;
|
||||
}
|
||||
|
||||
|
||||
public void setInitiatePingDisabled(boolean initiatePingDisabled) {
|
||||
this.initiatePingDisabled = initiatePingDisabled;
|
||||
}
|
||||
|
||||
|
||||
public void setCompression(String compression) {
|
||||
compressionConfig.setCompression(compression);
|
||||
}
|
||||
public String getCompression() {
|
||||
return compressionConfig.getCompression();
|
||||
}
|
||||
protected int getCompressionLevel() {
|
||||
return compressionConfig.getCompressionLevel();
|
||||
}
|
||||
|
||||
|
||||
public String getNoCompressionUserAgents() {
|
||||
return compressionConfig.getNoCompressionUserAgents();
|
||||
}
|
||||
protected Pattern getNoCompressionUserAgentsPattern() {
|
||||
return compressionConfig.getNoCompressionUserAgentsPattern();
|
||||
}
|
||||
public void setNoCompressionUserAgents(String noCompressionUserAgents) {
|
||||
compressionConfig.setNoCompressionUserAgents(noCompressionUserAgents);
|
||||
}
|
||||
|
||||
|
||||
public String getCompressibleMimeType() {
|
||||
return compressionConfig.getCompressibleMimeType();
|
||||
}
|
||||
public void setCompressibleMimeType(String valueS) {
|
||||
compressionConfig.setCompressibleMimeType(valueS);
|
||||
}
|
||||
public String[] getCompressibleMimeTypes() {
|
||||
return compressionConfig.getCompressibleMimeTypes();
|
||||
}
|
||||
|
||||
|
||||
public int getCompressionMinSize() {
|
||||
return compressionConfig.getCompressionMinSize();
|
||||
}
|
||||
public void setCompressionMinSize(int compressionMinSize) {
|
||||
compressionConfig.setCompressionMinSize(compressionMinSize);
|
||||
}
|
||||
|
||||
|
||||
@Deprecated
|
||||
public boolean getNoCompressionStrongETag() {
|
||||
return compressionConfig.getNoCompressionStrongETag();
|
||||
}
|
||||
@Deprecated
|
||||
public void setNoCompressionStrongETag(boolean noCompressionStrongETag) {
|
||||
compressionConfig.setNoCompressionStrongETag(noCompressionStrongETag);
|
||||
}
|
||||
|
||||
|
||||
public boolean useCompression(Request request, Response response) {
|
||||
return compressionConfig.useCompression(request, response);
|
||||
}
|
||||
|
||||
|
||||
public AbstractHttp11Protocol<?> getHttp11Protocol() {
|
||||
return this.http11Protocol;
|
||||
}
|
||||
public void setHttp11Protocol(AbstractHttp11Protocol<?> http11Protocol) {
|
||||
this.http11Protocol = http11Protocol;
|
||||
}
|
||||
}
|
||||
1989
java/org/apache/coyote/http2/Http2UpgradeHandler.java
Normal file
1989
java/org/apache/coyote/http2/Http2UpgradeHandler.java
Normal file
File diff suppressed because it is too large
Load Diff
162
java/org/apache/coyote/http2/LocalStrings.properties
Normal file
162
java/org/apache/coyote/http2/LocalStrings.properties
Normal file
@@ -0,0 +1,162 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
abstractStream.windowSizeDec=Connection [{0}], Stream [{1}], reduce flow control window by [{2}] to [{3}]
|
||||
abstractStream.windowSizeInc=Connection [{0}], Stream [{1}], increase flow control window by [{2}] to [{3}]
|
||||
abstractStream.windowSizeTooBig=Connection [{0}], Stream [{1}], increased window size by [{2}] to [{3}] which exceeded permitted maximum
|
||||
|
||||
connectionPrefaceParser.eos=Unexpected end of stream while reading opening client preface byte sequence. Only [{0}] bytes read.
|
||||
connectionPrefaceParser.ioError=Failed to read opening client preface byte sequence
|
||||
connectionPrefaceParser.mismatch=An unexpected byte sequence was received at the start of the client preface [{0}]
|
||||
|
||||
connectionSettings.debug=Connection [{0}], Endpoint [{1}], Parameter type [{2}] set to [{3}]
|
||||
connectionSettings.enablePushInvalid=Connection [{0}], The requested value for enable push [{1}] is not one of the permitted values (zero or one)
|
||||
connectionSettings.headerTableSizeLimit=Connection [{0}], Attempted to set a header table size of [{1}] but the limit is 16k
|
||||
connectionSettings.maxFrameSizeInvalid=Connection [{0}], The requested maximum frame size of [{1}] is outside the permitted range of [{2}] to [{3}]
|
||||
connectionSettings.unknown=Connection [{0}], An unknown setting with identifier [{1}] and value [{2}] was ignored
|
||||
connectionSettings.windowSizeTooBig=Connection [{0}], The requested window size of [{1}] is bigger than the maximum permitted value of [{2}]
|
||||
|
||||
frameType.checkPayloadSize=Payload size of [{0}] is not valid for frame type [{1}]
|
||||
frameType.checkStream=Invalid frame type [{0}]
|
||||
|
||||
hpack.integerEncodedOverTooManyOctets=HPACK variable length integer encoded over too many octets, max is [{0}]
|
||||
hpack.invalidCharacter=The Unicode character [{0}] at code point [{1}] cannot be encoded as it is outside the permitted range of 0 to 255.
|
||||
|
||||
hpackEncoder.encodeHeader=Encoding header [{0}] with value [{1}]
|
||||
|
||||
hpackdecoder.headerTableIndexInvalid=The header table index [{0}] is not valid as there are [{1}] static entries and [{2}] dynamic entries
|
||||
hpackdecoder.maxMemorySizeExceeded=The header table size [{0}] exceeds the maximum size [{1}]
|
||||
hpackdecoder.notImplemented=Not yet implemented
|
||||
hpackdecoder.nullHeader=Null header at index [{0}]
|
||||
hpackdecoder.tableSizeUpdateNotAtStart=Any table size update must be sent at the start of a header block
|
||||
hpackdecoder.zeroNotValidHeaderTableIndex=Zero is not a valid header table index
|
||||
|
||||
hpackhuffman.huffmanEncodedHpackValueDidNotEndWithEOS=Huffman encoded value in HPACK headers did not end with EOS padding
|
||||
hpackhuffman.stringLiteralEOS=Huffman encoded value in HPACK headers contained the EOS symbol
|
||||
hpackhuffman.stringLiteralTooMuchPadding=More than 7 bits of EOS padding were provided at the end of an Huffman encoded string literal
|
||||
|
||||
http2Parser.headerLimitCount=Connection [{0}], Stream [{1}], Too many headers
|
||||
http2Parser.headerLimitSize=Connection [{0}], Stream [{1}], Total header size too big
|
||||
http2Parser.headers.wrongFrameType=Connection [{0}], headers in progress for stream [{1}] but a frame of type [{2}] was received
|
||||
http2Parser.headers.wrongStream=Connection [{0}], headers in progress for stream [{1}] but a frame for stream [{2}] was received
|
||||
http2Parser.nonZeroPadding=Connection [{0}], Stream [{1}], Non-zero padding received
|
||||
http2Parser.payloadTooBig=The payload is [{0}] bytes long but the maximum frame size is [{1}]
|
||||
http2Parser.preface.invalid=Invalid connection preface presented
|
||||
http2Parser.preface.io=Unable to read connection preface
|
||||
http2Parser.processFrame=Connection [{0}], Stream [{1}], Frame type [{2}], Flags [{3}], Payload size [{4}]
|
||||
http2Parser.processFrame.tooMuchPadding=Connection [{0}], Stream [{1}], The padding length [{2}] was too big for the payload [{3}]
|
||||
http2Parser.processFrame.unexpectedType=Expected frame type [{0}] but received frame type [{1}]
|
||||
http2Parser.processFrameContinuation.notExpected=Connection [{0}], Continuation frame received for stream [{1}] when no headers were in progress
|
||||
http2Parser.processFrameData.lengths=Connection [{0}], Stream [{1}], Data length, [{2}], Padding length [{3}]
|
||||
http2Parser.processFrameData.window=Connection [{0}], Client sent more data than stream window allowed
|
||||
http2Parser.processFrameHeaders.decodingDataLeft=Data left over after HPACK decoding - it should have been consumed
|
||||
http2Parser.processFrameHeaders.decodingFailed=There was an error during the HPACK decoding of HTTP headers
|
||||
http2Parser.processFrameHeaders.payload=Connection [{0}], Stream [{1}], Processing headers payload of size [{2}]
|
||||
http2Parser.processFramePriority.invalidParent=Connection [{0}], Stream [{1}], A stream may not depend on itself
|
||||
http2Parser.processFramePushPromise=Connection [{0}], Stream [{1}], Push promise frames should not be sent by the client
|
||||
http2Parser.processFrameSettings.ackWithNonZeroPayload=Settings frame received with the ACK flag set and payload present
|
||||
http2Parser.processFrameWindowUpdate.debug=Connection [{0}], Stream [{1}], Window size increment [{2}]
|
||||
http2Parser.processFrameWindowUpdate.invalidIncrement=Window update frame received with an invalid increment size of [{0}]
|
||||
http2Parser.swallow.debug=Connection [{0}], Stream [{1}], Swallowed [{2}] bytes
|
||||
|
||||
pingManager.roundTripTime=Connection [{0}] Round trip time measured as [{1}]ns
|
||||
|
||||
stream.closed=Connection [{0}], Stream [{1}], Unable to write to stream once it has been closed
|
||||
stream.header.case=Connection [{0}], Stream [{1}], HTTP header name [{2}] must be in lower case
|
||||
stream.header.connection=Connection [{0}], Stream [{1}], HTTP header [connection] is not permitted in an HTTP/2 request
|
||||
stream.header.contentLength=Connection [{0}], Stream [{1}], The content length header value [{2}] does not agree with the size of the data received [{3}]
|
||||
stream.header.debug=Connection [{0}], Stream [{1}], HTTP header [{2}], Value [{3}]
|
||||
stream.header.duplicate=Connection [{0}], Stream [{1}], received multiple [{2}] headers
|
||||
stream.header.invalid=Connection [{0}], Stream [{1}], The header [{2}] contained invalid value [{3}]
|
||||
stream.header.noPath=Connection [{0}], Stream [{1}], The [:path] pseudo header was empty
|
||||
stream.header.required=Connection [{0}], Stream [{1}], One or more required headers was missing
|
||||
stream.header.te=Connection [{0}], Stream [{1}], HTTP header [te] is not permitted to have the value [{2}] in an HTTP/2 request
|
||||
stream.header.unexpectedPseudoHeader=Connection [{0}], Stream [{1}], Pseudo header [{2}] received after a regular header
|
||||
stream.header.unknownPseudoHeader=Connection [{0}], Stream [{1}], Unknown pseudo header [{2}] received
|
||||
stream.inputBuffer.copy=Copying [{0}] bytes from inBuffer to outBuffer
|
||||
stream.inputBuffer.dispatch=Data added to inBuffer when read interest is registered. Triggering a read dispatch
|
||||
stream.inputBuffer.empty=The Stream input buffer is empty. Waiting for more data
|
||||
stream.inputBuffer.readTimeout=Timeout waiting to read data from client
|
||||
stream.inputBuffer.reset=Stream reset
|
||||
stream.inputBuffer.signal=Data added to inBuffer when read thread is waiting. Signalling that thread to continue
|
||||
stream.notWritable=Connection [{0}], Stream [{1}], This stream is not writable
|
||||
stream.outputBuffer.flush.debug=Connection [{0}], Stream [{1}], flushing output with buffer at position [{2}], writeInProgress [{3}] and closed [{4}]
|
||||
stream.reprioritisation.debug=Connection [{0}], Stream [{1}], Exclusive [{2}], Parent [{3}], Weight [{4}]
|
||||
stream.reset.fail=Connection [{0}], Stream [{1}], Failed to reset stream
|
||||
stream.reset.receive=Connection [{0}], Stream [{1}], Reset received due to [{2}]
|
||||
stream.reset.send=Connection [{0}], Stream [{1}], Reset sent due to [{2}]
|
||||
stream.trailerHeader.noEndOfStream=Connection [{0}], Stream [{1}], The trailer headers did not include the end of stream flag
|
||||
stream.writeTimeout=Timeout waiting for client to increase flow control window to permit stream data to be written
|
||||
|
||||
streamProcessor.cancel=Connection [{0}], Stream [{1}], The remaining request body is not required.
|
||||
streamProcessor.error.connection=Connection [{0}], Stream [{1}], An error occurred during processing that was fatal to the connection
|
||||
streamProcessor.error.stream=Connection [{0}], Stream [{1}], An error occurred during processing that was fatal to the stream
|
||||
streamProcessor.flushBufferedWrite.entry=Connection [{0}], Stream [{1}], Flushing buffered writes
|
||||
streamProcessor.service.error=Error during request processing
|
||||
|
||||
streamStateMachine.debug.change=Connection [{0}], Stream [{1}], State changed from [{2}] to [{3}]
|
||||
streamStateMachine.invalidFrame=Connection [{0}], Stream [{1}], State [{2}], Frame type [{3}]
|
||||
|
||||
upgradeHandler.allocate.debug=Connection [{0}], Stream [{1}], allocated [{2}] bytes
|
||||
upgradeHandler.allocate.left=Connection [{0}], Stream [{1}], [{2}] bytes unallocated - trying to allocate to children
|
||||
upgradeHandler.allocate.recipient=Connection [{0}], Stream [{1}], potential recipient [{2}] with weight [{3}]
|
||||
upgradeHandler.connectionError=Connection error
|
||||
upgradeHandler.dependency.invalid=Connection [{0}], Stream [{1}], Streams may not depend on themselves
|
||||
upgradeHandler.goaway.debug=Connection [{0}], Goaway, Last stream [{1}], Error code [{2}], Debug data [{3}]
|
||||
upgradeHandler.init=Connection [{0}], State [{1}]
|
||||
upgradeHandler.initialWindowSize.invalid=Connection [{0}], Illegal value of [{1}] ignored for initial window size
|
||||
upgradeHandler.invalidPreface=Connection [{0}], Invalid connection preface
|
||||
upgradeHandler.ioerror=Connection [{0}]
|
||||
upgradeHandler.noAllocation=Connection [{0}], Stream [{1}], Timeout waiting for allocation
|
||||
upgradeHandler.noNewStreams=Connection [{0}], Stream [{1}], Stream ignored as no new streams are permitted on this connection
|
||||
upgradeHandler.pause.entry=Connection [{0}] Pausing
|
||||
upgradeHandler.pingFailed=Connection [{0}] Failed to send ping to client
|
||||
upgradeHandler.prefaceReceived=Connection [{0}], Connection preface received from client
|
||||
upgradeHandler.pruneIncomplete=Connection [{0}], Stream [{1}], Failed to fully prune the connection because there are [{2}] too many active streams
|
||||
upgradeHandler.pruneStart=Connection [{0}] Starting pruning of old streams. Limit is [{1}] + 10% and there are currently [{2}] streams.
|
||||
upgradeHandler.pruned=Connection [{0}] Pruned completed stream [{1}]
|
||||
upgradeHandler.prunedPriority=Connection [{0}] Pruned unused stream [{1}] that may have been part of the priority tree
|
||||
upgradeHandler.releaseBacklog=Connection [{0}], Stream [{1}] released from backlog
|
||||
upgradeHandler.rst.debug=Connection [{0}], Stream [{1}], Error [{2}], Message [{3}], RST (closing stream)
|
||||
upgradeHandler.sendPrefaceFail=Connection [{0}], Failed to send preface to client
|
||||
upgradeHandler.socketCloseFailed=Error closing socket
|
||||
upgradeHandler.stream.closed=Stream [{0}] has been closed for some time
|
||||
upgradeHandler.stream.even=A new remote stream ID of [{0}] was requested but all remote streams must use odd identifiers
|
||||
upgradeHandler.stream.notWritable=Connection [{0}], Stream [{1}], This stream is not writable
|
||||
upgradeHandler.stream.old=A new remote stream ID of [{0}] was requested but the most recent stream was [{1}]
|
||||
upgradeHandler.tooManyRemoteStreams=The client attempted to use more than [{0}] active streams
|
||||
upgradeHandler.tooMuchOverhead=Connection [{0}], Too much overhead so the connection will be closed
|
||||
upgradeHandler.unexpectedAck=Connection [{0}], Stream [{1}], A settings acknowledgement was received when not expected
|
||||
upgradeHandler.upgrade=Connection [{0}], HTTP/1.1 upgrade to stream [1]
|
||||
upgradeHandler.upgrade.fail=Connection [{0}], HTTP/1.1 upgrade failed
|
||||
upgradeHandler.upgradeDispatch.entry=Entry, Connection [{0}], SocketStatus [{1}]
|
||||
upgradeHandler.upgradeDispatch.exit=Exit, Connection [{0}], SocketState [{1}]
|
||||
upgradeHandler.windowSizeReservationInterrupted=Connection [{0}], Stream [{1}], reservation for [{2}] bytes
|
||||
upgradeHandler.windowSizeTooBig=Connection [{0}], Stream [{1}], Window size too big
|
||||
upgradeHandler.writeBody=Connection [{0}], Stream [{1}], Data length [{2}]
|
||||
upgradeHandler.writeHeaders=Connection [{0}], Stream [{1}]
|
||||
upgradeHandler.writePushHeaders=Connection [{0}], Stream [{1}], Pushed stream [{2}], EndOfStream [{3}]
|
||||
|
||||
windowAllocationManager.dispatched=Connection [{0}], Stream [{1}], Dispatched
|
||||
windowAllocationManager.notified=Connection [{0}], Stream [{1}], Notified
|
||||
windowAllocationManager.notify=Connection [{0}], Stream [{1}], Waiting type [{2}], Notify type [{3}]
|
||||
windowAllocationManager.waitFor.connection=Connection [{0}], Stream [{1}], Waiting for Connection flow control window (blocking) with timeout [{2}]
|
||||
windowAllocationManager.waitFor.ise=Connection [{0}], Stream [{1}], Already waiting
|
||||
windowAllocationManager.waitFor.stream=Connection [{0}], Stream [{1}], Waiting for Stream flow control window (blocking) with timeout [{2}]
|
||||
windowAllocationManager.waitForNonBlocking.connection=Connection [{0}], Stream [{1}], Waiting for Connection flow control window (non-blocking)
|
||||
windowAllocationManager.waitForNonBlocking.stream=Connection [{0}], Stream [{1}], Waiting for Stream flow control window (non-blocking)
|
||||
|
||||
writeStateMachine.endWrite.ise=It is illegal to specify [{0}] for the new state once a write has completed
|
||||
writeStateMachine.ise=It is illegal to call [{0}()] in state [{1}]
|
||||
42
java/org/apache/coyote/http2/LocalStrings_de.properties
Normal file
42
java/org/apache/coyote/http2/LocalStrings_de.properties
Normal file
@@ -0,0 +1,42 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
abstractStream.windowSizeInc=Verbindung [{0}], Stream [{1}], erhöhe Flow Control Window um [{2}] auf [{3}]
|
||||
|
||||
connectionPrefaceParser.mismatch=Es wurde eine unerwartete Byte Sequenz beim Start der Client Preface Phase [{0}] empfangen
|
||||
|
||||
connectionSettings.debug=Verbindung [{0}], Parameter typ [{1}] gesetzt auf [{2}]
|
||||
connectionSettings.headerTableSizeLimit=Verbindung [{0}], versuchte die Kopfzeilentabellengröße auf [{1}] zu setzen aber die Grenze liegt bei 16k
|
||||
|
||||
hpack.invalidCharacter=Das Unicode Zeichen [{0}] an Code Punkt [{1}] kann nicht kodiert werden, da es außerhalb des erlaubten Bereiches von 0 bis 255 ist.
|
||||
|
||||
hpackdecoder.maxMemorySizeExceeded=Die header table Größe [{0}] überschreitet die maximale Größe von [{1}]
|
||||
hpackdecoder.nullHeader=Null header bei Index [{0}]
|
||||
|
||||
http2Parser.headerLimitSize=Verbindung [{0}], Stream [{1}], Gesamt-Header-Größe zu groß
|
||||
http2Parser.processFrameData.window=Verbindung [{0}], Client hat mehr Daten gesendet als das Stream-Fenster zulässt
|
||||
http2Parser.processFrameHeaders.decodingDataLeft=Nach der HPACK-Dekodierung sind noch Daten übrig - die hätten verarbeitet sein sollen
|
||||
|
||||
stream.header.unknownPseudoHeader=Verbindung [{0}], Stream [{1}], Unbekannten Pseudo-Header [{2}] empfangen
|
||||
|
||||
streamProcessor.service.error=Fehler bei der Anfrageverarbeitung
|
||||
|
||||
upgradeHandler.ioerror=Verbindung [{0}]
|
||||
upgradeHandler.pingFailed=Verbindung [{0}] – Das Senden eines ''ping'' zum Klienten schlug fehl.
|
||||
upgradeHandler.socketCloseFailed=Fehler beim Schließen des Sockets.
|
||||
upgradeHandler.upgrade=Verbindung [{0}], HTTP/1.1 Upgrade auf Stream [1]
|
||||
upgradeHandler.upgrade.fail=Verbindung [{0}], HTTP/1.1 upgrade fehlgeschlagen
|
||||
upgradeHandler.windowSizeTooBig=Verbindung [{0}], Stream [{1}], Fenster-Größe zu groß
|
||||
upgradeHandler.writeHeaders=Verbindung [{0}], Stream [{1}]
|
||||
61
java/org/apache/coyote/http2/LocalStrings_es.properties
Normal file
61
java/org/apache/coyote/http2/LocalStrings_es.properties
Normal file
@@ -0,0 +1,61 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
abstractStream.windowSizeInc=\n\
|
||||
Conexión [{0}], Flujo [{1}], aumente el control de flujo de la ventana en [{2}] para [{3}]\n
|
||||
|
||||
connectionPrefaceParser.mismatch=Una sequencia de byte no esperada fue recibida al inicio del prefacio de cliente [{0}]
|
||||
|
||||
connectionSettings.debug=Conexión [{0}], Parámetro tipo [{1}] fijado a [{2}]
|
||||
connectionSettings.headerTableSizeLimit=La conexión [{0}], intentó fijar un tamaño de cabecera de [{1}] pero el límite es 16k\n
|
||||
connectionSettings.maxFrameSizeInvalid=Conexión [{0}], El tamaño de cuadro máximo solicitado de [{1}] esta fuera del rango permitido de [{2}] hasta [{3}]\n
|
||||
connectionSettings.unknown=Conexión [{0}], Un parámetro desconocido con identificador [{1}] y valor [{2}] fue ignorado\n
|
||||
|
||||
hpack.invalidCharacter=El carácter Unicode [{0}] en el punto del código [{1}] no puede ser codificado al estar fuera del rango permitido de 0 a 255.
|
||||
|
||||
http2Parser.headerLimitSize=Conexión [{0}], Flujo [{1}], Tamaño total de la cabecera muy grande
|
||||
http2Parser.headers.wrongStream=La conexión [{0}], tiene cabeceras en progreso para stream [{1}] pero un frame para stream [{2}] fue recibido
|
||||
http2Parser.preface.invalid=Se presentó un prefacio de conexión inválido
|
||||
http2Parser.processFrameData.window=Conexión [{0}], El cliente mandó más datos que los permitidos por el flujo de ventana.
|
||||
http2Parser.processFrameHeaders.decodingDataLeft=Datos sobrantes luego de decodificar HPACK - Estos datos se deberían haber consumido
|
||||
http2Parser.processFramePushPromise=Conexión [{0}], Flujo [{1}], Push promise frames no deben ser enviadas por el cliente
|
||||
|
||||
stream.closed=Conexión [{0}], Flujo [{1}], Imposible escribir en el flujo una vez que ha sido cerrado
|
||||
stream.header.debug=Conexión [{0}], Flujo [{1}], cabecera HTTP [{2}], Valor [{3}]\n
|
||||
stream.header.noPath=Conexión [{0}], Flujo [{1}], El [:path] de la seudo cabecera estaba vacía
|
||||
stream.header.unknownPseudoHeader=Conexión [{0}], Flujo [{1}], Se recibió una Pseudo cabecera desconocida [{2}]
|
||||
stream.inputBuffer.reset=Reinicio de flujo
|
||||
stream.inputBuffer.signal=Se adicionaron datos al inBuffer cuando el hilo está esperando. Señalizando al hilo para que continúe
|
||||
stream.reprioritisation.debug=Conexión [{0}], Flujo [{1}], Exclusivo [{2}], Padre [{3}], Peso [{4}]
|
||||
|
||||
streamProcessor.error.connection=Conexión [{0}], Flujo [{1}], Ha ocurrido un error el procesamiento que fue fatal para la conexión
|
||||
|
||||
streamStateMachine.debug.change=Conexión [{0}], Flujo [{1}], Estado cambió de [{2}] a [{3}]
|
||||
|
||||
upgradeHandler.allocate.left=Conexión [{0}], Flujo [{1}], [{2}] bytes no asignados - tratando de asignar en el hijo
|
||||
upgradeHandler.allocate.recipient=Conexión [{0}], Flujo [{1}], recipiente potencial [{2}] con peso [{3}]
|
||||
upgradeHandler.ioerror=Conexión [{0}]
|
||||
upgradeHandler.pingFailed=Conexión [{0}] falló al hacer ping al cliente
|
||||
upgradeHandler.prefaceReceived=Conexión [{0}], Prefacio de conexión recibido del cliente\n
|
||||
upgradeHandler.pruneIncomplete=La conexión [{0}] Falló al podar completamente la conexión porque existen flujos activos / usados en el árbol de prioridad. Existen [{2}] flujos de más
|
||||
upgradeHandler.prunedPriority=La conexión [{0}] ha cortado el flujo en desuso [{1}] el cual podía ser parte del árbol prioritario
|
||||
upgradeHandler.rst.debug=Conexión [{0}], Flujo [{1}], Error [{2}], Mensaje [{3}], RST (cerrando flujo)
|
||||
upgradeHandler.sendPrefaceFail=La conexión [{0}], Falló al enviar el prefacio al cliente\n
|
||||
upgradeHandler.socketCloseFailed=Error cerrando el socket
|
||||
upgradeHandler.stream.even=Un nuevo flujo remoto con ID [{0}] fue solicitado, pero todos los flujos remotos deben usar identificadores impares
|
||||
upgradeHandler.upgrade=La conexión [{0}], HTTP/1.1 se actualizó al flujo [1]\n
|
||||
upgradeHandler.upgradeDispatch.entry=Entrada, Conexión [{0}], SocketStatus [{1}]\n
|
||||
upgradeHandler.upgradeDispatch.exit=Salida, Conexión [{0}], Estado de Socket [{1}]
|
||||
upgradeHandler.writeHeaders=Conexión [{0}], Flujo [{1}]
|
||||
162
java/org/apache/coyote/http2/LocalStrings_fr.properties
Normal file
162
java/org/apache/coyote/http2/LocalStrings_fr.properties
Normal file
@@ -0,0 +1,162 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
abstractStream.windowSizeDec=Connection [{0}], Flux [{1}], réduction de la fenêtre de contrôle de flux de [{2}] à [{3}]
|
||||
abstractStream.windowSizeInc=Connection [{0}], Stream [{1}], augmentez la taille de la fenêtre de contrôle de flux de [{2}] à [{3}]
|
||||
abstractStream.windowSizeTooBig=Connection [{0}], Flux [{1}], L''augmentation de la taille de la fenêtre de [{2}] à [{3}] a excédé le maximum autorisé
|
||||
|
||||
connectionPrefaceParser.eos=Fin de flux inattendue lors de la lecture de la préface du client, seuls [{0}] octets ont été lus
|
||||
connectionPrefaceParser.ioError=Echec de la lecture des octets de la préface du client
|
||||
connectionPrefaceParser.mismatch=Une séquence inattendue d''octets a été recue au début de la préface client [{0}]
|
||||
|
||||
connectionSettings.debug=Connection [{0}], Paramètre type [{1}] mis à [{2}]
|
||||
connectionSettings.enablePushInvalid=Connection [{0}], La valeur demandée pour activer le push [{1}] n''est pas une de celles permises (zéro ou un)
|
||||
connectionSettings.headerTableSizeLimit=La Connection [{0}] a essayé de configurer une taille de [{1}] pour la table des en-têtes (headers), mais la limite est 16k
|
||||
connectionSettings.maxFrameSizeInvalid=Connection [{0}], la taille maximum de trame demandée [{1}] est en-dehors des limites permises [{2}] - [{3}]
|
||||
connectionSettings.unknown=Connection [{0}], Un paramètre inconnu avec l''identifiant [{1}] et la valeur [{2}] a été ignoré
|
||||
connectionSettings.windowSizeTooBig=Connection [{0}], La taille de fenêtre demandée [{1}] est plus grande que la valeur maximale autorisée [{2}]
|
||||
|
||||
frameType.checkPayloadSize=La taille de données [{0}] n''est pas valide pour une trame de type [{1}]
|
||||
frameType.checkStream=Type de trame invalide [{0}]
|
||||
|
||||
hpack.integerEncodedOverTooManyOctets=Un entier de taille variable de HPACK a été encodé sur trop d''octets, le maximum est de [{0}]
|
||||
hpack.invalidCharacter=Le caractère Unicode [{0}] ayant le code point [{1}] ne peut être encodé, parce qu''il est en-dehors de l''éventail permis 0-255.
|
||||
|
||||
hpackEncoder.encodeHeader=Encodage de l''en-tête [{0}] avec la valeur [{1}]
|
||||
|
||||
hpackdecoder.headerTableIndexInvalid=L''index [{0}] dans la table des en-têtes n''est pas valide car il y a [{1}] en-têtes statiques et [{2}] en-têtes dynamiques
|
||||
hpackdecoder.maxMemorySizeExceeded=La taille de la table des en-têtes [{0}] dépasse la taille maximale [{1}]
|
||||
hpackdecoder.notImplemented=Pas encore implémenté
|
||||
hpackdecoder.nullHeader=L''en-tête à l''index [{0}] est nul
|
||||
hpackdecoder.tableSizeUpdateNotAtStart=Toute mise à jour de la taille de la table doit être faite avant le début d'un bloc d'en-têtes
|
||||
hpackdecoder.zeroNotValidHeaderTableIndex=Zéro n'est pas un index valide dans la table des en-têtes
|
||||
|
||||
hpackhuffman.huffmanEncodedHpackValueDidNotEndWithEOS=La valeur encodée en Huffman dans les en-têtes HPACK n'avait pas de données tampon d'EOS
|
||||
hpackhuffman.stringLiteralEOS=La valeur encodée en Huffman dans les en-têtes HPACK contenait le symbole d'EOS
|
||||
hpackhuffman.stringLiteralTooMuchPadding=Plus de 7 bits de données tampon de fin de flux ont été fournis à la fin d'une chaîne encodée avec Huffman
|
||||
|
||||
http2Parser.headerLimitCount=Connection [{0}], Flux [{1}], Trop d''en-têtes
|
||||
http2Parser.headerLimitSize=Connection [{0}], Flux [{1}], La taille totale des en-têtes est trop grosse
|
||||
http2Parser.headers.wrongFrameType=Connection [{0}], Le traitement des en-têtes est en cours pour le flux [{1}] mais une trame de type [{2}] a été reçue
|
||||
http2Parser.headers.wrongStream=Connection [{0}], en têtes en cours pour le flux [{1}] mais une trame du flux [{2}] a été reçue
|
||||
http2Parser.nonZeroPadding=Connection [{0}], Stream [{1}], rembourrage (padding) non-zéro recu
|
||||
http2Parser.payloadTooBig=La taille des données est de [{0}] octets mais la taille maximale de la trame est de [{1}]
|
||||
http2Parser.preface.invalid=Une préface de connection invalide a été reçue
|
||||
http2Parser.preface.io=Impossible de lire la préface de la connection
|
||||
http2Parser.processFrame=Connection [{0}], Flux [{1}], Type de trame [{2}], Drapeaux [{3}], Taille des données [{4}]
|
||||
http2Parser.processFrame.tooMuchPadding=Connection [{0}], Flux [{1}], La taille [{2}] des données tampon est trop grosse pour la taille de données [{3}]
|
||||
http2Parser.processFrame.unexpectedType=Attendu une trame de type [{0}] mais reçu une trame de type [{1}]
|
||||
http2Parser.processFrameContinuation.notExpected=Connection [{0}], La trame de continuation a été reçue pour le flux [{1}] alors qu''aucun trainement d''en-têtes n''était en cours
|
||||
http2Parser.processFrameData.lengths=Connection [{0}], Flux [{1}], Taille des données, [{2}], Taille des données tampon [{3}]
|
||||
http2Parser.processFrameData.window=Connection [{0}], le client a envoyé plus de données que la "stream window" ne le permet
|
||||
http2Parser.processFrameHeaders.decodingDataLeft=Des données restent après le décodage de HPACK, elles auraient dû être consommées
|
||||
http2Parser.processFrameHeaders.decodingFailed=Une erreur de décodage HPACK des en-têtes HTTP s'est produite
|
||||
http2Parser.processFrameHeaders.payload=Connection [{0}], Flux [{1}], Traitement des en-têtes avec une taille de données de [{2}]
|
||||
http2Parser.processFramePriority.invalidParent=Connection [{0}], Flux [{1}], Un flux ne peut pas dépendre de lui-même
|
||||
http2Parser.processFramePushPromise=Connexion [{0}], Flux (Stream) [{1}], les trames de promesse d''envoi ("Push promise frames") ne doivent pas être envoyées par le client.
|
||||
http2Parser.processFrameSettings.ackWithNonZeroPayload=La trame de paramètres a été reçue avec un indicateur ACK activé et des données présentes
|
||||
http2Parser.processFrameWindowUpdate.debug=Connection [{0}], Flux [{1}], Incrémentation de [{2}] de la taille de fenêtre
|
||||
http2Parser.processFrameWindowUpdate.invalidIncrement=La trame de mise à jour de la fenêtre a été reçue avec un incrément invalide [{0}]
|
||||
http2Parser.swallow.debug=Connection [{0}], Flux [{1}], Avalé [{2}] octets
|
||||
|
||||
pingManager.roundTripTime=Connection [{0}] Le temps d''aller retour est de [{1}]ns
|
||||
|
||||
stream.closed=Connection [{0}], Flux [{1}], Impossible d''écrire sur un flux après sa fermeture
|
||||
stream.header.case=Connection [{0}], Flux [{1}], Le nom d''en-tête HTTP [{2}] doit être en minuscules
|
||||
stream.header.connection=Connection [{0}], Flux [{1}], L''en-tête HTTP [connection] n''est pas autorisé dans une requête HTTP/2
|
||||
stream.header.contentLength=Connection [{0}], Flux [{1}], La valeur de l''en-tête content-length [{2}] ne correspond pas à la taille des données reçue [{3}]
|
||||
stream.header.debug=Connection [{0}], Flux [{1}], en-tête HTTP [{2}], valeur [{3}]
|
||||
stream.header.duplicate=Connection [{0}], Flux [{1}], Reçu plusieurs en-têtes [{3}]
|
||||
stream.header.invalid=Connection [{0}], Flux [{1}], L''en-tête[{2}] contenait la valeur invalide [{3}]
|
||||
stream.header.noPath=Connection [{0}], flux [{1}], Le [:path] pseudo en-tête est vide
|
||||
stream.header.required=Connection [{0}], Flux [{1}], Un ou plusieurs en-têtes nécessaires sont manquants
|
||||
stream.header.te=Connection [{0}], Flux [{1}], L''en-tête HTTP [te] n''est pas autorisé avec la valeur [{2}] dans une requête HTTP/2
|
||||
stream.header.unexpectedPseudoHeader=Connection [{0}], Flux [{1}], Le pseudo en-tête [{2}] a été reçu après un en-tête normal
|
||||
stream.header.unknownPseudoHeader=Connection [{0}], Flux [{1}], Un pseudo en-tête inconnu [{2}] a été reçu
|
||||
stream.inputBuffer.copy=Copie de [{0}] octets depuis inBuffer vers outBuffer
|
||||
stream.inputBuffer.dispatch=Des données on été ajoutées dans inBuffer alors que la lecture est surveillée, envoi d'un évènement de lecture
|
||||
stream.inputBuffer.empty=Le tampon d'entrée du flux est vide, attente de données
|
||||
stream.inputBuffer.readTimeout=Délai d'attente maximum dépassé pendant la lecture des données du client
|
||||
stream.inputBuffer.reset=Flux réinitialisé
|
||||
stream.inputBuffer.signal=Des données ont été ajoutées dans inBuffer alors que le thread de lecture attend, cela lui sera signalé
|
||||
stream.notWritable=Connection [{0}], Flux [{1}], Impossible d''écrire sur ce flux
|
||||
stream.outputBuffer.flush.debug=Connection [{0}], Flux [{1}], envoi des données mises en tampon depuis la position [{2}], writeInProgress [{3}] et closed [{4}]
|
||||
stream.reprioritisation.debug=Connection [{0}], Flux [{1}], Exclusive [{2}], Parent [{3}], Poids [{4}]
|
||||
stream.reset.fail=Connection [{0}], Flux [{1}], Echec de réinitialisation du flux
|
||||
stream.reset.receive=Connection [{0}], Flux [{1}], Réinitialisation reçue à cause de [{2}]
|
||||
stream.reset.send=Connection [{0}], Flux [{1}], Réinitialisation envoyée à cause de [{2}]
|
||||
stream.trailerHeader.noEndOfStream=Connection [{0}], Flux [{1}], Les en-têtes de fin n''incluent pas l''indicateur de fin de flux
|
||||
stream.writeTimeout=Temps d'attente maximum du client dépassé pour augmenter la fenêtre de contrôle de flux pour permettre l'écriture de données
|
||||
|
||||
streamProcessor.cancel=Connection [{0}], Flux [{1}], Le reste du corps de la requête n''est pas nécessaire
|
||||
streamProcessor.error.connection=Connection [{0}], Stream [{1}], Une erreur s''est produite dans le traitement, fatale pour la connection
|
||||
streamProcessor.error.stream=Connection [{0}], Flux [{1}], Une erreur d''est produite durant le traitement qui a été fatale au flux
|
||||
streamProcessor.flushBufferedWrite.entry=Connection [{0}], Flux [{1}], Envoi des écritures mises en tampon
|
||||
streamProcessor.service.error=Erreur durant le traitement de la requête
|
||||
|
||||
streamStateMachine.debug.change=Connection [{0}], Flux [{1}], L’état a changé de [{2}] vers [{3}]
|
||||
streamStateMachine.invalidFrame=Connection [{0}], Flux [{1}], Etat [{2}], Type de trame [{3}]
|
||||
|
||||
upgradeHandler.allocate.debug=Connection [{0}], Flux [{1}], [{2}] octets alloués
|
||||
upgradeHandler.allocate.left=Connection [{0}], Flux [{1}], [{2}] octets désalloués, essai d''allocation aux enfants
|
||||
upgradeHandler.allocate.recipient=Connection [{0}], Flux [{1}], receveur potentiel [{2}] avec poids [{3}]
|
||||
upgradeHandler.connectionError=Erreur de la connection
|
||||
upgradeHandler.dependency.invalid=Connection [{0}], Flux [{1}], Un flux ne peut dépendre de lui-même
|
||||
upgradeHandler.goaway.debug=Connection [{0}], Goaway, Dernier flux [{1}], Code d''erreur [{2}], Données de débogage [{3}]
|
||||
upgradeHandler.init=Connection [{0}], Etat [{1}]
|
||||
upgradeHandler.initialWindowSize.invalid=Connection [{0}], La valeur [{1}] initiale de la taille de fenêtre est invalide
|
||||
upgradeHandler.invalidPreface=Connection [{0}], Préface de connection invalide
|
||||
upgradeHandler.ioerror=Connection [{0}]
|
||||
upgradeHandler.noAllocation=Connection [{0}], Flux [{1}], Temps d''attente maximum dépassé lors de l''allocation
|
||||
upgradeHandler.noNewStreams=Connection [{0}], Flux [{1}], Flux ignoré car aucun nouveau flux n''est autorisé sur cette connection
|
||||
upgradeHandler.pause.entry=Connection [{0}] mise en pause
|
||||
upgradeHandler.pingFailed=La connection [{0}] a échoué à envoyer un ping au client
|
||||
upgradeHandler.prefaceReceived=Connection [{0}], préface de la connection recue du client
|
||||
upgradeHandler.pruneIncomplete=Connexion [{0}], Flux [{1}], Erreur lors de l''élimination complète de la connexion parce que des flux sont encore actifs / utilisés dans l''arbre de priorité, il y a [{2}] flux en trop
|
||||
upgradeHandler.pruneStart=Connection [{0}] Début de l''élimination des anciens flux, la limite est de [{1}] + 10% et il y a actuellement [{2}] flux
|
||||
upgradeHandler.pruned=Connection [{0}] Elimination du flux terminé [{1}]
|
||||
upgradeHandler.prunedPriority=La connexion [{0}] a élagué le flux inutilisé [{1}] qui faisait peut-être partie de l''arbre de priorité
|
||||
upgradeHandler.releaseBacklog=Connection [{0}], Flux [{1}] enlevée de la file d''attente
|
||||
upgradeHandler.rst.debug=Connexion [{0}], Flux [{1}], Erreur [{2}], Message [{3}], RST (fermeture du flux)
|
||||
upgradeHandler.sendPrefaceFail=Connexion [{0}], échec d''envoi de la préface au client
|
||||
upgradeHandler.socketCloseFailed=Echec de la fermeture du socket
|
||||
upgradeHandler.stream.closed=Le flux [{0}] a déjà été fermé auparavant
|
||||
upgradeHandler.stream.even=Un nouvel ID de flux distant (remote stream) [{0}] a été requis, mais tous les flux distants doivent utiliser ID impairs
|
||||
upgradeHandler.stream.notWritable=Connection [{0}], Flux [{1}], Impossible d''écrire sur ce flux
|
||||
upgradeHandler.stream.old=Un nouveau flux distant avec l''ID [{0}] a été demandé mais le flux le plus récent est [{1}]
|
||||
upgradeHandler.tooManyRemoteStreams=Le client a essayé d''utiliser plus de [{0}] flux actifs
|
||||
upgradeHandler.tooMuchOverhead=Connection [{0}], Le traitement est trop coûteux donc la connection sera fermée
|
||||
upgradeHandler.unexpectedAck=Connection [{0}], Flux [{1}], Une notification de réception de paramètres a été reçue alors qu''aucune n''était attendue
|
||||
upgradeHandler.upgrade=Connexion [{0}], HTTP/1.1 transformée en flux [1]
|
||||
upgradeHandler.upgrade.fail=Connection [{0}], Echec de l''upgrade de HTTP/1.1
|
||||
upgradeHandler.upgradeDispatch.entry=Entrée, Connection [{0}], SocketStatus [{1}]
|
||||
upgradeHandler.upgradeDispatch.exit=Sortie, Connection [{0}], SocketState [{1}]
|
||||
upgradeHandler.windowSizeReservationInterrupted=Connection [{0}], Flux [{1}], réservé [{2}] octets
|
||||
upgradeHandler.windowSizeTooBig=Connection [{0}], Flux [{1}], La taille de la fenêtre est trop grosse
|
||||
upgradeHandler.writeBody=Connection [{0}], Flux [{1}], Taille des données [{2}]
|
||||
upgradeHandler.writeHeaders=Connection [{0}], Stream [{1}]
|
||||
upgradeHandler.writePushHeaders=Connection [{0}], Flux [{1}], Flux de push [{2}], EndOfStream [{3}]
|
||||
|
||||
windowAllocationManager.dispatched=Connection [{0}], Flux [{1}], Envoyé
|
||||
windowAllocationManager.notified=Connection [{0}], Flux [{1}], Notifié
|
||||
windowAllocationManager.notify=Connection [{0}], Flux [{1}], Attente de type [{2}], Notification de type [{3}]
|
||||
windowAllocationManager.waitFor.connection=Connection [{0}], Flux [{1}], Attente d''une fenêtre de contrôle de flux de Connection (bloquante) avec une délai maximum d''attente de [{2}]
|
||||
windowAllocationManager.waitFor.ise=Connection [{0}], Flux [{1}], Déjà en train d''attendre
|
||||
windowAllocationManager.waitFor.stream=Connection [{0}], Flux [{1}], Attente d''une fenêtre de contrôle de flux de Flux (bloquante) avec une délai maximum d''attente de [{2}]
|
||||
windowAllocationManager.waitForNonBlocking.connection=Connection [{0}], Flux [{1}], Attente d''une fenêtre de contrôle de flux de Connection (non bloquante)
|
||||
windowAllocationManager.waitForNonBlocking.stream=Connection [{0}], Flux [{1}], Attente d''une fenêtre de contrôle de flux de Flux (non bloquante)
|
||||
|
||||
writeStateMachine.endWrite.ise=il est illégal de spécifier [{0}] pour le nouvel état dès lors qu''une écriture s''est terminée
|
||||
writeStateMachine.ise=Il est illégal d'appeler [{0}()] dans l'état [{1}]
|
||||
148
java/org/apache/coyote/http2/LocalStrings_ja.properties
Normal file
148
java/org/apache/coyote/http2/LocalStrings_ja.properties
Normal file
@@ -0,0 +1,148 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
abstractStream.windowSizeDec=コネクション[{0}]、ストリーム[{1}]、フロー制御ウィンドウを[{2}]ずつ[{3}]に縮小
|
||||
abstractStream.windowSizeInc=コネクション [{0}]、ストリーム [{1}]、フロー制御ウインドウを [{2}] から [{3}] に増加します。
|
||||
abstractStream.windowSizeTooBig=コネクション[{0}]、ストリーム[{1}]、[{2}]のウィンドウサイズが許容最大値を超える[{3}]に増加しました。
|
||||
|
||||
connectionPrefaceParser.eos=オープニングクライアントPrefaceのバイトシーケンスを読み取っているときに予期しないストリームの終わりが発生しました。 [{0}]バイトだけが読み込まれます。
|
||||
connectionPrefaceParser.ioError=オープニングクライアントPreface のバイトシーケンスの読み取りに失敗しました。
|
||||
connectionPrefaceParser.mismatch=コネクションプリフェイス [{0}] の先頭に未知のバイト列を受信しました。
|
||||
|
||||
connectionSettings.debug=コネクション [{0}]、パラメータ [{1}] に [{2}] を設定しました。
|
||||
connectionSettings.enablePushInvalid=コネクション[{0}]、有効プッシュ[{1}]にリクエストされた値が許容値(0または1)のいずれでもありません。
|
||||
connectionSettings.headerTableSizeLimit=コネクション [{0}]、ヘッダーテーブルサイズに [{1}] を指定されましたが上限は 16k です。
|
||||
connectionSettings.maxFrameSizeInvalid=コネクション [{0}]、要求された最大フレームサイズ [{1}] は可能な範囲の [{2}] から [{3}] を超えています。
|
||||
connectionSettings.unknown=コネクション [{0}]、未知の設定名 [{1}] の値 [{2}] を無視しました。
|
||||
connectionSettings.windowSizeTooBig=コネクション [{0}]、要求されたウインドウサイズ [{1}] は上限値 [{2}] を越えています。
|
||||
|
||||
frameType.checkPayloadSize=[{0}]のペイロードサイズがフレームタイプ[{1}]に無効です
|
||||
frameType.checkStream=無効なフレームタイプ[{0}]
|
||||
|
||||
hpack.integerEncodedOverTooManyOctets=エンコードされたHPACK可変長整数は多くのオクテットを超過。最大値は[{0}]
|
||||
hpack.invalidCharacter=コードポイント [{1}] のユニコード文字 [{0}] は有効範囲 0 から 255 の範囲外のため、エンコードできません。
|
||||
|
||||
hpackEncoder.encodeHeader=ヘッダー[{0}]を値[{1}]でエンコードしています
|
||||
|
||||
hpackdecoder.headerTableIndexInvalid=[{1}]静的エントリと[{2}]動的エントリが存在するため、ヘッダーテーブルインデックス[{0}]は無効です。
|
||||
hpackdecoder.notImplemented=まだ実装されていません。
|
||||
hpackdecoder.tableSizeUpdateNotAtStart=すべてのテーブルサイズの更新はヘッダーブロックの先頭に送信する必要があります。
|
||||
hpackdecoder.zeroNotValidHeaderTableIndex=ゼロは有効なヘッダーテーブルインデックスではありません。
|
||||
|
||||
hpackhuffman.huffmanEncodedHpackValueDidNotEndWithEOS=HPACK ヘッダーのハフマン符号化した値は EOS パディングで終了していません。
|
||||
hpackhuffman.stringLiteralTooMuchPadding=Huffman 符号化された文字列リテラルの終わりに、7ビット以上のEOSパディングが提供されました。
|
||||
|
||||
http2Parser.headerLimitCount=コネクション [{0}]、ストリーム [{1}]、ヘッダーが多すぎます。
|
||||
http2Parser.headerLimitSize=コネクション [{0}]、ストリーム [{1}]、合計ヘッダーサイズが大きすぎます。
|
||||
http2Parser.headers.wrongFrameType=コネクション[{0}]、ストリーム[{1}]の進行中のヘッダー、しかしタイプ[{2}]のフレームが受信されました。
|
||||
http2Parser.headers.wrongStream=接続[{0}]、ストリーム[{1}]のヘッダ処理中にストリーム[{2}]のフレームが受信されました
|
||||
http2Parser.nonZeroPadding=コネクション[{0}]、ストリーム[{1}]、非ゼロのパディングを受信しました。
|
||||
http2Parser.payloadTooBig=ペイロードの長さは[{0}]バイトですが、最大フレームサイズは[{1}]です
|
||||
http2Parser.preface.invalid=無効なコネクションpreface が提示されました
|
||||
http2Parser.preface.io=コネクションprefaceを読むことができません。
|
||||
http2Parser.processFrame=コネクション[{0}]、ストリーム[{1}]、フレームタイプ[{2}]、フラグ[{3}]、ペイロードサイズ[{4}]
|
||||
http2Parser.processFrame.tooMuchPadding=コネクション [{0}]、ストリーム [{1}]、ペイロード [{3}] に対してパディング長 [{2}] は大きすぎます。
|
||||
http2Parser.processFrame.unexpectedType=予想されるフレームタイプ[{0}]、しかし受信されたフレームタイプ[{1}]
|
||||
http2Parser.processFrameContinuation.notExpected=コネクション[{0}]、進行中のヘッダーがないときにストリーム[{1}]のContinuationフレームが受信されました
|
||||
http2Parser.processFrameData.lengths=コネクション[{0}]、ストリーム[{1}]、データ長、[{2}]、パディング長[{3}]
|
||||
http2Parser.processFrameData.window=コネクション [{0}]、クライアントはストリームウインドウサイズより大きなデータを送信しました。
|
||||
http2Parser.processFrameHeaders.decodingDataLeft=HPACK をデコードしたのにデータが残っています。すべて使用するべきです。
|
||||
http2Parser.processFrameHeaders.decodingFailed=HTTP ヘッダーの HPACK 復号化中にエラーが発生しました。
|
||||
http2Parser.processFrameHeaders.payload=コネクション[{0}]、ストリーム[{1}]、サイズ[{2}]のヘッダーペイロードを処理中
|
||||
http2Parser.processFramePriority.invalidParent=コネクション[{0}]、ストリーム[{1}]、ストリーム自体に依存しない可能性があります。
|
||||
http2Parser.processFramePushPromise=コネクション [{0}]、ストリーム [{1}]、クライアントから PUSH_PROMISE フレームを送信するべきではありません。
|
||||
http2Parser.processFrameSettings.ackWithNonZeroPayload=ACKフラグがセットされ、ペイロードが存在する状態で受信されたSettingsフレーム
|
||||
http2Parser.processFrameWindowUpdate.debug=コネクション [{0}]、ストリーム [{1}]、ウインドウサイズを [{2}] に拡大します。
|
||||
http2Parser.processFrameWindowUpdate.invalidIncrement=無効な増分サイズ[{0}]で受信されたWindow Updateフレーム
|
||||
http2Parser.swallow.debug=コネクション[{0}]、ストリーム[{1}]、飲み込まれた[{2}]バイト
|
||||
|
||||
pingManager.roundTripTime=コネクション [{0}] の往復時間は [{1}] ns でした。
|
||||
|
||||
stream.closed=コネクション [{0}]、ストリーム [{1}]、切断したストリームには書き込みできません。
|
||||
stream.header.case=コネクション [{0}]、ストリーム [{1}]、HTTP ヘッダー名 [{2}] は小文字でなければなりません。
|
||||
stream.header.connection=コネクション [{0}]、ストリーム [{1}]、HTTP/2 のリクエストには HTTP ヘッダー [connection] を指定することはできません。
|
||||
stream.header.contentLength=コネクション [{0}]、ストリーム [{1}]、content length ヘッダーの値 [{2}] と受信したデータ長 [{3}] は一致しません。
|
||||
stream.header.debug=コネクション [{0}]、ストリーム [{1}]、HTTP ヘッダー [{2}]、値は [{3}]
|
||||
stream.header.duplicate=コネクション [{0}]、ストリーム [{1}]、ヘッダー [{3}] を複数受信しました。
|
||||
stream.header.invalid=コネクション [{0}]、ストリーム [{1}]、ヘッダー [{2}] に不正な値 [{3}] が含まれています。
|
||||
stream.header.noPath=コネクション [{0}]、ストリーム [{1}]、疑似ヘッダー [:path] が空です。
|
||||
stream.header.required=コネクション [{0}]、ストリーム [{1}]、1つ以上の必須ヘッダがありません。
|
||||
stream.header.te=コネクション [{0}]、ストリーム [{1}]、HTTP/2 のリクエストでは HTTP ヘッダー [te] の値に [{2}] を指定できません。
|
||||
stream.header.unexpectedPseudoHeader=コネクション [{0}]、ストリーム [{1}]、通常のヘッダーの後に疑似ヘッダー [{2}] を受信しました。
|
||||
stream.header.unknownPseudoHeader=コネクション [{0}]、ストリーム [{1}]、未知の疑似ヘッダー [{2}] を受信しました。
|
||||
stream.inputBuffer.copy=入力バッファーから出力バッファーへコピーしたのは [{0}] バイトです。
|
||||
stream.inputBuffer.dispatch=read interest が登録されると、inBufferにデータが追加されます。 読み取りディスパッチをトリガします。
|
||||
stream.inputBuffer.empty=ストリーム入力バッファが空です。 より多くのデータを待っています。
|
||||
stream.inputBuffer.readTimeout=クライアントからデータを読み取る待機中のタイムアウト
|
||||
stream.inputBuffer.reset=ストリームリセット
|
||||
stream.inputBuffer.signal=読み込みスレッドが待機している間に inBuffer へデータが追加されました。スレッドへ処理の再開を通知します。
|
||||
stream.notWritable=コネクション [{0}]、ストリーム [{1}]、このストリームには書き込みできません。
|
||||
stream.outputBuffer.flush.debug=コネクション[{0}]、ストリーム[{1}]、バッファポジション[{2}]で出力をフラッシュ、writeInProgress [{3}]、クローズ[{4}]
|
||||
stream.reprioritisation.debug=コネクション[{0}]、ストリーム[{1}]、排他[{2}]、Parent[{3}]、重み[{4}]
|
||||
stream.reset.fail=コネクション [{0}]、ストリーム [{1}]、ストリームをリセットできません。
|
||||
stream.reset.receive=コネクション[{0}]、ストリーム[{1}]、[{2}]のために受信されたリセット
|
||||
stream.reset.send=コネクション [{0}]、ストリーム [{1}]、[{2}] が原因で RESET を送信しました。
|
||||
stream.trailerHeader.noEndOfStream=コネクション[{0}]、ストリーム[{1}]、trailer ヘッダーにストリーム終了フラグが含まれていません。
|
||||
stream.writeTimeout=クライアントがストリームデータの書き込みを許可するためにフロー制御ウィンドウを増やすのを待つタイムアウト
|
||||
|
||||
streamProcessor.error.connection=コネクション[{0}]、ストリーム[{1}]、コネクションに致命的なエラーが処理中に発生しました。
|
||||
streamProcessor.error.stream=コネクション[{0}]、ストリーム[{1}]、処理中にストリームに致命的なエラーが発生しました。
|
||||
streamProcessor.flushBufferedWrite.entry=コネクション [{0}]、ストリーム [{1}]、書き込み用バッファをフラッシュします。
|
||||
streamProcessor.service.error=リクエスト処理中のエラー
|
||||
|
||||
streamStateMachine.debug.change=コネクション [{0}]、ストリーム [{1}]、状態を [{2}] から [{3}] へ変更しました。
|
||||
streamStateMachine.invalidFrame=コネクション [{0}]、ストリーム [{1}]、状態 [{2}]、フレーム種類 [{3}]
|
||||
|
||||
upgradeHandler.allocate.debug=コネクション[{0}]、ストリーム[{1}]、割り当てられた[{2}]バイト
|
||||
upgradeHandler.allocate.left=コネクション[{0}]、ストリーム[{1}]、[{2}]バイトが未割り当て - 子への割り当てを試みています。
|
||||
upgradeHandler.allocate.recipient=コネクション[{0}]、ストリーム[{1}]、重み[{3}]の潜在的な受信者[{2}]
|
||||
upgradeHandler.connectionError=接続エラー
|
||||
upgradeHandler.dependency.invalid=コネクション [{0}]、ストリーム [{1}]、ストリームは自分自身に依存するべきではありません。
|
||||
upgradeHandler.goaway.debug=コネクション[{0}]、Goaway、最終ストリーム[{1}]、エラーコード[{2}]、デバッグデータ[{3}]
|
||||
upgradeHandler.init=コネクション[{0}]、状態[{1}]
|
||||
upgradeHandler.initialWindowSize.invalid=コネクション[{0}]、[{1}]の無効な値は初期ウィンドウサイズで無視されました
|
||||
upgradeHandler.invalidPreface=コネクション[{0}]、無効なConnection Preface
|
||||
upgradeHandler.ioerror=コネクション[{0}]
|
||||
upgradeHandler.noNewStreams=コネクション [{0}]、ストリーム [{1}]、このコネクションには新しいストリームを作成できないためストリームを無視します。
|
||||
upgradeHandler.pause.entry=コネクション[{0}] 一時停止中
|
||||
upgradeHandler.pingFailed=コネクション [{0}]、クライアントへ ping を送信できません。
|
||||
upgradeHandler.prefaceReceived=コネクション [{0}]、クライアントからコネクションプリフェイスを受信しました。
|
||||
upgradeHandler.pruneIncomplete=コネクション [{0}]、コネクションを削除できませんでした。ストリームが有効である、あるいは、優先度木に登録されているからです。現在のストリーム数 [{2}] は多すぎます。
|
||||
upgradeHandler.pruneStart=コネクション[{0}] 古いストリームのプルーニングを開始します。 上限は[{1}] + 10%で、現在[{2}]ストリームがあります。
|
||||
upgradeHandler.pruned=コネクション [{0}]、完了したストリーム [{1}] は削除します。
|
||||
upgradeHandler.prunedPriority=コネクション [{0}]、優先度木に登録されていた可能性のある未使用のストリーム [{1}] を取り除きました。
|
||||
upgradeHandler.releaseBacklog=コネクション[{0}]、ストリーム[{1}]はバックログから解放されました。
|
||||
upgradeHandler.rst.debug=コネクション [{0}]、ストリーム [{1}]、エラー [{2}]、メッセージ [{3}]、RST(ストリームを切断します)
|
||||
upgradeHandler.sendPrefaceFail=コネクション [{0}]、クライアントにプリフェイスを送信できませんでした。
|
||||
upgradeHandler.socketCloseFailed=ソケットの切断に失敗しました。
|
||||
upgradeHandler.stream.closed=ストリーム[{0}]がしばらく閉じられていました
|
||||
upgradeHandler.stream.even=新しいリモートストリーム ID [{0}] を要求されましたがリモートストリームの ID は奇数でなければなりません。
|
||||
upgradeHandler.stream.notWritable=コネクション[{0}]、ストリーム[{1}]、このストリームは書き込み可能ではありません
|
||||
upgradeHandler.stream.old=新しいリモートストリーム ID [{0}] を要求されましたが、最新のストリームは [{1}] です。
|
||||
upgradeHandler.tooManyRemoteStreams=クライアントは[{0}]以上のアクティブなストリームを使用しようとしました
|
||||
upgradeHandler.tooMuchOverhead=Connection [{0}]、オーバーヘッドが多すぎるため、接続が閉じられます。
|
||||
upgradeHandler.unexpectedAck=コネクション[{0}]、ストリーム[{1}]、予期しないときにsettings ackを受信しました。
|
||||
upgradeHandler.upgrade=コネクション[{0}]、ストリーム[1]へのHTTP / 1.1 upgrade
|
||||
upgradeHandler.upgrade.fail=コネクション[{0}]、HTTP / 1.1のアップグレードに失敗しました
|
||||
upgradeHandler.upgradeDispatch.entry=エントリ、コネクション[{0}]、ソケット状態 [{1}]
|
||||
upgradeHandler.upgradeDispatch.exit=終了、コネクション[{0}]、ソケット状態[{1}]
|
||||
upgradeHandler.windowSizeReservationInterrupted=コネクション[{0}]、ストリーム[{1}]、[{2}]バイトの予約
|
||||
upgradeHandler.windowSizeTooBig=コネクション[{0}]、ストリーム[{1}]、ウィンドウサイズが大きすぎます
|
||||
upgradeHandler.writeBody=コネクション [{0}]、ストリーム [{1}]、データ長 [{2}]
|
||||
upgradeHandler.writeHeaders=コネクション [{0}], ストリーム [{1}]
|
||||
upgradeHandler.writePushHeaders=コネクション[{0}]、ストリーム[{1}]、プッシュされたストリーム[{2}]、EndOfStream [{3}]
|
||||
|
||||
writeStateMachine.endWrite.ise=書き込みが完了したら新しい状態に[{0}]を指定するのは不正です。
|
||||
writeStateMachine.ise=状態[{1}]の[{0}()]を呼び出すことは不正です。
|
||||
162
java/org/apache/coyote/http2/LocalStrings_ko.properties
Normal file
162
java/org/apache/coyote/http2/LocalStrings_ko.properties
Normal file
@@ -0,0 +1,162 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
abstractStream.windowSizeDec=연결 [{0}], 스트림 [{1}], flow control 윈도우를 [{2}] 만큼 줄여 [{3}]에 이르게 합니다.
|
||||
abstractStream.windowSizeInc=연결 [{0}], 스트림 [{1}]: Flow control 윈도우를 [{2}] 만큼 증가시켜 윈도우 크기가 [{3}]이(가) 되도록 합니다.
|
||||
abstractStream.windowSizeTooBig=연결 [{0}], 스트림 [{1}], 윈도우 크기를 [{2}] 만큼 증가시켜 [{3}](으)로 만들었으나, 이는 허용된 최대값을 초과했습니다.
|
||||
|
||||
connectionPrefaceParser.eos=개시되는 클라이언트 preface 바이트 시퀀스를 읽는 동안, 예기치 않은 스트림의 끝. 단지 [{0}] 바이트만을 읽음.
|
||||
connectionPrefaceParser.ioError=개시되는 클라이언트 preface 바이트 시퀀스를 읽지 못했습니다.
|
||||
connectionPrefaceParser.mismatch=해당 client preface [{0}]의 시작 부분에서 예기치 않은 바이트 시퀀스를 받았습니다.
|
||||
|
||||
connectionSettings.debug=연결 [{0}]: 파라미터 타입 [{1}]을(를) [{2}](으)로 설정함.
|
||||
connectionSettings.enablePushInvalid=연결 [{0}], enablePush를 위해 요청된 값 [{1}]은(는), 허용된 값들(0 또는 1) 중의 하나가 아닙니다.
|
||||
connectionSettings.headerTableSizeLimit=연결 [{0}]: 헤더 테이블 크기로 [{1}]을(를) 설정하려 시도했으나, 한계값은 16k입니다.
|
||||
connectionSettings.maxFrameSizeInvalid=연결 [{0}]: [{1}]의 요청된 최대 프레임 크기가 허용된 범위([{2}] - [{3}])의 바깥에 존재합니다.
|
||||
connectionSettings.unknown=연결 [{0}]: 식별자가 [{1}](이)고 값이 [{2}]인 알 수 없는 설정이 무시되었습니다.
|
||||
connectionSettings.windowSizeTooBig=연결 [{0}]: 요청된 윈도우 크기 [{1}]이(가) 최대 허용치 [{2}] 보다 큽니다.
|
||||
|
||||
frameType.checkPayloadSize=Payload의 크기 [{0}]은(는) 프레임 타입 [{1}]을(를) 위해 유효하지 않습니다.
|
||||
frameType.checkStream=유효하지 않은 프레임 타입 [{0}]
|
||||
|
||||
hpack.integerEncodedOverTooManyOctets=HPACK 가변 길이 정수가 너무 많은 옥텟(octet)들로 인코딩되어 있습니다. 최대 길이는 [{0}]입니다.
|
||||
hpack.invalidCharacter=code point [{1}]에 위치한 유니코드 문자 [{0}]은(는), 0에서 255까지의 허용 범위 바깥에 있으므로 인코딩될 수 없습니다.
|
||||
|
||||
hpackEncoder.encodeHeader=인코딩 헤더 [{0}]와(과) 그의 값 [{1}]
|
||||
|
||||
hpackdecoder.headerTableIndexInvalid=[{1}]개의 정적 엔트리들과 [{2}]개의 동적 엔트리들이 존재하기에, 헤더 테이블 인덱스 [{0}]은(는) 유효하지 않습니다.
|
||||
hpackdecoder.maxMemorySizeExceeded=헤더 테이블 크기 [{0}]이(가) 최대 크기 [{1}]을(를) 초과합니다.
|
||||
hpackdecoder.notImplemented=아직 구현 안됨
|
||||
hpackdecoder.nullHeader=인덱스가 [{0}]인 위치에 널 헤더가 존재합니다.
|
||||
hpackdecoder.tableSizeUpdateNotAtStart=테이블 크기 변경은, 반드시 헤더 블록의 시작 시에 전송되어야만 합니다.
|
||||
hpackdecoder.zeroNotValidHeaderTableIndex=0은 유효한 헤더 테이블 인덱스가 아닙니다.
|
||||
|
||||
hpackhuffman.huffmanEncodedHpackValueDidNotEndWithEOS=HPACK 헤더들 내의 Huffman 알고리즘으로 인코딩된 값이, EOS padding으로 끝나지 않았습니다.
|
||||
hpackhuffman.stringLiteralEOS=HPACK 헤더들 내의 Huffman 알고리즘으로 인코딩된 값이, EOS 부호를 포함했습니다.
|
||||
hpackhuffman.stringLiteralTooMuchPadding=Huffman 알고리즘으로 인코딩된 문자열의 끝에 7 비트를 초과한 EOS padding입니다.
|
||||
|
||||
http2Parser.headerLimitCount=연결 [{0}], 스트림 [{1}], 너무 많은 헤더들이 있음
|
||||
http2Parser.headerLimitSize=연결 [{0}], 스트림 [{1}], 전체 헤더 크기가 너무 큽니다.
|
||||
http2Parser.headers.wrongFrameType=연결 [{0}], 스트림 [{1}]을(를) 위한 헤더들이 진행중이지만, 타입 [{2}]의 프레임을 받았습니다.
|
||||
http2Parser.headers.wrongStream=연결 [{0}]: 스트림 [{1}]의 헤더들을 처리하는 과정에서, 스트림 [{2}]의 프레임을 받았습니다.
|
||||
http2Parser.nonZeroPadding=연결 [{0}], 스트림 [{1}], 0이 아닌 padding을 받았습니다.
|
||||
http2Parser.payloadTooBig=Payload의 길이가 [{0}]바이트이지만, 최대 프레임 크기는 [{1}]입니다.
|
||||
http2Parser.preface.invalid=유효하지 않은 연결 preface 이(가) 제공되었습니다.
|
||||
http2Parser.preface.io=연결 preface를 읽을 수 없습니다.
|
||||
http2Parser.processFrame=연결 [{0}], 스트림 [{1}], 프레임 타입 [{2}], 플래그들 [{3}], Payload 크기 [{4}]
|
||||
http2Parser.processFrame.tooMuchPadding=연결 [{0}], 스트림 [{1}], padding 길이 [{2}]은(는) payload [{3}]을(를) 위해 너무 큽니다.
|
||||
http2Parser.processFrame.unexpectedType=프레임 타입 [{0}]이(가) 요구되었으나, 프레임 타입 [{1}]을(를) 받았습니다.
|
||||
http2Parser.processFrameContinuation.notExpected=연결 [{0}]: 헤더들이 아무 것도 진행되지 않은 상태에서, 스트림 [{1}]을(를) 위한 Continuation 프레임을 받았습니다.
|
||||
http2Parser.processFrameData.lengths=연결 [{0}], 스트림 [{1}], 데이터 길이, [{2}], Padding 길이 [{3}]
|
||||
http2Parser.processFrameData.window=연결 [{0}]: 클라이언트가 스트림 윈도우가 허용하는 데이터 크기보다 더 많은 데이터를 전송했습니다.
|
||||
http2Parser.processFrameHeaders.decodingDataLeft=HPACK 디코딩 후 남아있는 데이터 - 반드시 소비되었어야 합니다.
|
||||
http2Parser.processFrameHeaders.decodingFailed=HTTP 헤더들의 HPACK 디코딩 과정에서 오류가 있었습니다.
|
||||
http2Parser.processFrameHeaders.payload=연결 [{0}], 스트림 [{1}], 크기가 [{2}]인 헤더들의 payload를 처리합니다.
|
||||
http2Parser.processFramePriority.invalidParent=연결 [{0}], 스트림 [{1}], 스트림이 그 자신을 의존할 수는 없습니다.
|
||||
http2Parser.processFramePushPromise=연결 [{0}], 스트림 [{1}], Push promise 프레임들이 클라이언트에 의해 전송되어서는 안됩니다.
|
||||
http2Parser.processFrameSettings.ackWithNonZeroPayload=ACK 플래그가 설정되고 payload가 존재하는, Settings 프레임을 받았습니다.
|
||||
http2Parser.processFrameWindowUpdate.debug=연결 [{0}], 스트림 [{1}], 윈도우 크기를 [{2}] 만큼 증가 시킵니다.
|
||||
http2Parser.processFrameWindowUpdate.invalidIncrement=유효하지 않은 증분 크기인 [{0}]와(과) 함께, 윈도우 변경 프레임을 받았습니다.
|
||||
http2Parser.swallow.debug=연결 [{0}], 스트림 [{1}], [{2}] 바이트를 처리하지 않고 건너뛰었습니다.
|
||||
|
||||
pingManager.roundTripTime=연결 [{0}]: 라운드 트립 시간이 [{1}] 나노초(ns)로 측정되었습니다.
|
||||
|
||||
stream.closed=연결 [{0}], 스트림 [{1}], 한번 닫힌 스트림에 쓰기를 할 수 없습니다.
|
||||
stream.header.case=연결 [{0}], 스트림 [{1}], HTTP 헤더 이름 [{2}]은(는) 반드시 소문자여야 합니다.
|
||||
stream.header.connection=연결 [{0}], 스트림 [{1}], HTTP 헤더 [connection]은 HTTP/2 요청에서 허용되지 않습니다.
|
||||
stream.header.contentLength=연결 [{0}], 스트림 [{1}], 해당 Content-Length 헤더 값 [{2}]은(는) 수신된 데이터의 크기 [{3}]와(과) 일치하지 않습니다.
|
||||
stream.header.debug=연결 [{0}], 스트림 [{1}], HTTP 헤더: [{2}], 값: [{3}]
|
||||
stream.header.duplicate=연결 [{0}], 스트림 [{1}], 여러 개의 [{3}] 헤더들을 받았습니다.
|
||||
stream.header.invalid=연결 [{0}], 스트림 [{1}], 헤더 [{2}]이(가) 유효하지 않은 값을 포함했습니다: [{3}]
|
||||
stream.header.noPath=연결 [{0}], 스트림 [{1}], [:path] 가상 헤더가 비어 있었습니다.
|
||||
stream.header.required=연결 [{0}], 스트림 [{1}], 하나 이상의 필수 헤더들이 없습니다.
|
||||
stream.header.te=연결 [{0}], 스트림 [{1}], HTTP/2 요청에서, HTTP 헤더 [te]이(가) 값 [{2}]을(를) 갖는 것은 허용되지 않습니다.
|
||||
stream.header.unexpectedPseudoHeader=연결 [{0}], 스트림 [{1}], 정규 헤더 다음에 가상 헤더 [{2}]을(를) 받았습니다.
|
||||
stream.header.unknownPseudoHeader=연결 [{0}], 스트림 [{1}], 알 수 없는 가상 헤더 [{2}]을(를) 받았습니다.
|
||||
stream.inputBuffer.copy=[{0}] 바이트를 inBuffer에서 outBuffer로 복사합니다.
|
||||
stream.inputBuffer.dispatch=readInterest가 등록될 때, 데이터가 inBuffer에 추가되었습니다. 읽기 디스패치를 개시합니다.
|
||||
stream.inputBuffer.empty=스트림의 입력 버퍼가 비어 있습니다. 더 많은 데이터를 기다립니다.
|
||||
stream.inputBuffer.readTimeout=클라이언트로부터 데이터를 읽기를 일정 시간 동안 기다리는 중입니다.
|
||||
stream.inputBuffer.reset=스트림이 재설정(reset)되었습니다.
|
||||
stream.inputBuffer.signal=읽기 쓰레드가 대기하는 동안 inBuffer에 데이터가 추가되었습니다. 해당 쓰레드가 읽기를 계속하도록 시그널을 보냅니다.
|
||||
stream.notWritable=연결 [{0}], 스트림 [{1}], 이 스트림은 쓰기 가능하지 않습니다.
|
||||
stream.outputBuffer.flush.debug=연결 [{0}], 스트림 [{1}], 위치 [{2}]의 버퍼를 출력으로 배출합니다. 쓰기 진행 중 여부: [{3}],닫힘 여부: [{4}]
|
||||
stream.reprioritisation.debug=연결 [{0}], 스트림 [{1}], 배타성 [{2}], 부모 [{3}], 가중치 [{4}]
|
||||
stream.reset.fail=연결 [{0}], 스트림 [{1}], 스트림을 재설정(reset)하지 못했습니다.
|
||||
stream.reset.receive=연결 [{0}], 스트림 [{1}], [{2}](으)로 인해 재설정(reset)을 받았습니다.
|
||||
stream.reset.send=연결 [{0}], 스트림 [{1}], [{2}](으)로 인하여 재설정(reset)이 전송되었음.
|
||||
stream.trailerHeader.noEndOfStream=연결 [{0}], 스트림 [{1}], Trailer 헤더들이 스트림의 끝 플래그를 포함하지 않았습니다.
|
||||
stream.writeTimeout=스트림 데이터가 쓰여지도록 허용하기 위한 흐름 제어 (flow control) 윈도우를, 클라이언트가 증가시키기를 일정 시간 동안 기다리는 중입니다.
|
||||
|
||||
streamProcessor.cancel=연결 [{0}], 스트림 [{1}], 요청의 body가 완전히 읽히지 않고 남아 있어, 더 이상 데이터는 불필요합니다.
|
||||
streamProcessor.error.connection=연결 [{0}], 스트림 [{1}]: 처리 중 해당 연결에 심각한 오류 발생
|
||||
streamProcessor.error.stream=연결 [{0}], 스트림 [{1}], 처리 중 스트림에 치명적인 오류가 발생했습니다.
|
||||
streamProcessor.flushBufferedWrite.entry=연결 [{0}], 스트림 [{1}], 버퍼에 쓰여진 데이터를 배출합니다.
|
||||
streamProcessor.service.error=요청 처리 중 오류 발생
|
||||
|
||||
streamStateMachine.debug.change=연결 [{0}], 스트림 [{1}], 상태가 [{2}]에서 [{3}](으)로 변경됨.
|
||||
streamStateMachine.invalidFrame=연결 [{0}], 스트림 [{1}], 상태 [{2}], 프레임 타입 [{3}]
|
||||
|
||||
upgradeHandler.allocate.debug=연결 [{0}], 스트림 [{1}], [{2}] 바이트를 할당함.
|
||||
upgradeHandler.allocate.left=연결 [{0}], 스트림 [{1}], [{2}] 바이트들이 할당 해제되었습니다 - 자식들에 할당하려 시도합니다.
|
||||
upgradeHandler.allocate.recipient=연결 [{0}], 스트림 [{1}], 가중치 [{3}]의 잠재적 수신자 [{2}]
|
||||
upgradeHandler.connectionError=연결 오류
|
||||
upgradeHandler.dependency.invalid=연결 [{0}], 스트림 [{1}], 스트림들은 자기 자신들에 의존해서는 안됩니다.
|
||||
upgradeHandler.goaway.debug=연결 [{0}], Goaway, 마지막 스트림 [{1}], 오류 코드 [{2}], 디버그 데이터 [{3}]
|
||||
upgradeHandler.init=연결 [{0}], 상태 [{1}]
|
||||
upgradeHandler.initialWindowSize.invalid=연결 [{0}]: 값 [{1}]은(는), 초기 윈도우 크기로서 불허되므로, 무시됩니다.
|
||||
upgradeHandler.invalidPreface=연결 [{0}]: 유효하지 않은 연결 preface
|
||||
upgradeHandler.ioerror=연결 [{0}]
|
||||
upgradeHandler.noAllocation=연결 [{0}], 스트림 [{1}], 연결 할당을 위해 대기하는 중 제한 시간 초과 되었습니다.
|
||||
upgradeHandler.noNewStreams=연결 [{0}], 스트림 [{1}], 이 연결에는 새로운 스트림들이 허용되지 않기에, 스트림이 무시되었습니다.
|
||||
upgradeHandler.pause.entry=연결 [{0}]이(가) 일시 정지 중
|
||||
upgradeHandler.pingFailed=연결 [{0}]: 클라이언트에 ping 메시지를 보내지 못했습니다.
|
||||
upgradeHandler.prefaceReceived=연결 [{0}]: 연결 preface를 클라이언트로부터 받았습니다.
|
||||
upgradeHandler.pruneIncomplete=연결 [{0}]: 스트림들이 Priority tree에서 활성화되어 있거나 사용되고 있기 때문에, 해당 연결을 완전히 제거하지 못했습니다. 너무 많은 스트림들이 존재합니다: [{2}].
|
||||
upgradeHandler.pruneStart=연결 [{0}]: 이전 스트림들에 대한 가지치기를 시작합니다. 한계값은 [{1}] + 10%이고, 현재 [{2}]개의 스트림들이 존재합니다.
|
||||
upgradeHandler.pruned=연결 [{0}]이(가) 완료된 스트림 [{1}]을(를) 제거했습니다.
|
||||
upgradeHandler.prunedPriority=연결 [{0}]이(가) 사용되지 않는 스트림 [{1}]을(를) 제거합니다. 해당 스트림은 priority tree의 일부였을 수 있습니다.
|
||||
upgradeHandler.releaseBacklog=연결 [{0}], 스트림 [{1}]이(가) 백로그로부터 해제되었습니다.
|
||||
upgradeHandler.rst.debug=연결 [{0}], 스트림 [{1}], 오류 [{2}], 메시지 [{3}], RST (스트림을 닫습니다)
|
||||
upgradeHandler.sendPrefaceFail=연결 [{0}]: 클라이언트에 preface를 전송하지 못했습니다.
|
||||
upgradeHandler.socketCloseFailed=소켓을 닫는 중 오류 발생
|
||||
upgradeHandler.stream.closed=스트림 [{0}]이(가) 얼마 동안 이미 닫혀 있었습니다.
|
||||
upgradeHandler.stream.even=[{0}]의 새로운 원격 스트림 ID가 요청되었으나, 모든 원격 스트림은 반드시 홀수의 ID를 사용해야 합니다.
|
||||
upgradeHandler.stream.notWritable=연결 [{0}], 스트림 [{1}], 이 스트림은 쓰기 가능하지 않습니다.
|
||||
upgradeHandler.stream.old=새로운 원격 스트림 ID [{0}]이(가) 요청되었지만, 가장 최근의 스트림은 [{1}]이었습니다.
|
||||
upgradeHandler.tooManyRemoteStreams=클라이언트가, 활성화된 스트림들을 [{0}]개를 초과하여 사용하려 시도했습니다.
|
||||
upgradeHandler.tooMuchOverhead=연결 [{0}]: 너무 많은 오버헤드로 인하여 연결이 닫힐 것입니다.
|
||||
upgradeHandler.unexpectedAck=연결 [{0}], 스트림 [{1}], 예기치 않은 상황에서 settings acknowledgement를 받았습니다.
|
||||
upgradeHandler.upgrade=연결 [{0}]: HTTP/1.1이 스트림 [1](으)로 업그레이드됩니다.
|
||||
upgradeHandler.upgrade.fail=연결 [{0}], HTTP/1.1 업그레이드 실패
|
||||
upgradeHandler.upgradeDispatch.entry=엔트리, 연결 [{0}], SocketStatus [{1}]
|
||||
upgradeHandler.upgradeDispatch.exit=Exit, 연결 [{0}], SocketState [{1}]
|
||||
upgradeHandler.windowSizeReservationInterrupted=연결 [{0}], 스트림 [{1}], 예비하려 한 바이트 크기: [{2}]
|
||||
upgradeHandler.windowSizeTooBig=연결 [{0}], 스트림 [{1}], 윈도우 크기가 너무 큽니다.
|
||||
upgradeHandler.writeBody=연결 [{0}], 스트림 [{1}], 데이터 길이 [{2}]
|
||||
upgradeHandler.writeHeaders=연결 [{0}], 스트림 [{1}]
|
||||
upgradeHandler.writePushHeaders=연결 [{0}], 스트림 [{1}], Push된 스트림 [{2}], EndOfStream [{3}]
|
||||
|
||||
windowAllocationManager.dispatched=연결 [{0}], 스트림 [{1}]에 디스패치됩니다.
|
||||
windowAllocationManager.notified=연결 [{0}], 스트림 [{1}]에 통지됩니다.
|
||||
windowAllocationManager.notify=연결 [{0}], 스트림 [{1}], 대기 타입 [{2}], 통지 타입 [{3}]
|
||||
windowAllocationManager.waitFor.connection=연결 [{0}], 스트림 [{1}], 제한 시간 [{2}] 내에서, 연결 흐름 제어 윈도우(blocking)를 대기합니다.
|
||||
windowAllocationManager.waitFor.ise=연결 [{0}], 스트림 [{1}], 이미 대기 중입니다.
|
||||
windowAllocationManager.waitFor.stream=연결 [{0}], 스트림 [{1}], 제한 시간 [{2}] 내에서, 스트림 흐름 제어 윈도우(blocking)를 대기합니다.
|
||||
windowAllocationManager.waitForNonBlocking.connection=연결 [{0}], 스트림 [{1}], 연결 흐름 제어 윈도우(non-blocking)를 대기합니다.
|
||||
windowAllocationManager.waitForNonBlocking.stream=연결 [{0}], 스트림 [{1}], 스트림 흐름 제어 윈도우(non-blocking)를 대기합니다.
|
||||
|
||||
writeStateMachine.endWrite.ise=쓰기가 한번 완료되고 나면, 새로운 상태를 위해 [{0}]을(를) 지정하는 것은 불허됩니다.
|
||||
writeStateMachine.ise=[{1}]인 상태에서 [{0}()]을(를) 호출하는 것은 불허됩니다.
|
||||
16
java/org/apache/coyote/http2/LocalStrings_ru.properties
Normal file
16
java/org/apache/coyote/http2/LocalStrings_ru.properties
Normal file
@@ -0,0 +1,16 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
upgradeHandler.pingFailed=Соединение [{0}], ошибка при передаче ping''а клиенту
|
||||
95
java/org/apache/coyote/http2/LocalStrings_zh_CN.properties
Normal file
95
java/org/apache/coyote/http2/LocalStrings_zh_CN.properties
Normal file
@@ -0,0 +1,95 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
abstractStream.windowSizeDec=连接[{0}],流[{1}],将流控制窗口减少[{2}]到[{3}]
|
||||
abstractStream.windowSizeInc=连接 [{0}], 流 [{1}], 增加流量控制窗口[{2}] 到 [{3}]
|
||||
abstractStream.windowSizeTooBig=连接[{0}],流[{1}],窗口大小从[{2}]增加到[{3}],超过了允许的最大值
|
||||
|
||||
connectionPrefaceParser.mismatch=在客户端preface [{0}]的开头收到了意外的字节序列
|
||||
|
||||
connectionSettings.debug=连接[{0}],参数类型[{1}]设置为[{2}]
|
||||
connectionSettings.enablePushInvalid=连接[{0}],请求的enable push[{1}]值不是允许的值之一(零或一)
|
||||
connectionSettings.headerTableSizeLimit=连接 [{0}],尝试将 header 表大小设置为 [{1}],但限制为 16k
|
||||
connectionSettings.maxFrameSizeInvalid=连接[{0}],请求的最大帧大小[{1}]在[{2}]到[{3}]的允许范围之外
|
||||
connectionSettings.unknown=连接[{0}],标识为[{1}]和值为[{2}]的未知设置被忽略
|
||||
|
||||
frameType.checkPayloadSize=对帧类型[{1}]来说,负载[{0}]是无效的
|
||||
frameType.checkStream=无效的帧类型[{0}]
|
||||
|
||||
hpack.integerEncodedOverTooManyOctets=HPACK 可变长度整数编码过多的八位字节,最大值为[{0}]
|
||||
hpack.invalidCharacter=代码点[{1}]处的Unicode字符[{0}]无法编码,因为它超出了允许的0到255范围。
|
||||
|
||||
hpackdecoder.tableSizeUpdateNotAtStart=任何表大小的更新都必须在头块开始时发送。
|
||||
|
||||
http2Parser.headerLimitCount=连接[{0}],流[{1}],标题太多
|
||||
http2Parser.headerLimitSize=连接[{0}],Stream[{1}],总的头信息尺寸太大
|
||||
http2Parser.headers.wrongStream=连接[{0}], 头部信息对于流[{1}]正在进行但对于流[{2}]的一帧已经收到了。
|
||||
http2Parser.nonZeroPadding=连接[{0}],流[{1}],非零填充
|
||||
http2Parser.preface.invalid=收到了无效的连接preface
|
||||
http2Parser.processFrame.unexpectedType=需要帧类型[{0}],但收到帧类型[{1}]
|
||||
http2Parser.processFrameData.window=连接[{0}],客户端发送的数据比流窗口允许的多
|
||||
http2Parser.processFrameHeaders.decodingDataLeft=数据在HPACK解码后依然保留 - 它本应该被消费掉
|
||||
http2Parser.processFrameHeaders.payload=连接:[{0}],流:[{1}],正在处理[{2}]大小的头文件负载
|
||||
http2Parser.processFramePriority.invalidParent=连接[{0}],流[{1}],流可能不依赖于自身
|
||||
http2Parser.processFramePushPromise=连接[{0}],流[{1}],客户端不得发送Push promise帧
|
||||
|
||||
http2Parser.swallow.debug=连接:[{0}],流:[{1}],吞下[{2}]字节
|
||||
|
||||
stream.closed=连接[{0}],流[{1}],一旦关闭就无法写入流
|
||||
stream.header.debug=连接[{0}],流[{1}],HTTP标头[{2}],值[{3}]
|
||||
stream.header.noPath=连接[{0}],流[{1}],[:path]伪标头为空
|
||||
stream.header.required=连接 [{0}], 流 [{1}], 缺少一个或多个必要的头文件
|
||||
stream.header.unknownPseudoHeader=收到连接[{0}],流[{1}],未知伪标头[{2}]
|
||||
stream.inputBuffer.readTimeout=等待从客户端读取数据超时
|
||||
stream.inputBuffer.reset=流已被重置
|
||||
stream.inputBuffer.signal=读线程在等待时,数据被添加到inBuffer中。 发信号通知该线程继续
|
||||
stream.reprioritisation.debug=连接[{0}],流[{1}],独占[{2}],父[{3}],权重[{4}]
|
||||
stream.reset.fail=连接[{0}],流[{1}],重置流失败
|
||||
stream.writeTimeout=等待客户端增加流控制窗口以允许写入流数据的超时
|
||||
|
||||
streamProcessor.cancel=连接[{0}],流[{1}],请求body尚未完全读取,不再需要更多数据
|
||||
streamProcessor.error.connection=连接[{0}],Stream[{1}],处理中发生错误,对连接来说是致命的。
|
||||
streamProcessor.service.error=请求处理期间出错
|
||||
|
||||
streamStateMachine.debug.change=连接[{0}],流[{1}],状态从[{2}]更改为[{3}]
|
||||
|
||||
upgradeHandler.allocate.left=连接[{0}],流[{1}],[{2}]字节未分配 - 尝试分配给子项
|
||||
upgradeHandler.allocate.recipient=连接[{0}],流[{1}],潜在接收者[{2}],权重为[{3}]
|
||||
upgradeHandler.goaway.debug=连接[{0}],离开,最后的流[{1}],错误码[{2}],调试数据[{3}]
|
||||
upgradeHandler.init=连接[{0}],状态[{1}]
|
||||
upgradeHandler.ioerror=连接[{0}]
|
||||
upgradeHandler.pingFailed=连接[{0}],向客户端发送ping失败
|
||||
upgradeHandler.prefaceReceived=连接[{0}],从客户端收到连接准备。
|
||||
upgradeHandler.pruneIncomplete=连接[{0}],流[{1}],无法完全修剪连接,因为有[{2}]个活动流太多
|
||||
upgradeHandler.prunedPriority=连接[{0}]修剪了未使用的流[{1}],该流可能曾是优先级树的一部分
|
||||
upgradeHandler.rst.debug=连接[{0}],流[{1}],错误[{2}],消息[{3}],RST(关闭流)
|
||||
upgradeHandler.sendPrefaceFail=连接[{0}],给客户端发送前言失败
|
||||
upgradeHandler.socketCloseFailed=关闭 socket 错误
|
||||
upgradeHandler.stream.closed=流[{0}]已经关闭了一段时间
|
||||
upgradeHandler.stream.even=请求了新的远程流ID[{0}],但所有远程流都必须使用奇数标识符
|
||||
|
||||
upgradeHandler.tooMuchOverhead=连接[{0}],开销过大,连接将关闭
|
||||
upgradeHandler.upgrade=连接[{0}], HTTP/1.1 升级到流[1]
|
||||
upgradeHandler.upgrade.fail=连接[{0}],http/1.1升级失败
|
||||
upgradeHandler.upgradeDispatch.entry=条目,连接[{0}],SocketStatus [{1}]
|
||||
upgradeHandler.upgradeDispatch.exit=退出,连接[{0}], SocketState[{1}]
|
||||
upgradeHandler.windowSizeTooBig=连接[{0}],流[{1}],窗口太大
|
||||
upgradeHandler.writeBody=连接 [{0}],数据流[{1}], 数据长度[{2}]
|
||||
upgradeHandler.writeHeaders=连接 [{0}],流 [{1}]
|
||||
|
||||
windowAllocationManager.notify=连接[{0}], 流[{1}], 等待类型[{2}], 通知类型[{3}]
|
||||
windowAllocationManager.waitFor.ise=连接[{0}], 流[{1}], 已经准备好
|
||||
|
||||
writeStateMachine.ise=处于 [{1}] 状态时调用 [{0}()] 方法是非法的
|
||||
68
java/org/apache/coyote/http2/Setting.java
Normal file
68
java/org/apache/coyote/http2/Setting.java
Normal file
@@ -0,0 +1,68 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.coyote.http2;
|
||||
|
||||
/**
 * The HTTP/2 settings parameters, keyed by their wire identifiers (1-6).
 * {@link #UNKNOWN} represents any identifier outside that range so that
 * unrecognised settings can be ignored rather than treated as an error.
 */
public enum Setting {
    HEADER_TABLE_SIZE(1),
    ENABLE_PUSH(2),
    MAX_CONCURRENT_STREAMS(3),
    INITIAL_WINDOW_SIZE(4),
    MAX_FRAME_SIZE(5),
    MAX_HEADER_LIST_SIZE(6),
    UNKNOWN(Integer.MAX_VALUE);

    // The identifier used for this setting on the wire in SETTINGS frames.
    private final int id;

    private Setting(int id) {
        this.id = id;
    }

    /**
     * @return the wire identifier for this setting
     */
    public int getId() {
        return id;
    }

    @Override
    public String toString() {
        return Integer.toString(id);
    }

    /**
     * Maps a wire identifier to the corresponding setting.
     *
     * @param i the identifier read from a SETTINGS frame
     * @return the matching setting, or {@link #UNKNOWN} if the identifier is
     *         not one of the defined values
     */
    public static Setting valueOf(int i) {
        for (Setting candidate : values()) {
            if (candidate != UNKNOWN && candidate.id == i) {
                return candidate;
            }
        }
        return UNKNOWN;
    }
}
|
||||
1291
java/org/apache/coyote/http2/Stream.java
Normal file
1291
java/org/apache/coyote/http2/Stream.java
Normal file
File diff suppressed because it is too large
Load Diff
37
java/org/apache/coyote/http2/StreamException.java
Normal file
37
java/org/apache/coyote/http2/StreamException.java
Normal file
@@ -0,0 +1,37 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.coyote.http2;
|
||||
|
||||
/**
|
||||
* Thrown when an HTTP/2 stream error occurs.
|
||||
*/
|
||||
/**
 * Thrown when an HTTP/2 stream error occurs. Unlike a connection level
 * error, the error is scoped to the single stream identified by
 * {@link #getStreamId()}.
 */
public class StreamException extends Http2Exception {

    private static final long serialVersionUID = 1L;

    // Identifier of the stream on which the error occurred.
    private final int streamId;

    /**
     * @param msg      human readable description of the error
     * @param error    the HTTP/2 error code associated with this failure
     * @param streamId the identifier of the stream in error
     */
    public StreamException(String msg, Http2Error error, int streamId) {
        super(msg, error);
        this.streamId = streamId;
    }


    /**
     * @return the identifier of the stream on which the error occurred
     */
    public int getStreamId() {
        return streamId;
    }
}
|
||||
401
java/org/apache/coyote/http2/StreamProcessor.java
Normal file
401
java/org/apache/coyote/http2/StreamProcessor.java
Normal file
@@ -0,0 +1,401 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.coyote.http2;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Iterator;
|
||||
|
||||
import org.apache.coyote.AbstractProcessor;
|
||||
import org.apache.coyote.ActionCode;
|
||||
import org.apache.coyote.Adapter;
|
||||
import org.apache.coyote.ContainerThreadMarker;
|
||||
import org.apache.coyote.ErrorState;
|
||||
import org.apache.coyote.Request;
|
||||
import org.apache.coyote.Response;
|
||||
import org.apache.coyote.http11.filters.GzipOutputFilter;
|
||||
import org.apache.juli.logging.Log;
|
||||
import org.apache.juli.logging.LogFactory;
|
||||
import org.apache.tomcat.util.buf.ByteChunk;
|
||||
import org.apache.tomcat.util.http.FastHttpDateFormat;
|
||||
import org.apache.tomcat.util.http.MimeHeaders;
|
||||
import org.apache.tomcat.util.net.AbstractEndpoint.Handler.SocketState;
|
||||
import org.apache.tomcat.util.net.DispatchType;
|
||||
import org.apache.tomcat.util.net.SocketEvent;
|
||||
import org.apache.tomcat.util.net.SocketWrapperBase;
|
||||
import org.apache.tomcat.util.res.StringManager;
|
||||
|
||||
class StreamProcessor extends AbstractProcessor {
|
||||
|
||||
private static final Log log = LogFactory.getLog(StreamProcessor.class);
private static final StringManager sm = StringManager.getManager(StreamProcessor.class);

// Connection-level upgrade handler that owns this processor.
private final Http2UpgradeHandler handler;
// The single HTTP/2 stream this processor services.
private final Stream stream;
|
||||
|
||||
|
||||
/**
 * Creates a processor for a single HTTP/2 stream.
 *
 * @param handler       the connection level upgrade handler
 * @param stream        the stream whose requests/responses are processed
 * @param adapter       the coyote adapter used to service requests
 * @param socketWrapper the wrapper for the underlying connection's socket
 */
StreamProcessor(Http2UpgradeHandler handler, Stream stream, Adapter adapter,
        SocketWrapperBase<?> socketWrapper) {
    // The stream supplies the coyote request/response pair for the superclass.
    super(socketWrapper.getEndpoint(), stream.getCoyoteRequest(), stream.getCoyoteResponse());
    this.handler = handler;
    this.stream = stream;
    setAdapter(adapter);
    setSocketWrapper(socketWrapper);
}
|
||||
|
||||
|
||||
/**
 * Processes a socket event for this stream and handles the resulting socket
 * state, closing the stream with a connection or stream level error where
 * I/O is no longer allowed. Always triggers processing of any queued stream
 * afterwards, even on failure.
 *
 * @param event the socket event to process
 */
final void process(SocketEvent event) {
    try {
        // FIXME: the regular processor syncs on socketWrapper, but here this deadlocks
        synchronized (this) {
            // HTTP/2 equivalent of AbstractConnectionHandler#process() without the
            // socket <-> processor mapping
            ContainerThreadMarker.set();
            SocketState state = SocketState.CLOSED;
            try {
                state = process(socketWrapper, event);

                if (state == SocketState.LONG) {
                    // Processing is incomplete; keep the processor registered.
                    handler.getProtocol().getHttp11Protocol().addWaitingProcessor(this);
                } else if (state == SocketState.CLOSED) {
                    handler.getProtocol().getHttp11Protocol().removeWaitingProcessor(this);
                    if (!getErrorState().isConnectionIoAllowed()) {
                        // Fatal for the whole connection.
                        ConnectionException ce = new ConnectionException(sm.getString(
                                "streamProcessor.error.connection", stream.getConnectionId(),
                                stream.getIdentifier()), Http2Error.INTERNAL_ERROR);
                        stream.close(ce);
                    } else if (!getErrorState().isIoAllowed()) {
                        // Fatal for this stream only. Prefer any reset already
                        // recorded on the stream over a generic internal error.
                        StreamException se = stream.getResetException();
                        if (se == null) {
                            se = new StreamException(sm.getString(
                                    "streamProcessor.error.stream", stream.getConnectionId(),
                                    stream.getIdentifier()), Http2Error.INTERNAL_ERROR,
                                    stream.getIdAsInt());
                        }
                        stream.close(se);
                    }
                }
            } catch (Exception e) {
                // Unexpected failure: treat as fatal for the connection,
                // preserving the original exception as the cause.
                String msg = sm.getString("streamProcessor.error.connection",
                        stream.getConnectionId(), stream.getIdentifier());
                if (log.isDebugEnabled()) {
                    log.debug(msg, e);
                }
                ConnectionException ce = new ConnectionException(msg, Http2Error.INTERNAL_ERROR);
                ce.initCause(e);
                stream.close(ce);
            } finally {
                ContainerThreadMarker.clear();
            }
        }
    } finally {
        // Allow the handler to start processing the next queued stream.
        handler.executeQueuedStream();
    }
}
|
||||
|
||||
|
||||
/**
 * Commits the response, populates the HTTP/2 response headers and writes
 * them to the stream.
 *
 * @throws IOException if writing the headers fails
 */
@Override
protected final void prepareResponse() throws IOException {
    // Mark committed before writing so the write path sees a committed response.
    response.setCommitted(true);
    prepareHeaders(request, response, handler.getProtocol(), stream);
    stream.writeHeaders();
}
|
||||
|
||||
|
||||
// Static so it can be used by Stream to build the MimeHeaders required for
|
||||
// an ACK. For that use case coyoteRequest, protocol and stream will be null.
|
||||
static void prepareHeaders(Request coyoteRequest, Response coyoteResponse,
|
||||
Http2Protocol protocol, Stream stream) {
|
||||
MimeHeaders headers = coyoteResponse.getMimeHeaders();
|
||||
int statusCode = coyoteResponse.getStatus();
|
||||
|
||||
// Add the pseudo header for status
|
||||
headers.addValue(":status").setString(Integer.toString(statusCode));
|
||||
|
||||
// Check to see if a response body is present
|
||||
if (!(statusCode < 200 || statusCode == 204 || statusCode == 205 || statusCode == 304)) {
|
||||
String contentType = coyoteResponse.getContentType();
|
||||
if (contentType != null) {
|
||||
headers.setValue("content-type").setString(contentType);
|
||||
}
|
||||
String contentLanguage = coyoteResponse.getContentLanguage();
|
||||
if (contentLanguage != null) {
|
||||
headers.setValue("content-language").setString(contentLanguage);
|
||||
}
|
||||
// Add a content-length header if a content length has been set unless
|
||||
// the application has already added one
|
||||
long contentLength = coyoteResponse.getContentLengthLong();
|
||||
if (contentLength != -1 && headers.getValue("content-length") == null) {
|
||||
headers.addValue("content-length").setLong(contentLength);
|
||||
}
|
||||
} else {
|
||||
if (statusCode == 205) {
|
||||
// RFC 7231 requires the server to explicitly signal an empty
|
||||
// response in this case
|
||||
coyoteResponse.setContentLength(0);
|
||||
} else {
|
||||
coyoteResponse.setContentLength(-1);
|
||||
}
|
||||
}
|
||||
|
||||
// Add date header unless it is an informational response or the
|
||||
// application has already set one
|
||||
if (statusCode >= 200 && headers.getValue("date") == null) {
|
||||
headers.addValue("date").setString(FastHttpDateFormat.getCurrentDate());
|
||||
}
|
||||
|
||||
if (protocol != null && protocol.useCompression(coyoteRequest, coyoteResponse)) {
|
||||
// Enable compression. Headers will have been set. Need to configure
|
||||
// output filter at this point.
|
||||
stream.addOutputFilter(new GzipOutputFilter());
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/**
 * Completes the response by ending the stream's output buffer.
 *
 * @throws IOException if the final write fails
 */
@Override
protected final void finishResponse() throws IOException {
    stream.getOutputBuffer().end();
}
|
||||
|
||||
|
||||
@Override
|
||||
protected final void ack() {
|
||||
if (!response.isCommitted() && request.hasExpectation()) {
|
||||
try {
|
||||
stream.writeAck();
|
||||
} catch (IOException ioe) {
|
||||
setErrorState(ErrorState.CLOSE_CONNECTION_NOW, ioe);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/**
 * Flushes any buffered response data to the stream.
 *
 * @throws IOException if the flush fails
 */
@Override
protected final void flush() throws IOException {
    stream.getOutputBuffer().flush();
}
|
||||
|
||||
|
||||
/**
 * Returns the number of bytes currently available in the stream's input
 * buffer. Note that the {@code doRead} parameter is ignored here.
 */
@Override
protected final int available(boolean doRead) {
    return stream.getInputBuffer().available();
}
|
||||
|
||||
|
||||
/**
 * Inserts a replayed request body into the stream's input buffer and marks
 * the end of the stream for the replayed content.
 */
@Override
protected final void setRequestBody(ByteChunk body) {
    stream.getInputBuffer().insertReplayedBody(body);
    try {
        stream.receivedEndOfStream();
    } catch (ConnectionException e) {
        // Exception will not be thrown in this case
    }
}
|
||||
|
||||
|
||||
/**
 * No-op for HTTP/2 streams.
 */
@Override
protected final void setSwallowResponse() {
    // NO-OP
}
|
||||
|
||||
|
||||
/**
 * Deliberately a no-op: swallowing of request input cannot be disabled for
 * HTTP/2, see the comment below.
 */
@Override
protected final void disableSwallowRequest() {
    // NO-OP
    // HTTP/2 has to swallow any input received to ensure that the flow
    // control windows are correctly tracked.
}
|
||||
|
||||
|
||||
@Override
|
||||
protected void processSocketEvent(SocketEvent event, boolean dispatch) {
|
||||
if (dispatch) {
|
||||
handler.processStreamOnContainerThread(this, event);
|
||||
} else {
|
||||
this.process(event);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/**
 * Delegates the non-blocking read readiness check to the stream's input
 * buffer.
 */
@Override
protected final boolean isReadyForRead() {
    return stream.getInputBuffer().isReadyForRead();
}
|
||||
|
||||
|
||||
/**
 * Delegates to the stream's input buffer to determine whether the full
 * request body has been read.
 */
@Override
protected final boolean isRequestBodyFullyRead() {
    return stream.getInputBuffer().isRequestBodyFullyRead();
}
|
||||
|
||||
|
||||
@Override
protected final void registerReadInterest() {
    // Should never be called for StreamProcessor as isReadyForRead() is
    // overridden, so read interest is handled there instead.
    throw new UnsupportedOperationException();
}
|
||||
|
||||
|
||||
@Override
protected final boolean isReadyForWrite() {
    // Delegate the non-blocking write readiness check to the stream.
    return stream.isReadyForWrite();
}
|
||||
|
||||
|
||||
@Override
|
||||
protected final void executeDispatches() {
|
||||
Iterator<DispatchType> dispatches = getIteratorAndClearDispatches();
|
||||
/*
|
||||
* Compare with superclass that uses SocketWrapper
|
||||
* A sync is not necessary here as the window sizes are updated with
|
||||
* syncs before the dispatches are executed and it is the window size
|
||||
* updates that need to be complete before the dispatch executes.
|
||||
*/
|
||||
while (dispatches != null && dispatches.hasNext()) {
|
||||
DispatchType dispatchType = dispatches.next();
|
||||
/*
|
||||
* Dispatch on new thread.
|
||||
* Firstly, this avoids a deadlock on the SocketWrapper as Streams
|
||||
* being processed by container threads lock the SocketProcessor
|
||||
* before they lock the SocketWrapper which is the opposite order to
|
||||
* container threads processing via Http2UpgrageHandler.
|
||||
* Secondly, this code executes after a Window update has released
|
||||
* one or more Streams. By dispatching each Stream to a dedicated
|
||||
* thread, those Streams may progress concurrently.
|
||||
*/
|
||||
processSocketEvent(dispatchType.getSocketStatus(), true);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@Override
protected final boolean isPushSupported() {
    // Delegate to the stream, which knows whether server push is available.
    return stream.isPushSupported();
}
|
||||
|
||||
|
||||
@Override
protected final void doPush(Request pushTarget) {
    try {
        stream.push(pushTarget);
    } catch (IOException ioe) {
        // A failed push is treated as fatal for the connection and the
        // failure is also recorded on the response.
        setErrorState(ErrorState.CLOSE_CONNECTION_NOW, ioe);
        response.setErrorException(ioe);
    }
}
|
||||
|
||||
|
||||
@Override
public void recycle() {
    // StreamProcessor instances are not re-used.
    // Clear fields that can be cleared to aid GC and trigger NPEs if this
    // is reused
    setSocketWrapper(null);
    setAdapter(null);
}
|
||||
|
||||
|
||||
@Override
protected Log getLog() {
    // Provide this class's logger to the base class.
    return log;
}
|
||||
|
||||
|
||||
@Override
public void pause() {
    // NO-OP. Handled by the Http2UpgradeHandler
}
|
||||
|
||||
|
||||
@Override
|
||||
public SocketState service(SocketWrapperBase<?> socket) throws IOException {
|
||||
try {
|
||||
adapter.service(request, response);
|
||||
} catch (Exception e) {
|
||||
if (log.isDebugEnabled()) {
|
||||
log.debug(sm.getString("streamProcessor.service.error"), e);
|
||||
}
|
||||
response.setStatus(500);
|
||||
setErrorState(ErrorState.CLOSE_NOW, e);
|
||||
}
|
||||
|
||||
if (!isAsync()) {
|
||||
// If this is an async request then the request ends when it has
|
||||
// been completed. The AsyncContext is responsible for calling
|
||||
// endRequest() in that case.
|
||||
endRequest();
|
||||
}
|
||||
|
||||
if (getErrorState().isError()) {
|
||||
action(ActionCode.CLOSE, null);
|
||||
request.updateCounters();
|
||||
return SocketState.CLOSED;
|
||||
} else if (isAsync()) {
|
||||
return SocketState.LONG;
|
||||
} else {
|
||||
action(ActionCode.CLOSE, null);
|
||||
request.updateCounters();
|
||||
return SocketState.CLOSED;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@Override
protected boolean flushBufferedWrite() throws IOException {
    if (log.isDebugEnabled()) {
        log.debug(sm.getString("streamProcessor.flushBufferedWrite.entry",
                stream.getConnectionId(), stream.getIdentifier()));
    }
    // A non-blocking flush; returns true if data remains buffered.
    if (stream.flush(false)) {
        // The buffer wasn't fully flushed so re-register the
        // stream for write. Note this does not go via the
        // Response since the write registration state at
        // that level should remain unchanged. Once the buffer
        // has been emptied then the code below will call
        // dispatch() which will enable the
        // Response to respond to this event.
        if (stream.isReadyForWrite()) {
            // Unexpected: a stream with buffered data should not report
            // itself as ready for write.
            throw new IllegalStateException();
        }
        return true;
    }
    return false;
}
|
||||
|
||||
|
||||
@Override
protected final SocketState dispatchEndRequest() throws IOException {
    // End of an async request dispatched back to the container: finish the
    // request and close the stream.
    endRequest();
    return SocketState.CLOSED;
}
|
||||
|
||||
|
||||
// Completes the request, sending a CANCEL stream reset if the client is
// still sending body data that will never be read.
private void endRequest() throws IOException {
    if (!stream.isInputFinished() && getErrorState().isIoAllowed()) {
        // The request has been processed but the request body has not been
        // fully read. This typically occurs when Tomcat rejects an upload
        // of some form (e.g. PUT or POST). Need to tell the client not to
        // send any more data but only if a reset has not already been
        // triggered.
        StreamException se = new StreamException(
                sm.getString("streamProcessor.cancel", stream.getConnectionId(),
                        stream.getIdentifier()), Http2Error.CANCEL, stream.getIdAsInt());
        handler.sendStreamReset(se);
    }
}
|
||||
}
|
||||
37
java/org/apache/coyote/http2/StreamRunnable.java
Normal file
37
java/org/apache/coyote/http2/StreamRunnable.java
Normal file
@@ -0,0 +1,37 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.coyote.http2;
|
||||
|
||||
import org.apache.tomcat.util.net.SocketEvent;
|
||||
|
||||
class StreamRunnable implements Runnable {
|
||||
|
||||
private final StreamProcessor processor;
|
||||
private final SocketEvent event;
|
||||
|
||||
|
||||
public StreamRunnable(StreamProcessor processor, SocketEvent event) {
|
||||
this.processor = processor;
|
||||
this.event = event;
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public void run() {
|
||||
processor.process(event);
|
||||
}
|
||||
}
|
||||
257
java/org/apache/coyote/http2/StreamStateMachine.java
Normal file
257
java/org/apache/coyote/http2/StreamStateMachine.java
Normal file
@@ -0,0 +1,257 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.coyote.http2;
|
||||
|
||||
import java.util.HashSet;
|
||||
import java.util.Set;
|
||||
|
||||
import org.apache.juli.logging.Log;
|
||||
import org.apache.juli.logging.LogFactory;
|
||||
import org.apache.tomcat.util.res.StringManager;
|
||||
|
||||
/**
|
||||
* See <a href="https://tools.ietf.org/html/rfc7540#section-5.1">state
|
||||
* diagram</a> in RFC 7540.
|
||||
* <br>
|
||||
* The following additions are supported by this state machine:
|
||||
* <ul>
|
||||
* <li>differentiate between closed (normal) and closed caused by reset</li>
|
||||
* </ul>
|
||||
*
|
||||
*/
|
||||
public class StreamStateMachine {

    private static final Log log = LogFactory.getLog(StreamStateMachine.class);
    private static final StringManager sm = StringManager.getManager(StreamStateMachine.class);

    // The stream whose state is being tracked (used for logging/errors).
    private final Stream stream;
    // Current RFC 7540 state (plus the local closed-state refinements below).
    private State state;


    public StreamStateMachine(Stream stream) {
        this.stream = stream;
        // All streams start in IDLE per RFC 7540 section 5.1.
        stateChange(null, State.IDLE);
    }


    // IDLE -> RESERVED_LOCAL when this side sends PUSH_PROMISE.
    public synchronized void sentPushPromise() {
        stateChange(State.IDLE, State.RESERVED_LOCAL);
    }


    // IDLE -> RESERVED_REMOTE when the peer sends PUSH_PROMISE.
    public synchronized void receivedPushPromise() {
        stateChange(State.IDLE, State.RESERVED_REMOTE);
    }


    // Sending HEADERS opens an idle stream or half-closes a locally
    // reserved one. Only the transition matching the current state applies;
    // stateChange() is a no-op for the other.
    public synchronized void sentStartOfHeaders() {
        stateChange(State.IDLE, State.OPEN);
        stateChange(State.RESERVED_LOCAL, State.HALF_CLOSED_REMOTE);
    }


    // Receiving HEADERS opens an idle stream or half-closes a remotely
    // reserved one.
    public synchronized void receivedStartOfHeaders() {
        stateChange(State.IDLE, State.OPEN);
        stateChange(State.RESERVED_REMOTE, State.HALF_CLOSED_LOCAL);
    }


    // Sending END_OF_STREAM half-closes (local) an open stream or fully
    // closes one that was already half-closed (remote).
    public synchronized void sentEndOfStream() {
        stateChange(State.OPEN, State.HALF_CLOSED_LOCAL);
        stateChange(State.HALF_CLOSED_REMOTE, State.CLOSED_TX);
    }


    // Receiving END_OF_STREAM half-closes (remote) an open stream or fully
    // closes one that was already half-closed (local).
    public synchronized void receivedEndOfStream() {
        stateChange(State.OPEN, State.HALF_CLOSED_REMOTE);
        stateChange(State.HALF_CLOSED_LOCAL, State.CLOSED_RX);
    }


    /**
     * Marks the stream as reset. This method will not change the stream state
     * if:
     * <ul>
     * <li>The stream is already reset</li>
     * <li>The stream is already closed</li>
     * </ul>
     *
     * @throws IllegalStateException If the stream is in a state that does not
     *         permit resets
     */
    public synchronized void sendReset() {
        if (state == State.IDLE) {
            // NOTE(review): this uses the "streamStateMachine.debug.change"
            // message key with 3 arguments while stateChange() uses it with
            // 4 (old and new state) — looks like the wrong key; verify
            // against LocalStrings.properties.
            throw new IllegalStateException(sm.getString("streamStateMachine.debug.change",
                    stream.getConnectionId(), stream.getIdentifier(), state));
        }
        if (state.canReset()) {
            stateChange(state, State.CLOSED_RST_TX);
        }
    }


    // Unconditionally move to the reset-received closed state.
    final synchronized void receivedReset() {
        stateChange(state, State.CLOSED_RST_RX);
    }


    // Applies newState only if the current state matches oldState, so
    // callers can list several candidate transitions safely.
    private void stateChange(State oldState, State newState) {
        if (state == oldState) {
            state = newState;
            if (log.isDebugEnabled()) {
                log.debug(sm.getString("streamStateMachine.debug.change", stream.getConnectionId(),
                        stream.getIdentifier(), oldState, newState));
            }
        }
    }


    public synchronized void checkFrameType(FrameType frameType) throws Http2Exception {
        // No state change. Checks that receiving the frame type is valid for
        // the current state of this stream.
        if (!isFrameTypePermitted(frameType)) {
            // The state determines whether an invalid frame is a connection
            // error or only a stream error, and with which HTTP/2 error code.
            if (state.connectionErrorForInvalidFrame) {
                throw new ConnectionException(sm.getString("streamStateMachine.invalidFrame",
                        stream.getConnectionId(), stream.getIdentifier(), state, frameType),
                        state.errorCodeForInvalidFrame);
            } else {
                throw new StreamException(sm.getString("streamStateMachine.invalidFrame",
                        stream.getConnectionId(), stream.getIdentifier(), state, frameType),
                        state.errorCodeForInvalidFrame, stream.getIdAsInt());
            }
        }
    }


    public synchronized boolean isFrameTypePermitted(FrameType frameType) {
        return state.isFrameTypePermitted(frameType);
    }


    public synchronized boolean isActive() {
        return state.isActive();
    }


    public synchronized boolean canRead() {
        return state.canRead();
    }


    public synchronized boolean canWrite() {
        return state.canWrite();
    }


    public synchronized boolean isClosedFinal() {
        return state == State.CLOSED_FINAL;
    }

    // IDLE -> CLOSED_FINAL; no-op for any other state.
    public synchronized void closeIfIdle() {
        stateChange(State.IDLE, State.CLOSED_FINAL);
    }


    // Constructor arguments, in order:
    //   canRead, canWrite, canReset, connectionErrorForInvalidFrame,
    //   errorCodeForInvalidFrame, then the frame types permitted in this state.
    private enum State {
        IDLE               (false, false, false, true,
                            Http2Error.PROTOCOL_ERROR, FrameType.HEADERS,
                                                       FrameType.PRIORITY),
        OPEN               (true,  true,  true,  true,
                            Http2Error.PROTOCOL_ERROR, FrameType.DATA,
                                                       FrameType.HEADERS,
                                                       FrameType.PRIORITY,
                                                       FrameType.RST,
                                                       FrameType.PUSH_PROMISE,
                                                       FrameType.WINDOW_UPDATE),
        RESERVED_LOCAL     (false, false, true,  true,
                            Http2Error.PROTOCOL_ERROR, FrameType.PRIORITY,
                                                       FrameType.RST,
                                                       FrameType.WINDOW_UPDATE),
        RESERVED_REMOTE    (false, true,  true,  true,
                            Http2Error.PROTOCOL_ERROR, FrameType.HEADERS,
                                                       FrameType.PRIORITY,
                                                       FrameType.RST),
        HALF_CLOSED_LOCAL  (true,  false, true,  true,
                            Http2Error.PROTOCOL_ERROR, FrameType.DATA,
                                                       FrameType.HEADERS,
                                                       FrameType.PRIORITY,
                                                       FrameType.RST,
                                                       FrameType.PUSH_PROMISE,
                                                       FrameType.WINDOW_UPDATE),
        HALF_CLOSED_REMOTE (false, true,  true,  true,
                            Http2Error.STREAM_CLOSED,  FrameType.PRIORITY,
                                                       FrameType.RST,
                                                       FrameType.WINDOW_UPDATE),
        CLOSED_RX          (false, false, false, true,
                            Http2Error.STREAM_CLOSED,  FrameType.PRIORITY),
        CLOSED_TX          (false, false, false, true,
                            Http2Error.STREAM_CLOSED,  FrameType.PRIORITY,
                                                       FrameType.RST,
                                                       FrameType.WINDOW_UPDATE),
        CLOSED_RST_RX      (false, false, false, false,
                            Http2Error.STREAM_CLOSED,  FrameType.PRIORITY),
        CLOSED_RST_TX      (false, false, false, false,
                            Http2Error.STREAM_CLOSED,  FrameType.DATA,
                                                       FrameType.HEADERS,
                                                       FrameType.PRIORITY,
                                                       FrameType.RST,
                                                       FrameType.PUSH_PROMISE,
                                                       FrameType.WINDOW_UPDATE),
        CLOSED_FINAL       (false, false, false, true,
                            Http2Error.PROTOCOL_ERROR, FrameType.PRIORITY);

        private final boolean canRead;
        private final boolean canWrite;
        private final boolean canReset;
        // true -> invalid frames are connection errors; false -> stream errors.
        private final boolean connectionErrorForInvalidFrame;
        private final Http2Error errorCodeForInvalidFrame;
        private final Set<FrameType> frameTypesPermitted = new HashSet<>();

        private State(boolean canRead, boolean canWrite, boolean canReset,
                boolean connectionErrorForInvalidFrame, Http2Error errorCode,
                FrameType... frameTypes) {
            this.canRead = canRead;
            this.canWrite = canWrite;
            this.canReset = canReset;
            this.connectionErrorForInvalidFrame = connectionErrorForInvalidFrame;
            this.errorCodeForInvalidFrame = errorCode;
            for (FrameType frameType : frameTypes) {
                frameTypesPermitted.add(frameType);
            }
        }

        // A stream is active while either direction can still carry data.
        public boolean isActive() {
            return canWrite || canRead;
        }

        public boolean canRead() {
            return canRead;
        }

        public boolean canWrite() {
            return canWrite;
        }

        public boolean canReset() {
            return canReset;
        }

        public boolean isFrameTypePermitted(FrameType frameType) {
            return frameTypesPermitted.contains(frameType);
        }
    }
}
|
||||
209
java/org/apache/coyote/http2/WindowAllocationManager.java
Normal file
209
java/org/apache/coyote/http2/WindowAllocationManager.java
Normal file
@@ -0,0 +1,209 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.coyote.http2;
|
||||
|
||||
import org.apache.coyote.ActionCode;
|
||||
import org.apache.juli.logging.Log;
|
||||
import org.apache.juli.logging.LogFactory;
|
||||
import org.apache.tomcat.util.res.StringManager;
|
||||
|
||||
/**
|
||||
* Tracks whether the stream is waiting for an allocation to the stream flow
|
||||
* control window, to the connection flow control window or not waiting for an
|
||||
* allocation and only issues allocation notifications when the stream is known
|
||||
* to be waiting for the notification.
|
||||
*
|
||||
* It is possible for a stream to be waiting for a connection allocation when
|
||||
* a stream allocation is made. Therefore this class tracks the type of
|
||||
* allocation that the stream is waiting for to ensure that notifications are
|
||||
* correctly triggered.
|
||||
*
|
||||
* With the implementation at the time of writing, it is not possible for a
|
||||
* stream to receive an unexpected connection notification as these are only
|
||||
* issues to streams in the backlog and a stream must be waiting for a
|
||||
* connection allocation in order to be placed on the backlog. However, as a
|
||||
* precaution, this class protects against unexpected connection notifications.
|
||||
*
|
||||
* It is important for asynchronous processing not to notify unless a
|
||||
* notification is expected else a dispatch will be performed unnecessarily
|
||||
* which may lead to unexpected results.
|
||||
*
|
||||
* A previous implementation used separate locks for the stream and connection
|
||||
* notifications. However, correct handling of allocation waiting requires
|
||||
* holding the stream lock when making the decision to wait. Therefore both
|
||||
* allocations need to wait on the Stream.
|
||||
*/
|
||||
class WindowAllocationManager {

    private static final Log log = LogFactory.getLog(WindowAllocationManager.class);
    private static final StringManager sm = StringManager.getManager(WindowAllocationManager.class);

    // Bit flags describing which flow-control window the stream is waiting on.
    private static final int NONE = 0;
    private static final int STREAM = 1;
    private static final int CONNECTION = 2;

    // The Stream also serves as the monitor for all waits and notifications.
    private final Stream stream;

    // Guarded by synchronized (stream); holds NONE, STREAM, CONNECTION or both.
    private int waitingFor = NONE;

    WindowAllocationManager(Stream stream) {
        this.stream = stream;
    }

    // Block the current thread until a stream window allocation is notified
    // or the timeout expires (negative timeout -> wait indefinitely).
    void waitForStream(long timeout) throws InterruptedException {
        if (log.isDebugEnabled()) {
            log.debug(sm.getString("windowAllocationManager.waitFor.stream",
                    stream.getConnectionId(), stream.getIdentifier(), Long.toString(timeout)));
        }

        waitFor(STREAM, timeout);
    }


    // Block the current thread until a connection window allocation is
    // notified or the timeout expires.
    void waitForConnection(long timeout) throws InterruptedException {
        if (log.isDebugEnabled()) {
            log.debug(sm.getString("windowAllocationManager.waitFor.connection",
                    stream.getConnectionId(), stream.getIdentifier(), Long.toString(timeout)));
        }

        waitFor(CONNECTION, timeout);
    }


    // Record (without blocking) that the stream is waiting for a stream
    // window allocation; a later notify triggers a dispatch.
    void waitForStreamNonBlocking() {
        if (log.isDebugEnabled()) {
            log.debug(sm.getString("windowAllocationManager.waitForNonBlocking.stream",
                    stream.getConnectionId(), stream.getIdentifier()));
        }

        waitForNonBlocking(STREAM);
    }


    // Record (without blocking) that the stream is waiting for a connection
    // window allocation.
    void waitForConnectionNonBlocking() {
        if (log.isDebugEnabled()) {
            log.debug(sm.getString("windowAllocationManager.waitForNonBlocking.connection",
                    stream.getConnectionId(), stream.getIdentifier()));
        }

        waitForNonBlocking(CONNECTION);
    }


    void notifyStream() {
        notify(STREAM);
    }


    void notifyConnection() {
        notify(CONNECTION);
    }


    // Notify regardless of which window the stream is waiting for.
    void notifyAny() {
        notify(STREAM | CONNECTION);
    }


    boolean isWaitingForStream() {
        return isWaitingFor(STREAM);
    }


    boolean isWaitingForConnection() {
        return isWaitingFor(CONNECTION);
    }


    private boolean isWaitingFor(int waitTarget) {
        synchronized (stream) {
            return (waitingFor & waitTarget) > 0;
        }
    }


    // Blocking wait on the Stream monitor. Only one wait may be in progress
    // at a time; a second concurrent wait is a programming error.
    private void waitFor(int waitTarget, long timeout) throws InterruptedException {
        synchronized (stream) {
            if (waitingFor != NONE) {
                throw new IllegalStateException(sm.getString("windowAllocationManager.waitFor.ise",
                        stream.getConnectionId(), stream.getIdentifier()));
            }

            waitingFor = waitTarget;

            // Negative timeout means wait with no time limit.
            if (timeout < 0) {
                stream.wait();
            } else {
                stream.wait(timeout);
            }
        }
    }


    // Non-blocking variant: only records what is being waited for. Repeating
    // the same wait target is tolerated (non-blocking post-processing may
    // attempt to flush more than once); a different target is an error.
    private void waitForNonBlocking(int waitTarget) {
        synchronized (stream) {
            if (waitingFor == NONE) {
                waitingFor = waitTarget;
            } else if (waitingFor == waitTarget) {
                // NO-OP
                // Non-blocking post-processing may attempt to flush
            } else {
                throw new IllegalStateException(sm.getString("windowAllocationManager.waitFor.ise",
                        stream.getConnectionId(), stream.getIdentifier()));
            }

        }
    }


    // Deliver a notification if (and only if) the stream is waiting for one
    // of the windows in notifyTarget. Blocking waiters are released via
    // stream.notify(); non-blocking waiters get a write dispatch.
    private void notify(int notifyTarget) {
        // NOTE(review): waitingFor is read here for the debug message outside
        // the synchronized block below — the logged value may be stale.
        if (log.isDebugEnabled()) {
            log.debug(sm.getString("windowAllocationManager.notify", stream.getConnectionId(),
                    stream.getIdentifier(), Integer.toString(waitingFor), Integer.toString(notifyTarget)));
        }

        synchronized (stream) {
            if ((notifyTarget & waitingFor) > NONE) {
                // Reset this here so multiple notifies (possible with a
                // backlog containing multiple streams and small window updates)
                // are handled correctly (only the first should trigger a call
                // to stream.notify(). Additional notify() calls may trigger
                // unexpected timeouts.
                waitingFor = NONE;
                if (stream.getCoyoteResponse().getWriteListener() == null) {
                    // Blocking, so use notify to release StreamOutputBuffer
                    if (log.isDebugEnabled()) {
                        log.debug(sm.getString("windowAllocationManager.notified",
                                stream.getConnectionId(), stream.getIdentifier()));
                    }
                    stream.notify();
                } else {
                    // Non-blocking so dispatch
                    if (log.isDebugEnabled()) {
                        log.debug(sm.getString("windowAllocationManager.dispatched",
                                stream.getConnectionId(), stream.getIdentifier()));
                    }
                    stream.getCoyoteResponse().action(ActionCode.DISPATCH_WRITE, null);
                    // Need to explicitly execute dispatches on the StreamProcessor
                    // as this thread is being processed by an UpgradeProcessor
                    // which won't see this dispatch
                    stream.getCoyoteResponse().action(ActionCode.DISPATCH_EXECUTE, null);
                }
            }
        }
    }
}
|
||||
Reference in New Issue
Block a user