// angular-cn/packages/http/test/static_request_spec.ts

/**
* @license
* Copyright Google Inc. All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
import {describe, expect, it} from '@angular/core/testing/src/testing_internal';
import {ɵgetDOM as getDOM} from '@angular/platform-browser';
import {RequestOptions} from '../src/base_request_options';
import {ContentType} from '../src/enums';
import {Headers} from '../src/headers';
import {stringToArrayBuffer, stringToArrayBuffer8} from '../src/http_utils';
import {ArrayBuffer, Request} from '../src/static_request';
{
  describe('Request', () => {
    describe('detectContentType', () => {
      it('should return ContentType.NONE', () => {
        const req =
            new Request(new RequestOptions({url: 'test', method: 'GET', body: null}) as any);
        expect(req.detectContentType()).toEqual(ContentType.NONE);
      });
      it('should return ContentType.JSON', () => {
        const req = new Request(new RequestOptions({
          url: 'test',
          method: 'GET',
          body: null,
          headers: new Headers({'content-type': 'application/json'})
        }) as any);
        expect(req.detectContentType()).toEqual(ContentType.JSON);
      });
      it('should return ContentType.FORM', () => {
        const req = new Request(new RequestOptions({
          url: 'test',
          method: 'GET',
          body: null,
          headers: new Headers({'content-type': 'application/x-www-form-urlencoded'})
        }) as any);
        expect(req.detectContentType()).toEqual(ContentType.FORM);
      });
      it('should return ContentType.FORM_DATA', () => {
        const req = new Request(new RequestOptions({
          url: 'test',
          method: 'GET',
          body: null,
          headers: new Headers({'content-type': 'multipart/form-data'})
        }) as any);
        expect(req.detectContentType()).toEqual(ContentType.FORM_DATA);
      });
      it('should return ContentType.TEXT', () => {
        const req = new Request(new RequestOptions({
          url: 'test',
          method: 'GET',
          body: null,
          headers: new Headers({'content-type': 'text/plain'})
        }) as any);
        expect(req.detectContentType()).toEqual(ContentType.TEXT);
      });
      it('should return ContentType.BLOB', () => {
        const req = new Request(new RequestOptions({
          url: 'test',
          method: 'GET',
          body: null,
          headers: new Headers({'content-type': 'application/octet-stream'})
        }) as any);
        expect(req.detectContentType()).toEqual(ContentType.BLOB);
      });
      // An 'application/octet-stream' header normally maps to BLOB, but an ArrayBuffer body
      // takes precedence and is reported as ARRAY_BUFFER.
      it('should not create a blob out of ArrayBuffer', () => {
        const req = new Request(new RequestOptions({
          url: 'test',
          method: 'GET',
          body: new ArrayBuffer(1),
          headers: new Headers({'content-type': 'application/octet-stream'})
        }) as any);
        expect(req.detectContentType()).toEqual(ContentType.ARRAY_BUFFER);
      });
    });
    it('should return empty string if no body is present', () => {
      const req = new Request(new RequestOptions({
        url: 'test',
        method: 'GET',
        body: null,
        headers: new Headers({'content-type': 'application/json'})
      }) as any);
      expect(req.text()).toEqual('');
    });
    it('should return empty string if body is undefined', () => {
      const reqOptions = new RequestOptions(
          {url: 'test', method: 'GET', headers: new Headers({'content-type': 'application/json'})});
      delete reqOptions.body;
      const req = new Request(reqOptions as any);
      expect(req.text()).toEqual('');
    });
    it('should use object params', () => {
      const req = new Request({url: 'http://test.com', params: {'a': 3, 'b': ['x', 'y']}});
      expect(req.url).toBe('http://test.com?a=3&b=x&b=y');
    });
    it('should use search if present', () => {
      const req = new Request({url: 'http://test.com', search: 'a=1&b=2'});
      expect(req.url).toBe('http://test.com?a=1&b=2');
    });
    // text() takes an optional encodingHint when converting an ArrayBuffer body to a string:
    // the default, 'legacy', keeps the historical behavior (reading the buffer as a Uint16Array
    // and using String.fromCharCode, i.e. UCS-2 decoding that ignores surrogate pairs), while
    // 'iso-8859' interprets each individual byte of the buffer. UTF-8 and UTF-16 are not
    // supported; consumers who need them must fetch the ArrayBuffer and decode it themselves.
    if (getDOM().supportsWebAnimation()) {
      it('should serialize an ArrayBuffer to string via legacy encoding', () => {
        const str = '\u89d2\u5ea6';
        expect(new Request({body: stringToArrayBuffer(str), url: '/'}).text()).toEqual(str);
      });
      it('should serialize an ArrayBuffer to string via iso-8859 encoding', () => {
        const str = 'abcd';
        expect(new Request({body: stringToArrayBuffer8(str), url: '/'}).text('iso-8859'))
            .toEqual(str);
      });
    }
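    // Illustrative sketch, not part of the original spec: since UTF-8/UTF-16 decoding is left
    // to the consumer (see the encodingHint note above), the raw bytes can be read back via
    // arrayBuffer() and decoded manually. The TextDecoder constructor is looked up dynamically
    // because its availability in the test browsers is an assumption of this sketch.
    const TextDecoderCtor = (self as any)['TextDecoder'];
    if (TextDecoderCtor) {
      it('should let consumers decode a UTF-8 ArrayBuffer body themselves (sketch)', () => {
        // 0xE8 0xA7 0x92 0xE5 0xBA 0xA6 is the UTF-8 encoding of '\u89d2\u5ea6'.
        const utf8Body = new Uint8Array([0xe8, 0xa7, 0x92, 0xe5, 0xba, 0xa6]).buffer;
        const req = new Request({body: utf8Body, url: '/'});
        const decoded: string = new TextDecoderCtor('utf-8').decode(new Uint8Array(req.arrayBuffer()));
        expect(decoded).toEqual('\u89d2\u5ea6');
      });
    }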
  });
}