Unverified commit c09bb40e authored by alalamav, committed by GitHub

Deprecate server data usage timeframe API (#537)

parent 46b7ee0e
......@@ -157,7 +157,7 @@ export interface AccessKey {
accessUrl: string;
}
// Byte transfer stats for a sliding timeframe, including both inbound and outbound.
// Byte transfer stats for the past 30 days, including both inbound and outbound.
// TODO: this is copied at src/shadowbox/model/metrics.ts. Both copies should
// be kept in sync, until we can find a way to share code between the web_app
// and shadowbox.
......
......@@ -120,7 +120,7 @@ curl --insecure -X DELETE $API_URL/access-keys/2
```
Set an access key data limit
(e.g. limit outbound data transfer for access key 2 to 1MB over a 24 hour sliding timeframe)
(e.g. limit outbound data transfer for access key 2 to 1MB over 30 days)
```
curl -v --insecure -X PUT -H "Content-Type: application/json" -d '{"limit": {"bytes": 1000}}' $API_URL/access-keys/2/data-limit
```
......
......@@ -12,8 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
import {DataUsageTimeframe} from '../model/metrics';
export type AccessKeyId = string;
export type AccessKeyMetricsId = string;
......@@ -44,7 +42,7 @@ export interface AccessKey {
readonly proxyParams: ProxyParams;
// Admin-controlled, data transfer limit for this access key. Unlimited if unset.
readonly dataLimit?: DataUsage;
// Data transferred by this access key over a timeframe specified by the server.
// Data transferred by this access key over a 30 day sliding timeframe.
readonly dataUsage: DataUsage;
// Returns whether the access key has exceeded its data transfer limit.
isOverDataLimit(): boolean;
......@@ -67,6 +65,4 @@ export interface AccessKeyRepository {
setAccessKeyDataLimit(id: AccessKeyId, limit: DataUsage): Promise<void>;
// Clears the transfer limit for the specified access key. Throws on failure.
removeAccessKeyDataLimit(id: AccessKeyId): Promise<void>;
// Sets the data usage timeframe for access key data limit enforcement. Throws on failure.
setDataUsageTimeframe(timeframe: DataUsageTimeframe): Promise<void>;
}
......@@ -44,9 +44,3 @@ export class InvalidAccessKeyDataLimit extends OutlineError {
super('Must provide a limit with a non-negative integer value for "bytes"');
}
}
/**
 * Thrown when a data-usage timeframe is missing or its `hours` value is not a
 * positive integer (see ServerAccessKeyRepository.setDataUsageTimeframe).
 */
export class InvalidDataUsageTimeframe extends OutlineError {
  constructor() {
    // Fix: "a positive integer values" -> "a positive integer value", matching
    // the phrasing of InvalidAccessKeyDataLimit's message.
    super('Must provide a timeframe with a positive integer value for "hours"');
  }
}
......@@ -52,26 +52,6 @@ paths:
description: The requested port wasn't an integer from 1 through 65535, or the request had no port parameter.
'409':
description: The requested port was already in use by another service.
/server/data-usage-timeframe:
put:
description: Sets the sliding timeframe for measuring data usage and enforcing access keys data limits.
tags:
- Server
- Data limit
requestBody:
required: true
content:
application/json:
schema:
$ref: "#/components/schemas/DataUsageTimeframe"
examples:
'0':
value: "{hours: 24}"
responses:
'204':
description: The data usage timeframe was successfully changed.
'400':
description: Invalid timeframe value.
/name:
put:
......@@ -314,14 +294,6 @@ components:
type: number
portForNewAccessKeys:
type: integer
dataUsageTimeframe:
$ref: "#/components/schemas/DataUsageTimeframe"
DataUsageTimeframe:
properties:
hours:
type: integer
minimum: 1
DataUsage:
properties:
......
......@@ -144,7 +144,7 @@ async function main() {
}
const accessKeyRepository = new ServerAccessKeyRepository(
serverConfig.data().portForNewAccessKeys, proxyHostname, accessKeyConfig, shadowsocksServer,
prometheusClient, serverConfig.data().dataUsageTimeframe);
prometheusClient);
const metricsReader = new PrometheusUsageMetrics(prometheusClient);
const toMetricsId = (id: AccessKeyId) => {
......
......@@ -16,7 +16,6 @@ import * as net from 'net';
import {InMemoryConfig} from '../infrastructure/json_config';
import {AccessKey, AccessKeyRepository, DataUsage} from '../model/access_key';
import {DataUsageTimeframe} from '../model/metrics';
import {ShadowsocksManagerService} from './manager_service';
import {FakePrometheusClient, FakeShadowsocksServer} from './mocks/mocks';
......@@ -26,7 +25,6 @@ import {SharedMetricsPublisher} from './shared_metrics';
interface ServerInfo {
name: string;
dataUsageTimeframe: DataUsageTimeframe;
}
const NEW_PORT = 12345;
......@@ -75,22 +73,6 @@ describe('ShadowsocksManagerService', () => {
},
done);
});
// Verifies that GET /server surfaces the dataUsageTimeframe stored in the
// server config, alongside a 200 response.
it('Returns data usage timeframe in server config', (done) => {
const repo = getAccessKeyRepository();
const serverConfig = new InMemoryConfig({} as ServerConfigJson);
// 30 days, expressed in hours.
const timeframe = {hours: 24 * 30};
serverConfig.data().dataUsageTimeframe = timeframe;
const service = new ShadowsocksManagerService('default name', serverConfig, repo, null, null);
service.getServer(
{params: {}}, {
send: (httpCode, data: ServerInfo) => {
expect(httpCode).toEqual(200);
expect(data.dataUsageTimeframe).toEqual(timeframe);
responseProcessed = true; // required for afterEach to pass.
}
},
done);
});
});
describe('renameServer', () => {
......@@ -511,58 +493,6 @@ describe('ShadowsocksManagerService', () => {
});
});
// Tests for the PUT /server/data-usage-timeframe handler
// (ShadowsocksManagerService.setDataUsageTimeframe).
describe('setDataUsageTimeframe', () => {
// Happy path: a valid integer `hours` is persisted to the server config and
// the handler replies 204 No Content.
it('sets data usage timeframe', (done) => {
const repo = getAccessKeyRepository();
const serverConfig = new InMemoryConfig({} as ServerConfigJson);
serverConfig.data().dataUsageTimeframe = {hours: 123};
const service = new ShadowsocksManagerService('default name', serverConfig, repo, null, null);
const hours = 456;
const res = {
send: (httpCode, data) => {
expect(httpCode).toEqual(204);
// The new value must have replaced the original {hours: 123}.
expect(serverConfig.data().dataUsageTimeframe.hours).toEqual(hours);
responseProcessed = true; // required for afterEach to pass.
}
};
service.setDataUsageTimeframe({params: {hours}}, res, done);
});
// Validation: missing, negative, zero, and non-integer `hours` are all
// rejected with HTTP 400. Only the last case signals `done`; the earlier
// calls rely on their `next` callbacks running before it.
it('returns 400 when the hours value is missing or invalid', async (done) => {
const repo = getAccessKeyRepository();
const service = new ShadowsocksManagerService('default name', null, repo, null, null);
const res = {send: (httpCode, data) => {}};
service.setDataUsageTimeframe({params: {}}, res, (error) => {
expect(error.statusCode).toEqual(400);
});
service.setDataUsageTimeframe({params: {hours: -1}}, res, (error) => {
expect(error.statusCode).toEqual(400);
});
service.setDataUsageTimeframe({params: {hours: 0}}, res, (error) => {
expect(error.statusCode).toEqual(400);
});
service.setDataUsageTimeframe({params: {hours: 0.1}}, res, (error) => {
expect(error.statusCode).toEqual(400);
responseProcessed = true; // required for afterEach to pass.
done();
});
});
// Failure path: if the repository throws, the handler replies 500 and the
// previously configured value must remain untouched.
it('returns 500 when the repository throws an exception', async (done) => {
const repo = getAccessKeyRepository();
spyOn(repo, 'setDataUsageTimeframe').and.throwError('cannot write to disk');
const serverConfig = new InMemoryConfig({} as ServerConfigJson);
const service = new ShadowsocksManagerService('default name', serverConfig, repo, null, null);
serverConfig.data().dataUsageTimeframe = {hours: 123};
const res = {send: (httpCode, data) => {}};
service.setDataUsageTimeframe({params: {hours: 456}}, res, (error) => {
expect(error.statusCode).toEqual(500);
// The change should not have been persisted.
expect(serverConfig.data().dataUsageTimeframe.hours).toEqual(123);
responseProcessed = true; // required for afterEach to pass.
done();
});
});
});
describe('getShareMetrics', () => {
it('Returns value from sharedMetrics', (done) => {
const sharedMetrics = fakeSharedMetricsReporter();
......@@ -628,5 +558,5 @@ function fakeSharedMetricsReporter(): SharedMetricsPublisher {
function getAccessKeyRepository(): AccessKeyRepository {
return new ServerAccessKeyRepository(
OLD_PORT, 'hostname', new InMemoryConfig<AccessKeyConfigJson>({accessKeys: [], nextId: 0}),
new FakeShadowsocksServer(), new FakePrometheusClient({}), {hours: 24 * 30});
new FakeShadowsocksServer(), new FakePrometheusClient({}));
}
......@@ -79,8 +79,6 @@ export function bindService(
apiServer.put(
`${apiPrefix}/server/port-for-new-access-keys`,
service.setPortForNewAccessKeys.bind(service));
apiServer.put(
`${apiPrefix}/server/data-usage-timeframe`, service.setDataUsageTimeframe.bind(service));
apiServer.post(`${apiPrefix}/access-keys`, service.createNewAccessKey.bind(service));
apiServer.get(`${apiPrefix}/access-keys`, service.listAccessKeys.bind(service));
......@@ -140,8 +138,6 @@ export class ShadowsocksManagerService {
serverId: this.serverConfig.data().serverId,
metricsEnabled: this.serverConfig.data().metricsEnabled || false,
createdTimestampMs: this.serverConfig.data().createdTimestampMs,
portForNewAccessKeys: this.serverConfig.data().portForNewAccessKeys,
dataUsageTimeframe: this.serverConfig.data().dataUsageTimeframe,
version
});
next();
......@@ -295,40 +291,11 @@ export class ShadowsocksManagerService {
}
}
// Handler for PUT /server/data-usage-timeframe. Sets the sliding timeframe
// (in hours) used to measure data usage and enforce access-key data limits.
// Responds 204 on success, 400 on a missing/non-integer `hours` parameter or
// a repository validation failure, and 500 on any other error.
public setDataUsageTimeframe(req: RequestType, res: ResponseType, next: restify.Next) {
try {
logging.debug(`setDataUsageTimeframe request ${JSON.stringify(req.params)}`);
const hours = req.params.hours;
// NOTE(review): `!hours` also matches hours === 0. Zero is invalid anyway,
// but the client then sees a "missing" message rather than "invalid".
if (!hours) {
return next(
new restify.MissingParameterError({statusCode: 400}, 'Parameter `hours` is missing'));
}
if (typeof hours !== 'number' ||
!Number.isInteger(hours)) { // The access key repository will validate the value.
return next(new restify.InvalidArgumentError(
{statusCode: 400}, 'Parameter `hours` must be an integer'));
}
const dataUsageTimeframe = {hours};
// Apply to the repository first: it throws InvalidDataUsageTimeframe for
// non-positive values, so the config below is only written once the new
// timeframe has been accepted.
this.accessKeys.setDataUsageTimeframe(dataUsageTimeframe);
this.serverConfig.data().dataUsageTimeframe = dataUsageTimeframe;
this.serverConfig.write();
res.send(HttpSuccess.NO_CONTENT);
return next();
} catch (error) {
logging.error(error);
// Map repository validation failures to 400; everything else is a 500.
if (error instanceof errors.InvalidDataUsageTimeframe) {
return next(new restify.InvalidArgumentError({statusCode: 400}, error.message));
}
return next(new restify.InternalServerError());
}
}
public async getDataUsage(req: RequestType, res: ResponseType, next: restify.Next) {
// TODO(alalama): use AccessKey.dataUsage to avoid querying Prometheus. Deprecate this call in
// the manager in favor of `GET /access-keys`.
try {
const timeframe = this.serverConfig.data().dataUsageTimeframe;
res.send(HttpSuccess.OK, await this.managerMetrics.getOutboundByteTransfer(timeframe));
res.send(HttpSuccess.OK, await this.managerMetrics.getOutboundByteTransfer({hours: 30 * 24}));
return next();
} catch (error) {
logging.error(error);
......
......@@ -19,7 +19,6 @@ import {PortProvider} from '../infrastructure/get_port';
import {InMemoryConfig} from '../infrastructure/json_config';
import {AccessKeyRepository, DataUsage} from '../model/access_key';
import * as errors from '../model/errors';
import {DataUsageTimeframe} from '../model/metrics';
import {FakePrometheusClient, FakeShadowsocksServer} from './mocks/mocks';
import {AccessKeyConfigJson, ServerAccessKeyRepository} from './server_access_key';
......@@ -430,21 +429,6 @@ describe('ServerAccessKeyRepository', () => {
expect(serverAccessKeys[1].id).toEqual(accessKey3.id);
done();
});
// The getter returns the timeframe the repository was constructed with.
it('getDataUsageTimeframe returns the data limit timeframe', async (done) => {
const timeframe = {hours: 12345};
const repo = new RepoBuilder().dataUsageTimeframe(timeframe).build();
expect(repo.getDataUsageTimeframe()).toEqual(timeframe);
done();
});
// The setter replaces the timeframe, observable through the getter.
it('setDataUsageTimeframe sets the data limit timeframe', async (done) => {
const repo = new RepoBuilder().build();
const timeframe = {hours: 12345};
await repo.setDataUsageTimeframe(timeframe);
expect(repo.getDataUsageTimeframe()).toEqual(timeframe);
done();
});
});
// Convenience function to expect that an asynchronous function does not throw an error. Note that
......@@ -480,7 +464,6 @@ class RepoBuilder {
private keyConfig_ = new InMemoryConfig<AccessKeyConfigJson>({accessKeys: [], nextId: 0});
private shadowsocksServer_ = new FakeShadowsocksServer();
private prometheusClient_ = new FakePrometheusClient({});
private dataUsageTimeframe_ = {hours: 30 * 24};
public port(port: number): RepoBuilder {
this.port_ = port;
......@@ -498,14 +481,9 @@ class RepoBuilder {
this.prometheusClient_ = prometheusClient;
return this;
}
// Overrides the builder's default timeframe ({hours: 30 * 24}) used when
// constructing the repository. Returns `this` for fluent chaining.
public dataUsageTimeframe(dataUsageTimeframe: DataUsageTimeframe) {
this.dataUsageTimeframe_ = dataUsageTimeframe;
return this;
}
public build(): ServerAccessKeyRepository {
return new ServerAccessKeyRepository(
this.port_, 'hostname', this.keyConfig_, this.shadowsocksServer_, this.prometheusClient_,
this.dataUsageTimeframe_);
this.port_, 'hostname', this.keyConfig_, this.shadowsocksServer_, this.prometheusClient_);
}
}
......@@ -22,7 +22,6 @@ import * as logging from '../infrastructure/logging';
import {PrometheusClient} from '../infrastructure/prometheus_scraper';
import {AccessKey, AccessKeyId, AccessKeyMetricsId, AccessKeyRepository, DataUsage, ProxyParams} from '../model/access_key';
import * as errors from '../model/errors';
import {DataUsageTimeframe} from '../model/metrics';
import {ShadowsocksServer} from '../model/shadowsocks_server';
import {PrometheusManagerMetrics} from './manager_metrics';
......@@ -103,8 +102,7 @@ export class ServerAccessKeyRepository implements AccessKeyRepository {
constructor(
private portForNewAccessKeys: number, private proxyHostname: string,
private keyConfig: JsonConfig<AccessKeyConfigJson>,
private shadowsocksServer: ShadowsocksServer, private prometheusClient: PrometheusClient,
private dataLimitTimeframe: DataUsageTimeframe) {
private shadowsocksServer: ShadowsocksServer, private prometheusClient: PrometheusClient) {
if (this.keyConfig.data().accessKeys === undefined) {
this.keyConfig.data().accessKeys = [];
}
......@@ -212,18 +210,6 @@ export class ServerAccessKeyRepository implements AccessKeyRepository {
return Promise.resolve();
}
// Sets the sliding timeframe used for data-limit enforcement, then re-runs
// enforcement against the new window. Throws InvalidDataUsageTimeframe
// (synchronously) when `timeframe` is missing or `hours` is not positive.
setDataUsageTimeframe(timeframe: DataUsageTimeframe): Promise<void> {
if (!timeframe || timeframe.hours <= 0) {
throw new errors.InvalidDataUsageTimeframe();
}
this.dataLimitTimeframe = timeframe;
// Re-evaluate all keys' over-limit status under the new timeframe.
return this.enforceAccessKeyDataLimits();
}
// Returns the timeframe currently used for data-limit enforcement.
getDataUsageTimeframe(): DataUsageTimeframe {
return this.dataLimitTimeframe;
}
getMetricsId(id: AccessKeyId): AccessKeyMetricsId|undefined {
const accessKey = this.getAccessKey(id);
return accessKey ? accessKey.metricsId : undefined;
......@@ -234,7 +220,7 @@ export class ServerAccessKeyRepository implements AccessKeyRepository {
async enforceAccessKeyDataLimits() {
const metrics = new PrometheusManagerMetrics(this.prometheusClient);
const bytesTransferredById =
(await metrics.getOutboundByteTransfer(this.dataLimitTimeframe)).bytesTransferredByUserId;
(await metrics.getOutboundByteTransfer({hours: 30 * 24})).bytesTransferredByUserId;
let limitStatusChanged = false;
for (const accessKey of this.accessKeys) {
const wasOverDataLimit = accessKey.isOverDataLimit();
......
......@@ -15,7 +15,6 @@
import * as uuidv4 from 'uuid/v4';
import * as json_config from '../infrastructure/json_config';
import {DataUsageTimeframe} from '../model/metrics';
// Serialized format for the server config.
// WARNING: Renaming fields will break backwards-compatibility.
......@@ -32,8 +31,6 @@ export interface ServerConfigJson {
portForNewAccessKeys?: number;
// Which staged rollouts we should force enabled or disabled.
rollouts?: RolloutConfigJson[];
// Sliding timeframe, in hours, used to measure data usage and enforce data limits.
dataUsageTimeframe?: DataUsageTimeframe;
// We don't serialize the shadowbox version, this is obtained dynamically from node.
// Public proxy hostname.
hostname?: string;
......@@ -55,7 +52,6 @@ export function readServerConfig(filename: string): json_config.JsonConfig<Serve
config.data().serverId = config.data().serverId || uuidv4();
config.data().metricsEnabled = config.data().metricsEnabled || false;
config.data().createdTimestampMs = config.data().createdTimestampMs || Date.now();
config.data().dataUsageTimeframe = config.data().dataUsageTimeframe || {hours: 30 * 24};
config.data().hostname = config.data().hostname || process.env.SB_PUBLIC_IP;
config.write();
return config;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment