Signed-off-by: Eric Zhao <sczyh16@gmail.com>
@@ -25,7 +25,7 @@ public final class ClusterErrorMessages {
public static final String UNEXPECTED_STATUS = "unexpected status";
public static final String TOO_MANY_REQUESTS = "too many requests (client side)";
public static final String REQUEST_TIME_OUT = "request time out";
public static final String CLIENT_NOT_READY = "client not ready (not running or initializing)";
public static final String CLIENT_NOT_READY = "client not ready";
public static final String NO_RULES_IN_SERVER = "no rules in token server";
private ClusterErrorMessages() {}
@@ -104,6 +104,7 @@ public final class ClusterFlowRuleManager {
}
public static void setPropertySupplier(Function<String, SentinelProperty<List<FlowRule>>> propertySupplier) {
AssertUtil.notNull(propertySupplier, "flow rule property supplier cannot be null");
ClusterFlowRuleManager.propertySupplier = propertySupplier;
}
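
For context on the setPropertySupplier change above: the added AssertUtil.notNull check makes a null supplier fail fast instead of being silently stored. Below is a minimal usage sketch, not part of the diff; the demo class name is made up, the package paths and DynamicSentinelProperty (Sentinel's in-memory property implementation) are assumed from the usual Sentinel source layout.

import java.util.List;
import java.util.function.Function;

import com.alibaba.csp.sentinel.cluster.flow.rule.ClusterFlowRuleManager;
import com.alibaba.csp.sentinel.property.DynamicSentinelProperty;
import com.alibaba.csp.sentinel.property.SentinelProperty;
import com.alibaba.csp.sentinel.slots.block.flow.FlowRule;

public class ClusterFlowSupplierDemo {
    public static void main(String[] args) {
        // Supplier that creates a dynamic flow-rule property per cluster namespace.
        Function<String, SentinelProperty<List<FlowRule>>> supplier =
            namespace -> new DynamicSentinelProperty<>();
        ClusterFlowRuleManager.setPropertySupplier(supplier);

        // With the assertion added in this diff, a null supplier now fails fast:
        // ClusterFlowRuleManager.setPropertySupplier(null); // -> IllegalArgumentException
    }
}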
@@ -44,17 +44,12 @@ public class RateLimiterController implements TrafficShapingController {
@Override
public boolean canPass(Node node, int acquireCount, boolean prioritized) {
/*
1. Pass when acquire count is less or equal than 0
2. Reject when count is less or equal than 0.
Otherwise,the costTime will be max of long and waitTime will overflow in some cases.
This will lead to pass of following request.It's dangerous!!!
*/
// Pass when acquire count is less than or equal to 0.
if (acquireCount <= 0) {
return true;
}
// Reject when count is less than or equal to 0.
// Otherwise, the costTime will be Long.MAX_VALUE and waitTime will overflow in some cases.
if (count <= 0) {
return false;
}
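
The retained comment above is the key point of this hunk: if count is not positive, the pacing cost computed later degenerates. A standalone sketch (not the Sentinel code itself) of why the early reject matters:

public class PacingOverflowSketch {
    public static void main(String[] args) {
        int acquireCount = 1;
        double count = 0;  // a non-positive QPS threshold

        // Same shape as the pacing-interval calculation: 1.0 / 0 is Infinity,
        // and Math.round(Infinity) clamps to Long.MAX_VALUE.
        long costTime = Math.round(1.0 * acquireCount / count * 1000);

        // Adding the last pass time then overflows to a negative value, so a
        // check like "expected time <= now" would hold and every request would pass.
        long expectedTime = costTime + System.currentTimeMillis();

        System.out.println(costTime);      // 9223372036854775807
        System.out.println(expectedTime);  // negative: the overflow the comment warns about
    }
}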
@@ -27,32 +27,37 @@ import com.alibaba.csp.sentinel.slots.block.flow.TrafficShapingController;
* rate-based, which means that we need to translate rate to QPS.
* </p>
*
* <p>
* Requests arriving at the pulse may drag down long idle systems even though it
* has a much larger handling capability in stable period. It usually happens in
* scenarios that require extra time for initialization, e.g. DB
* establishes a connection; connects to a remote service, and so on.
*
* That’s why we need “warm up”.
*
* Sentinel's "warm-up" implementation is based on the guava-based algorithm.
* However, Guava’s implementation focus on adjusting the request interval, in
* other words, a Leaky bucket. Sentinel pays more attention to controlling the
* count of incoming requests per second without calculating its interval, it is
* more like a “Token bucket.”
* scenarios that require extra time for initialization, e.g. DB establishes a connection,
* connects to a remote service, and so on. That’s why we need “warm up”.
* </p>
*
* <p>
* Sentinel's "warm-up" implementation is based on Guava's algorithm.
* However, Guava's implementation focuses on adjusting the request interval,
* which is similar to a leaky bucket. Sentinel pays more attention to
* controlling the count of incoming requests per second without calculating its interval,
* which resembles the token bucket algorithm.
* </p>
*
* <p>
* The remaining tokens in the bucket are used to measure the system utility.
* Suppose a system can handle b requests per second. Every second b tokens will
* be added into the bucket until the bucket is full. When the system processes
* a request, it takes a token from the bucket. The more tokens left in the
* bucket, the lower the utilization of the system; when the token count in the
* bucket is above a certain threshold, we call it a "saturation" state.
* </p>
*
* <p>
* Based on Guava's theory, we can write a linear equation of the form
* y = m * x + b, where y (a.k.a. y(x), or qps(q)) is our expected QPS
* given a saturated period (e.g. 3 minutes in), m is the rate of change from
* our cold (minimum) rate to our stable (maximum) rate, and x (or q) is the
* number of occupied tokens.
* </p>
*
* @author jialiang.linjl
*/
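
The reworked javadoc above describes a linear relation between stored tokens and allowed QPS. The sketch below is a rough standalone illustration of that relation with assumed parameters (stable rate 100 QPS, cold factor 3, 10-second warm-up); it mirrors the thresholds and slope of the Guava-style warm-up model rather than reproducing WarmUpController itself.

public class WarmUpLineSketch {
    public static void main(String[] args) {
        double count = 100;          // stable QPS (the "hot" rate)
        int coldFactor = 3;          // cold rate = count / coldFactor
        int warmUpPeriodInSec = 10;  // time to drain the extra tokens

        // Thresholds analogous to the warm-up model: above warningToken the
        // system is considered "cold" and the allowed QPS drops linearly.
        double warningToken = (warmUpPeriodInSec * count) / (coldFactor - 1);
        double maxToken = warningToken + 2.0 * warmUpPeriodInSec * count / (1 + coldFactor);
        double slope = (coldFactor - 1.0) / count / (maxToken - warningToken);

        // y = m * x + b: the more tokens left in the bucket, the lower the allowed QPS.
        for (double storedTokens : new double[] {warningToken, (warningToken + maxToken) / 2, maxToken}) {
            double aboveToken = storedTokens - warningToken;
            double allowedQps = 1.0 / (aboveToken * slope + 1.0 / count);
            System.out.printf("storedTokens=%.0f -> allowed QPS ~ %.1f%n", storedTokens, allowedQps);
        }
        // With these parameters the output runs from 100 QPS (fully warmed up)
        // down to roughly 33 QPS (cold rate = count / coldFactor).
    }
}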
@@ -38,7 +38,7 @@ public class LogSlot extends AbstractLinkedProcessorSlot<DefaultNode> {
context.getOrigin(), count);
throw e;
} catch (Throwable e) {
RecordLog.info("Entry exception", e);
RecordLog.warn("Unexpected entry exception", e);
}
}
@@ -48,7 +48,7 @@ public class LogSlot extends AbstractLinkedProcessorSlot<DefaultNode> {
try {
fireExit(context, resourceWrapper, count, args);
} catch (Throwable e) {
RecordLog.info("Entry exit exception", e);
RecordLog.warn("Unexpected entry exit exception", e);
}
}
}
@@ -22,7 +22,13 @@ import java.lang.management.ManagementFactory;
*/
public final class PidUtil {
/**
* Resolve and get current process ID.
*
* @return current process ID
*/
public static int getPid() {
// Note: this will trigger local host resolve, which might be slow.
String name = ManagementFactory.getRuntimeMXBean().getName();
return Integer.parseInt(name.split("@")[0]);
}
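
Since the note added above points out that resolving the runtime MXBean name may be slow, a caller that needs the PID repeatedly could resolve it once and reuse it. A minimal sketch: PidHolder is a hypothetical helper, and the package of PidUtil is assumed to be com.alibaba.csp.sentinel.util.

import com.alibaba.csp.sentinel.util.PidUtil;

public class PidHolder {
    // Resolved once per JVM, then reused, so the potentially slow lookup happens only once.
    private static final int PID = PidUtil.getPid();

    public static int pid() {
        return PID;
    }

    public static void main(String[] args) {
        System.out.println("Current process ID: " + pid());
    }
}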