http {
    lua_code_cache off;
}
@@ -20,4 +20,4 @@ location ~*
local t = json.decode(str)
if t then
ngx.say(" --> ", type(t))
-end
The cjson.safe module returns nil when parsing fails
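In other words, unlike plain cjson, the cjson.safe variant reports a parse failure through its return values (nil plus an error message) instead of raising a Lua error, which is why the `if t then` guard above is sufficient.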
Another issue: when connecting to Redis with a domain name as the host, you get "failed to connect: no resolver defined to resolve 'redis.xxxxxx.com'". This requires nginx's resolver directive, e.g. resolver 8.8.8.8 valid=3600s;
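The underlying reason, as far as I understand it, is that connections opened from Lua (cosockets) do not go through nginx's upstream machinery, so hostnames can only be resolved through the resolver directive; valid=3600s caches lookup results for an hour so that not every connect pays for a DNS query.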
\ No newline at end of file
diff --git a/2019/12/26/redis数据结构介绍/index.html b/2019/12/26/redis数据结构介绍/index.html
index 92b03616bb..a340491ca7 100644
--- a/2019/12/26/redis数据结构介绍/index.html
+++ b/2019/12/26/redis数据结构介绍/index.html
@@ -1,4 +1,4 @@
-redis数据结构介绍-第一部分 SDS,链表,字典 | Nicksxs's Blog
This should be the last post in the redis series, covering the quicklist. The linked list introduced at the very beginning was actually used as the underlying structure of the list type in earlier redis versions, but a plain linked list has the drawbacks already mentioned: insertion is convenient, but space efficiency is low, binary search is impossible, and retrieval is slow. The ziplist arose for the same reason: the wish for better performance in both storage and access. At first I didn't understand what made the quicklist quick, and then I understood one thing: there is no silver bullet; the experts simply use the most suitable data structure at the right moment to maximize performance. One of the tricks is combining and switching between data structures; for example, Java's HashMap converts a bucket's linked list into a red-black tree once it holds more than 8 nodes, to improve access efficiency. Enough digression; back to the quicklist. This structure is mainly used for the list type, and if I said the quicklist is really just a linked list you might not believe it, but in fact the quicklist can indeed be regarded as a doubly linked list. Let's look at the code
/* quicklistNode is a 32 byte struct describing a ziplist for a quicklist.
* We use bit fields keep the quicklistNode at 32 bytes.
* count: 16 bits, max 65536 (max zl bytes is 65k, so max count actually < 32k).
* encoding: 2 bits, RAW=1, LZF=2.
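To make the "doubly linked list of blocks" idea concrete, here is a toy Java sketch of the concept (my illustration only: ToyQuicklist and its names are made up, and real redis nodes hold ziplists, optionally LZF-compressed, rather than ArrayLists):

import java.util.ArrayList;
import java.util.List;

// Toy sketch of the quicklist idea: a doubly linked list whose nodes each
// pack several values, trading per-element pointer overhead for locality.
class ToyQuicklist {
    static final int NODE_CAPACITY = 4; // redis tunes this via list-max-ziplist-size

    static class Node {
        final List<String> entries = new ArrayList<>(NODE_CAPACITY);
        Node prev, next;
    }

    private Node head, tail;

    void rpush(String value) {
        if (tail == null || tail.entries.size() == NODE_CAPACITY) {
            Node n = new Node(); // start a new block when the tail is full
            n.prev = tail;
            if (tail != null) tail.next = n; else head = n;
            tail = n;
        }
        tail.entries.add(value);
    }
}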
diff --git a/2020/04/12/redis系列介绍七/index.html b/2020/04/12/redis系列介绍七/index.html
index 8d90dcfe3b..3d78ec3778 100644
--- a/2020/04/12/redis系列介绍七/index.html
+++ b/2020/04/12/redis系列介绍七/index.html
@@ -1,4 +1,4 @@
-redis系列介绍七-过期策略 | Nicksxs's Blog
/* This function is called when we are going to perform some operation
* in a given key, but such key may be already logically expired even if
* it still exists in the database. The main way this function is called
* is via lookupKey*() family of functions.
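The pattern this comment describes, checking and deleting a logically expired key at lookup time, can be sketched in Java roughly as follows (an illustrative toy of the same "lazy expiration" idea, not redis's actual implementation):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Expire-on-access ("lazy" expiration): a key is only checked and removed
// when someone looks it up, mirroring what the lookup path does in redis.
class LazyExpiringCache<K, V> {
    private record Entry<V>(V value, long expireAtMillis) {}

    private final Map<K, Entry<V>> store = new ConcurrentHashMap<>();

    void put(K key, V value, long ttlMillis) {
        store.put(key, new Entry<>(value, System.currentTimeMillis() + ttlMillis));
    }

    V get(K key) {
        Entry<V> e = store.get(key);
        if (e == null) return null;
        if (System.currentTimeMillis() >= e.expireAtMillis) {
            store.remove(key, e); // logically expired: delete on access
            return null;
        }
        return e.value;
    }
}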
diff --git a/2020/04/18/redis系列介绍八/index.html b/2020/04/18/redis系列介绍八/index.html
index 9af6978d25..dd117a6a0a 100644
--- a/2020/04/18/redis系列介绍八/index.html
+++ b/2020/04/18/redis系列介绍八/index.html
@@ -1,4 +1,4 @@
-redis系列介绍八-淘汰策略 | Nicksxs's Blog
At my previous company and my current one, the config center in use is Apollo, an industry-proven and fairly powerful configuration management system; notably, since 0.10 it supports automatically updating configuration values injected via the @Value annotation. A colleague happened to ask me about this today, so I'm writing it down. It relies on Spring's powerful bean lifecycle management: by implementing the BeanPostProcessor interface and using the postProcessBeforeInitialization method, it examines a bean's fields and methods for the @Value annotation, and if present registers them in a map. See this method, com.ctrip.framework.apollo.spring.annotation.SpringValueProcessor#processField
@Override
+Apollo 的 value 注解是怎么自动更新的 | Nicksxs's Blog
At my previous company and my current one, the config center in use is Apollo, an industry-proven and fairly powerful configuration management system; notably, since 0.10 it supports automatically updating configuration values injected via the @Value annotation. A colleague happened to ask me about this today, so I'm writing it down. It relies on Spring's powerful bean lifecycle management: by implementing the BeanPostProcessor interface and using the postProcessBeforeInitialization method, it examines a bean's fields and methods for the @Value annotation, and if present registers them in a map. See this method, com.ctrip.framework.apollo.spring.annotation.SpringValueProcessor#processField
@Override
protected void processField(Object bean, String beanName, Field field) {
    // register @Value on field
    Value value = field.getAnnotation(Value.class);
@@ -61,4 +61,4 @@
updateSpringValue(val);
    }
}
- }
The principle is actually quite simple; you just need to understand it a bit
\ No newline at end of file
+ }
The principle is actually quite simple; you just need to understand it a bit
\ No newline at end of file
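The registration mechanism the post describes, a BeanPostProcessor that records every @Value field so a config-change listener can re-inject it later, might look roughly like this stripped-down Java sketch (my simplification; ValueFieldRegistrar and SpringValueRef are invented stand-ins for Apollo's SpringValueProcessor/SpringValue, and the real code also resolves placeholders and handles methods, not just fields):

import java.lang.reflect.Field;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CopyOnWriteArrayList;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.beans.factory.config.BeanPostProcessor;

// Simplified sketch of the idea behind SpringValueProcessor: remember every
// @Value field so its bean can be re-injected when the config changes.
public class ValueFieldRegistrar implements BeanPostProcessor {

    // placeholder expression -> fields to update when that config key changes
    private final Map<String, List<SpringValueRef>> registry = new ConcurrentHashMap<>();

    record SpringValueRef(Object bean, Field field, String placeholder) {}

    @Override
    public Object postProcessBeforeInitialization(Object bean, String beanName) {
        for (Field field : bean.getClass().getDeclaredFields()) {
            Value value = field.getAnnotation(Value.class);
            if (value != null) {
                registry.computeIfAbsent(value.value(), k -> new CopyOnWriteArrayList<>())
                        .add(new SpringValueRef(bean, field, value.value()));
            }
        }
        return bean;
    }

    // would be called from a config-change listener (Apollo does this via its
    // AutoUpdateConfigChangeListener); newVal is assumed already converted
    void onConfigChanged(String placeholder, Object newVal) throws IllegalAccessException {
        for (SpringValueRef ref : registry.getOrDefault(placeholder, List.of())) {
            ref.field().setAccessible(true);
            ref.field().set(ref.bean(), newVal); // re-inject the new value
        }
    }
}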
diff --git a/baidusitemap.xml b/baidusitemap.xml
index 748f8a8502..b9acbe06bf 100644
--- a/baidusitemap.xml
+++ b/baidusitemap.xml
@@ -169,11 +169,11 @@
2022-06-11
- https://nicksxs.me/2022/02/27/Disruptor-%E7%B3%BB%E5%88%97%E4%BA%8C/
+ https://nicksxs.me/2020/08/22/Filter-Intercepter-Aop-%E5%95%A5-%E5%95%A5-%E5%95%A5-%E8%BF%99%E4%BA%9B%E9%83%BD%E6%98%AF%E5%95%A5/
+ 2022-06-11
- https://nicksxs.me/2020/08/22/Filter-Intercepter-Aop-%E5%95%A5-%E5%95%A5-%E5%95%A5-%E8%BF%99%E4%BA%9B%E9%83%BD%E6%98%AF%E5%95%A5/
+ https://nicksxs.me/2022/02/27/Disruptor-%E7%B3%BB%E5%88%97%E4%BA%8C/
+ 2022-06-11
@@ -205,15 +205,15 @@
2022-06-11
- https://nicksxs.me/2021/04/18/rust%E5%AD%A6%E4%B9%A0%E7%AC%94%E8%AE%B0/
+ https://nicksxs.me/2021/04/18/rust%E5%AD%A6%E4%B9%A0%E7%AC%94%E8%AE%B0-%E6%89%80%E6%9C%89%E6%9D%83%E4%BA%8C/
+ 2022-06-11
- https://nicksxs.me/2022/01/30/spring-event-%E4%BB%8B%E7%BB%8D/
+ https://nicksxs.me/2021/04/18/rust%E5%AD%A6%E4%B9%A0%E7%AC%94%E8%AE%B0/
+ 2022-06-11
- https://nicksxs.me/2021/04/18/rust%E5%AD%A6%E4%B9%A0%E7%AC%94%E8%AE%B0-%E6%89%80%E6%9C%89%E6%9D%83%E4%BA%8C/
+ https://nicksxs.me/2022/01/30/spring-event-%E4%BB%8B%E7%BB%8D/
+ 2022-06-11
@@ -225,11 +225,11 @@
2022-06-11
- https://nicksxs.me/2021/11/14/%E4%BB%8B%E7%BB%8D%E4%B8%8B%E6%9C%80%E8%BF%91%E6%AF%94%E8%BE%83%E5%AE%9E%E7%94%A8%E7%9A%84%E7%AB%AF%E5%8F%A3%E8%BD%AC%E5%8F%91/
+ https://nicksxs.me/2020/11/29/%E4%BB%8E%E6%B8%85%E5%8D%8E%E7%BE%8E%E9%99%A2%E5%AD%A6%E5%A7%90%E8%81%8A%E8%81%8A%E6%88%91%E4%BB%AC%E8%BA%AB%E8%BE%B9%E7%9A%84%E6%81%B6%E4%BA%BA/
+ 2022-06-11
- https://nicksxs.me/2020/11/29/%E4%BB%8E%E6%B8%85%E5%8D%8E%E7%BE%8E%E9%99%A2%E5%AD%A6%E5%A7%90%E8%81%8A%E8%81%8A%E6%88%91%E4%BB%AC%E8%BA%AB%E8%BE%B9%E7%9A%84%E6%81%B6%E4%BA%BA/
+ https://nicksxs.me/2021/11/14/%E4%BB%8B%E7%BB%8D%E4%B8%8B%E6%9C%80%E8%BF%91%E6%AF%94%E8%BE%83%E5%AE%9E%E7%94%A8%E7%9A%84%E7%AB%AF%E5%8F%A3%E8%BD%AC%E5%8F%91/
+ 2022-06-11
@@ -237,7 +237,7 @@
2022-06-11
- https://nicksxs.me/2021/09/12/%E8%81%8A%E4%B8%80%E4%B8%8B-RocketMQ-%E7%9A%84%E6%B6%88%E6%81%AF%E5%AD%98%E5%82%A8%E4%BA%8C/
+ https://nicksxs.me/2021/09/04/%E8%81%8A%E4%B8%80%E4%B8%8B-RocketMQ-%E7%9A%84%E6%B6%88%E6%81%AF%E5%AD%98%E5%82%A8/
+ 2022-06-11
@@ -245,11 +245,7 @@
2022-06-11
- https://nicksxs.me/2021/09/04/%E8%81%8A%E4%B8%80%E4%B8%8B-RocketMQ-%E7%9A%84%E6%B6%88%E6%81%AF%E5%AD%98%E5%82%A8/
- 2022-06-11
-
-
- https://nicksxs.me/2021/09/26/%E8%81%8A%E4%B8%80%E4%B8%8B-SpringBoot-%E4%B8%AD%E5%8A%A8%E6%80%81%E5%88%87%E6%8D%A2%E6%95%B0%E6%8D%AE%E6%BA%90%E7%9A%84%E6%96%B9%E6%B3%95/
+ https://nicksxs.me/2021/09/12/%E8%81%8A%E4%B8%80%E4%B8%8B-RocketMQ-%E7%9A%84%E6%B6%88%E6%81%AF%E5%AD%98%E5%82%A8%E4%BA%8C/
+ 2022-06-11
@@ -261,11 +257,11 @@
2022-06-11
- https://nicksxs.me/2020/11/22/%E8%81%8A%E8%81%8A-Dubbo-%E7%9A%84%E5%AE%B9%E9%94%99%E6%9C%BA%E5%88%B6/
+ https://nicksxs.me/2021/09/26/%E8%81%8A%E4%B8%80%E4%B8%8B-SpringBoot-%E4%B8%AD%E5%8A%A8%E6%80%81%E5%88%87%E6%8D%A2%E6%95%B0%E6%8D%AE%E6%BA%90%E7%9A%84%E6%96%B9%E6%B3%95/
+ 2022-06-11
- https://nicksxs.me/2021/06/27/%E8%81%8A%E8%81%8A-Java-%E4%B8%AD%E7%BB%95%E4%B8%8D%E5%BC%80%E7%9A%84-Synchronized-%E5%85%B3%E9%94%AE%E5%AD%97-%E4%BA%8C/
+ https://nicksxs.me/2020/11/22/%E8%81%8A%E8%81%8A-Dubbo-%E7%9A%84%E5%AE%B9%E9%94%99%E6%9C%BA%E5%88%B6/
+ 2022-06-11
@@ -285,15 +281,15 @@
2022-06-11
- https://nicksxs.me/2021/12/26/%E8%81%8A%E8%81%8A-Sharding-Jdbc-%E7%9A%84%E7%AE%80%E5%8D%95%E5%8E%9F%E7%90%86%E5%88%9D%E7%AF%87/
+ https://nicksxs.me/2021/12/12/%E8%81%8A%E8%81%8A-Sharding-Jdbc-%E7%9A%84%E7%AE%80%E5%8D%95%E4%BD%BF%E7%94%A8/
+ 2022-06-11
- https://nicksxs.me/2021/04/04/%E8%81%8A%E8%81%8A-dubbo-%E7%9A%84%E7%BA%BF%E7%A8%8B%E6%B1%A0/
+ https://nicksxs.me/2021/12/26/%E8%81%8A%E8%81%8A-Sharding-Jdbc-%E7%9A%84%E7%AE%80%E5%8D%95%E5%8E%9F%E7%90%86%E5%88%9D%E7%AF%87/
+ 2022-06-11
- https://nicksxs.me/2021/12/12/%E8%81%8A%E8%81%8A-Sharding-Jdbc-%E7%9A%84%E7%AE%80%E5%8D%95%E4%BD%BF%E7%94%A8/
+ https://nicksxs.me/2021/04/04/%E8%81%8A%E8%81%8A-dubbo-%E7%9A%84%E7%BA%BF%E7%A8%8B%E6%B1%A0/
+ 2022-06-11
@@ -304,6 +300,10 @@
https://nicksxs.me/2021/05/30/%E8%81%8A%E8%81%8A%E4%BC%A0%E8%AF%B4%E4%B8%AD%E7%9A%84-ThreadLocal/
2022-06-11
+
+ https://nicksxs.me/2021/06/27/%E8%81%8A%E8%81%8A-Java-%E4%B8%AD%E7%BB%95%E4%B8%8D%E5%BC%80%E7%9A%84-Synchronized-%E5%85%B3%E9%94%AE%E5%AD%97-%E4%BA%8C/
+ 2022-06-11
https://nicksxs.me/2021/12/05/%E8%81%8A%E8%81%8A%E9%83%A8%E5%88%86%E5%85%AC%E4%BA%A4%E8%BD%A6%E7%9A%84%E8%AE%BE%E8%AE%A1bug/
2022-06-11
@@ -676,6 +676,10 @@
https://nicksxs.me/2015/04/14/Add-Two-Number/
2020-01-12
+
+ https://nicksxs.me/2014/12/24/MFC%20%E6%A8%A1%E6%80%81%E5%AF%B9%E8%AF%9D%E6%A1%86/
+ 2020-01-12
https://nicksxs.me/2019/12/10/Redis-Part-1/
2020-01-12
@@ -689,7 +693,7 @@
2020-01-12
- https://nicksxs.me/2014/12/24/MFC%20%E6%A8%A1%E6%80%81%E5%AF%B9%E8%AF%9D%E6%A1%86/
+ https://nicksxs.me/2015/01/14/Two-Sum/
+ 2020-01-12
@@ -700,10 +704,6 @@
https://nicksxs.me/2016/08/14/docker-mysql-cluster/
2020-01-12
-
- https://nicksxs.me/2015/01/14/Two-Sum/
- 2020-01-12
https://nicksxs.me/2017/05/09/ambari-summary/
2020-01-12
@@ -733,11 +733,11 @@
2020-01-12
- https://nicksxs.me/2014/12/30/Clone-Graph-Part-I/
+ https://nicksxs.me/2019/09/23/AbstractQueuedSynchronizer/
+ 2020-01-12
- https://nicksxs.me/2019/09/23/AbstractQueuedSynchronizer/
+ https://nicksxs.me/2014/12/30/Clone-Graph-Part-I/
+ 2020-01-12
@@ -773,11 +773,11 @@
2020-01-12
- https://nicksxs.me/2016/10/12/summary-ranges-228/
+ https://nicksxs.me/2016/07/13/swoole-websocket-test/
+ 2020-01-12
- https://nicksxs.me/2016/07/13/swoole-websocket-test/
+ https://nicksxs.me/2016/10/12/summary-ranges-228/
+ 2020-01-12
diff --git a/leancloud_counter_security_urls.json b/leancloud_counter_security_urls.json
index 70cad39f99..c82df1726e 100644
--- a/leancloud_counter_security_urls.json
+++ b/leancloud_counter_security_urls.json
@@ -1 +1 @@
-[{"title":"村上春树《1Q84》读后感","url":"/2019/12/18/1Q84读后感/"},{"title":"2019年终总结","url":"/2020/02/01/2019年终总结/"},{"title":"2020 年终总结","url":"/2021/03/31/2020-年终总结/"},{"title":"2020年中总结","url":"/2020/07/11/2020年中总结/"},{"title":"2021 年中总结","url":"/2021/07/18/2021-年中总结/"},{"title":"2021 年终总结","url":"/2022/01/22/2021-年终总结/"},{"title":"34_Search_for_a_Range","url":"/2016/08/14/34-Search-for-a-Range/"},{"title":"AQS篇二 之 Condition 浅析笔记","url":"/2021/02/21/AQS-之-Condition-浅析笔记/"},{"title":"AQS篇一","url":"/2021/02/14/AQS篇一/"},{"title":"add-two-number","url":"/2015/04/14/Add-Two-Number/"},{"title":"Apollo 客户端启动过程分析","url":"/2022/09/18/Apollo-客户端启动过程分析/"},{"title":"Apollo 的 value 注解是怎么自动更新的","url":"/2020/11/01/Apollo-的-value-注解是怎么自动更新的/"},{"title":"Apollo 如何获取当前环境","url":"/2022/09/04/Apollo-如何获取当前环境/"},{"title":"Clone Graph Part I","url":"/2014/12/30/Clone-Graph-Part-I/"},{"title":"AbstractQueuedSynchronizer","url":"/2019/09/23/AbstractQueuedSynchronizer/"},{"title":"Comparator使用小记","url":"/2020/04/05/Comparator使用小记/"},{"title":"Disruptor 系列一","url":"/2022/02/13/Disruptor-系列一/"},{"title":"Disruptor 系列二","url":"/2022/02/27/Disruptor-系列二/"},{"title":"Dubbo 使用的几个记忆点","url":"/2022/04/02/Dubbo-使用的几个记忆点/"},{"title":"G1收集器概述","url":"/2020/02/09/G1收集器概述/"},{"title":"2022 年终总结","url":"/2023/01/15/2022-年终总结/"},{"title":"Filter, Interceptor, Aop, 啥, 啥, 啥? 这些都是啥?","url":"/2020/08/22/Filter-Intercepter-Aop-啥-啥-啥-这些都是啥/"},{"title":"Leetcode 021 合并两个有序链表 ( Merge Two Sorted Lists ) 题解分析","url":"/2021/10/07/Leetcode-021-合并两个有序链表-Merge-Two-Sorted-Lists-题解分析/"},{"title":"JVM源码分析之G1垃圾收集器分析一","url":"/2019/12/07/JVM-G1-Part-1/"},{"title":"Leetcode 053 最大子序和 ( Maximum Subarray ) 题解分析","url":"/2021/11/28/Leetcode-053-最大子序和-Maximum-Subarray-题解分析/"},{"title":"Leetcode 028 实现 strStr() ( Implement strStr() ) 题解分析","url":"/2021/10/31/Leetcode-028-实现-strStr-Implement-strStr-题解分析/"},{"title":"Leetcode 104 二叉树的最大深度(Maximum Depth of Binary Tree) 题解分析","url":"/2020/10/25/Leetcode-104-二叉树的最大深度-Maximum-Depth-of-Binary-Tree-题解分析/"},{"title":"Leetcode 105 从前序与中序遍历序列构造二叉树(Construct Binary Tree from Preorder and Inorder Traversal) 题解分析","url":"/2020/12/13/Leetcode-105-从前序与中序遍历序列构造二叉树-Construct-Binary-Tree-from-Preorder-and-Inorder-Traversal-题解分析/"},{"title":"Disruptor 系列三","url":"/2022/09/25/Disruptor-系列三/"},{"title":"Leetcode 1115 交替打印 FooBar ( Print FooBar Alternately *Medium* ) 题解分析","url":"/2022/05/01/Leetcode-1115-交替打印-FooBar-Print-FooBar-Alternately-Medium-题解分析/"},{"title":"Leetcode 121 买卖股票的最佳时机(Best Time to Buy and Sell Stock) 题解分析","url":"/2021/03/14/Leetcode-121-买卖股票的最佳时机-Best-Time-to-Buy-and-Sell-Stock-题解分析/"},{"title":"Headscale初体验以及踩坑记","url":"/2023/01/22/Headscale初体验以及踩坑记/"},{"title":"Leetcode 124 二叉树中的最大路径和(Binary Tree Maximum Path Sum) 题解分析","url":"/2021/01/24/Leetcode-124-二叉树中的最大路径和-Binary-Tree-Maximum-Path-Sum-题解分析/"},{"title":"Leetcode 1260 二维网格迁移 ( Shift 2D Grid *Easy* ) 题解分析","url":"/2022/07/22/Leetcode-1260-二维网格迁移-Shift-2D-Grid-Easy-题解分析/"},{"title":"Leetcode 155 最小栈(Min Stack) 题解分析","url":"/2020/12/06/Leetcode-155-最小栈-Min-Stack-题解分析/"},{"title":"Leetcode 16 最接近的三数之和 ( 3Sum Closest *Medium* ) 题解分析","url":"/2022/08/06/Leetcode-16-最接近的三数之和-3Sum-Closest-Medium-题解分析/"},{"title":"Leetcode 160 相交链表(intersection-of-two-linked-lists) 题解分析","url":"/2021/01/10/Leetcode-160-相交链表-intersection-of-two-linked-lists-题解分析/"},{"title":"Leetcode 20 有效的括号 ( Valid Parentheses *Easy* ) 题解分析","url":"/2022/07/02/Leetcode-20-有效的括号-Valid-Parentheses-Easy-题解分析/"},{"title":"Leetcode 1862 向下取整数对和 ( Sum of Floored Pairs *Hard* ) 
题解分析","url":"/2022/09/11/Leetcode-1862-向下取整数对和-Sum-of-Floored-Pairs-Hard-题解分析/"},{"title":"Leetcode 234 回文链表(Palindrome Linked List) 题解分析","url":"/2020/11/15/Leetcode-234-回文联表-Palindrome-Linked-List-题解分析/"},{"title":"Leetcode 236 二叉树的最近公共祖先(Lowest Common Ancestor of a Binary Tree) 题解分析","url":"/2021/05/23/Leetcode-236-二叉树的最近公共祖先-Lowest-Common-Ancestor-of-a-Binary-Tree-题解分析/"},{"title":"Leetcode 278 第一个错误的版本 ( First Bad Version *Easy* ) 题解分析","url":"/2022/08/14/Leetcode-278-第一个错误的版本-First-Bad-Version-Easy-题解分析/"},{"title":"Leetcode 3 Longest Substring Without Repeating Characters 题解分析","url":"/2020/09/20/Leetcode-3-Longest-Substring-Without-Repeating-Characters-题解分析/"},{"title":"Leetcode 349 两个数组的交集 ( Intersection of Two Arrays *Easy* ) 题解分析","url":"/2022/03/07/Leetcode-349-两个数组的交集-Intersection-of-Two-Arrays-Easy-题解分析/"},{"title":"Leetcode 2 Add Two Numbers 题解分析","url":"/2020/10/11/Leetcode-2-Add-Two-Numbers-题解分析/"},{"title":"Leetcode 4 寻找两个正序数组的中位数 ( Median of Two Sorted Arrays *Hard* ) 题解分析","url":"/2022/03/27/Leetcode-4-寻找两个正序数组的中位数-Median-of-Two-Sorted-Arrays-Hard-题解分析/"},{"title":"Leetcode 42 接雨水 (Trapping Rain Water) 题解分析","url":"/2021/07/04/Leetcode-42-接雨水-Trapping-Rain-Water-题解分析/"},{"title":"Leetcode 48 旋转图像(Rotate Image) 题解分析","url":"/2021/05/01/Leetcode-48-旋转图像-Rotate-Image-题解分析/"},{"title":"Leetcode 885 螺旋矩阵 III ( Spiral Matrix III *Medium* ) 题解分析","url":"/2022/08/23/Leetcode-885-螺旋矩阵-III-Spiral-Matrix-III-Medium-题解分析/"},{"title":"Leetcode 698 划分为k个相等的子集 ( Partition to K Equal Sum Subsets *Medium* ) 题解分析","url":"/2022/06/19/Leetcode-698-划分为k个相等的子集-Partition-to-K-Equal-Sum-Subsets-Medium-题解分析/"},{"title":"Leetcode 83 删除排序链表中的重复元素 ( Remove Duplicates from Sorted List *Easy* ) 题解分析","url":"/2022/03/13/Leetcode-83-删除排序链表中的重复元素-Remove-Duplicates-from-Sorted-List-Easy-题解分析/"},{"title":"Linux 下 grep 命令的一点小技巧","url":"/2020/08/06/Linux-下-grep-命令的一点小技巧/"},{"title":"leetcode no.3","url":"/2015/04/15/Leetcode-No-3/"},{"title":"Number of 1 Bits","url":"/2015/03/11/Number-Of-1-Bits/"},{"title":"Maven实用小技巧","url":"/2020/02/16/Maven实用小技巧/"},{"title":"Redis_分布式锁","url":"/2019/12/10/Redis-Part-1/"},{"title":"Reverse Bits","url":"/2015/03/11/Reverse-Bits/"},{"title":"Reverse Integer","url":"/2015/03/13/Reverse-Integer/"},{"title":"MFC 模态对话框","url":"/2014/12/24/MFC 模态对话框/"},{"title":"Path Sum","url":"/2015/01/04/Path-Sum/"},{"title":"binary-watch","url":"/2016/09/29/binary-watch/"},{"title":"docker-mysql-cluster","url":"/2016/08/14/docker-mysql-cluster/"},{"title":"docker比一般多一点的初学者介绍","url":"/2020/03/08/docker比一般多一点的初学者介绍/"},{"title":"two sum","url":"/2015/01/14/Two-Sum/"},{"title":"ambari-summary","url":"/2017/05/09/ambari-summary/"},{"title":"docker比一般多一点的初学者介绍二","url":"/2020/03/15/docker比一般多一点的初学者介绍二/"},{"title":"dubbo 客户端配置的一个重要知识点","url":"/2022/06/11/dubbo-客户端配置的一个重要知识点/"},{"title":"docker比一般多一点的初学者介绍三","url":"/2020/03/21/docker比一般多一点的初学者介绍三/"},{"title":"docker使用中发现的echo命令的一个小技巧及其他","url":"/2020/03/29/echo命令的一个小技巧/"},{"title":"docker比一般多一点的初学者介绍四","url":"/2022/12/25/docker比一般多一点的初学者介绍四/"},{"title":"gogs使用webhook部署react单页应用","url":"/2020/02/22/gogs使用webhook部署react单页应用/"},{"title":"C++ 指针使用中的一个小问题","url":"/2014/12/23/my-new-post/"},{"title":"invert-binary-tree","url":"/2015/06/22/invert-binary-tree/"},{"title":"minimum-size-subarray-sum-209","url":"/2016/10/11/minimum-size-subarray-sum-209/"},{"title":"mybatis 的 $ 和 # 是有啥区别","url":"/2020/09/06/mybatis-的-和-是有啥区别/"},{"title":"mybatis 的 foreach 使用的注意点","url":"/2022/07/09/mybatis-的-foreach-使用的注意点/"},{"title":"Leetcode 747 至少是其他数字两倍的最大数 ( Largest 
Number At Least Twice of Others *Easy* ) 题解分析","url":"/2022/10/02/Leetcode-747-至少是其他数字两倍的最大数-Largest-Number-At-Least-Twice-of-Others-Easy-题解分析/"},{"title":"mybatis 的缓存是怎么回事","url":"/2020/10/03/mybatis-的缓存是怎么回事/"},{"title":"mybatis系列-mybatis是如何初始化mapper的","url":"/2022/12/04/mybatis是如何初始化mapper的/"},{"title":"mybatis系列-dataSource解析","url":"/2023/01/08/mybatis系列-dataSource解析/"},{"title":"mybatis系列-第一条sql的更多细节","url":"/2022/12/18/mybatis系列-第一条sql的更多细节/"},{"title":"nginx 日志小记","url":"/2022/04/17/nginx-日志小记/"},{"title":"openresty","url":"/2019/06/18/openresty/"},{"title":"pcre-intro-and-a-simple-package","url":"/2015/01/16/pcre-intro-and-a-simple-package/"},{"title":"mybatis系列-入门篇","url":"/2022/11/27/mybatis系列-入门篇/"},{"title":"php-abstract-class-and-interface","url":"/2016/11/10/php-abstract-class-and-interface/"},{"title":"mybatis系列-typeAliases系统","url":"/2023/01/01/mybatis系列-typeAliases系统/"},{"title":"rabbitmq-tips","url":"/2017/04/25/rabbitmq-tips/"},{"title":"redis数据结构介绍-第一部分 SDS,链表,字典","url":"/2019/12/26/redis数据结构介绍/"},{"title":"redis数据结构介绍三-第三部分 整数集合","url":"/2020/01/10/redis数据结构介绍三/"},{"title":"redis 的 rdb 和 COW 介绍","url":"/2021/08/15/redis-的-rdb-和-COW-介绍/"},{"title":"redis数据结构介绍二-第二部分 跳表","url":"/2020/01/04/redis数据结构介绍二/"},{"title":"redis数据结构介绍五-第五部分 对象","url":"/2020/01/20/redis数据结构介绍五/"},{"title":"mybatis系列-第一条sql的细节","url":"/2022/12/11/mybatis系列-第一条sql的细节/"},{"title":"redis数据结构介绍六 快表","url":"/2020/01/22/redis数据结构介绍六/"},{"title":"redis数据结构介绍四-第四部分 压缩表","url":"/2020/01/19/redis数据结构介绍四/"},{"title":"redis淘汰策略复习","url":"/2021/08/01/redis淘汰策略复习/"},{"title":"redis系列介绍七-过期策略","url":"/2020/04/12/redis系列介绍七/"},{"title":"redis系列介绍八-淘汰策略","url":"/2020/04/18/redis系列介绍八/"},{"title":"redis过期策略复习","url":"/2021/07/25/redis过期策略复习/"},{"title":"rust学习笔记-所有权三之切片","url":"/2021/05/16/rust学习笔记-所有权三之切片/"},{"title":"rust学习笔记-所有权一","url":"/2021/04/18/rust学习笔记/"},{"title":"spark-little-tips","url":"/2017/03/28/spark-little-tips/"},{"title":"spring event 介绍","url":"/2022/01/30/spring-event-介绍/"},{"title":"rust学习笔记-所有权二","url":"/2021/04/18/rust学习笔记-所有权二/"},{"title":"summary-ranges-228","url":"/2016/10/12/summary-ranges-228/"},{"title":"wordpress 忘记密码的一种解决方法","url":"/2021/12/05/wordpress-忘记密码的一种解决方法/"},{"title":"《垃圾回收算法手册读书》笔记之整理算法","url":"/2021/03/07/《垃圾回收算法手册读书》笔记之整理算法/"},{"title":"powershell 初体验","url":"/2022/11/13/powershell-初体验/"},{"title":"《长安的荔枝》读后感","url":"/2022/07/17/《长安的荔枝》读后感/"},{"title":"一个 nginx 的简单记忆点","url":"/2022/08/21/一个-nginx-的简单记忆点/"},{"title":"swoole-websocket-test","url":"/2016/07/13/swoole-websocket-test/"},{"title":"powershell 初体验二","url":"/2022/11/20/powershell-初体验二/"},{"title":"上次的其他 外行聊国足","url":"/2022/03/06/上次的其他-外行聊国足/"},{"title":"介绍一下 RocketMQ","url":"/2020/06/21/介绍一下-RocketMQ/"},{"title":"介绍下最近比较实用的端口转发","url":"/2021/11/14/介绍下最近比较实用的端口转发/"},{"title":"关于公共交通再吐个槽","url":"/2021/03/21/关于公共交通再吐个槽/"},{"title":"从清华美院学姐聊聊我们身边的恶人","url":"/2020/11/29/从清华美院学姐聊聊我们身边的恶人/"},{"title":"分享一次折腾老旧笔记本的体验-续篇","url":"/2023/02/12/分享一次折腾老旧笔记本的体验-续篇/"},{"title":"分享一次折腾老旧笔记本的体验","url":"/2023/02/05/分享一次折腾老旧笔记本的体验/"},{"title":"从丁仲礼被美国制裁聊点啥","url":"/2020/12/20/从丁仲礼被美国制裁聊点啥/"},{"title":"关于读书打卡与分享","url":"/2021/02/07/关于读书打卡与分享/"},{"title":"分享记录一下一个 scp 操作方法","url":"/2022/02/06/分享记录一下一个-scp-操作方法/"},{"title":"分享记录一下一个 git 操作方法","url":"/2022/02/06/分享记录一下一个-git-操作方法/"},{"title":"周末我在老丈人家打了天小工","url":"/2020/08/16/周末我在老丈人家打了天小工/"},{"title":"在老丈人家的小工记三","url":"/2020/09/13/在老丈人家的小工记三/"},{"title":"在老丈人家的小工记五","url":"/2020/10/18/在老丈人家的小工记五/"},{"title":"在老丈人家的小工记四","url":"/2020/09/26/在老丈人家的小工记四/"},{"title":"分享一次比较诡异的 Windows 下 
U盘无法退出的经历","url":"/2023/01/29/分享一次比较诡异的-Windows-下-U盘无法退出的经历/"},{"title":"寄生虫观后感","url":"/2020/03/01/寄生虫观后感/"},{"title":"屯菜惊魂记","url":"/2022/04/24/屯菜惊魂记/"},{"title":"我是如何走上跑步这条不归路的","url":"/2020/07/26/我是如何走上跑步这条不归路的/"},{"title":"是何原因竟让两人深夜奔袭十公里","url":"/2022/06/05/是何原因竟让两人深夜奔袭十公里/"},{"title":"搬运两个 StackOverflow 上的 Mysql 编码相关的问题解答","url":"/2022/01/16/搬运两个-StackOverflow-上的-Mysql-编码相关的问题解答/"},{"title":"看完了扫黑风暴,聊聊感想","url":"/2021/10/24/看完了扫黑风暴-聊聊感想/"},{"title":"聊一下 RocketMQ 的 DefaultMQPushConsumer 源码","url":"/2020/06/26/聊一下-RocketMQ-的-Consumer/"},{"title":"聊一下 RocketMQ 的 NameServer 源码","url":"/2020/07/05/聊一下-RocketMQ-的-NameServer-源码/"},{"title":"给小电驴上牌","url":"/2022/03/20/给小电驴上牌/"},{"title":"聊一下 RocketMQ 的消息存储二","url":"/2021/09/12/聊一下-RocketMQ-的消息存储二/"},{"title":"聊一下 RocketMQ 的消息存储三","url":"/2021/10/03/聊一下-RocketMQ-的消息存储三/"},{"title":"聊一下 RocketMQ 的消息存储之 MMAP","url":"/2021/09/04/聊一下-RocketMQ-的消息存储/"},{"title":"聊一下 SpringBoot 中动态切换数据源的方法","url":"/2021/09/26/聊一下-SpringBoot-中动态切换数据源的方法/"},{"title":"聊一下 RocketMQ 的顺序消息","url":"/2021/08/29/聊一下-RocketMQ-的顺序消息/"},{"title":"聊一下 RocketMQ 的消息存储四","url":"/2021/10/17/聊一下-RocketMQ-的消息存储四/"},{"title":"聊一下 SpringBoot 中使用的 cglib 作为动态代理中的一个注意点","url":"/2021/09/19/聊一下-SpringBoot-中使用的-cglib-作为动态代理中的一个注意点/"},{"title":"聊一下 SpringBoot 设置非 web 应用的方法","url":"/2022/07/31/聊一下-SpringBoot-设置非-web-应用的方法/"},{"title":"聊在东京奥运会闭幕式这天-二","url":"/2021/08/19/聊在东京奥运会闭幕式这天-二/"},{"title":"聊在东京奥运会闭幕式这天","url":"/2021/08/08/聊在东京奥运会闭幕式这天/"},{"title":"聊聊 Dubbo 的 SPI 续之自适应拓展","url":"/2020/06/06/聊聊-Dubbo-的-SPI-续之自适应拓展/"},{"title":"聊聊 Dubbo 的 SPI","url":"/2020/05/31/聊聊-Dubbo-的-SPI/"},{"title":"聊聊 Dubbo 的容错机制","url":"/2020/11/22/聊聊-Dubbo-的容错机制/"},{"title":"聊一下关于怎么陪伴学习","url":"/2022/11/06/聊一下关于怎么陪伴学习/"},{"title":"聊聊 Java 中绕不开的 Synchronized 关键字-二","url":"/2021/06/27/聊聊-Java-中绕不开的-Synchronized-关键字-二/"},{"title":"聊聊 Java 的 equals 和 hashCode 方法","url":"/2021/01/03/聊聊-Java-的-equals-和-hashCode-方法/"},{"title":"聊聊 Java 的类加载机制一","url":"/2020/11/08/聊聊-Java-的类加载机制/"},{"title":"聊聊 Java 中绕不开的 Synchronized 关键字","url":"/2021/06/20/聊聊-Java-中绕不开的-Synchronized-关键字/"},{"title":"聊聊 Java 的类加载机制二","url":"/2021/06/13/聊聊-Java-的类加载机制二/"},{"title":"聊聊 Java 自带的那些*逆天*工具","url":"/2020/08/02/聊聊-Java-自带的那些逆天工具/"},{"title":"聊聊 Linux 下的 top 命令","url":"/2021/03/28/聊聊-Linux-下的-top-命令/"},{"title":"聊聊 RocketMQ 的 Broker 源码","url":"/2020/07/19/聊聊-RocketMQ-的-Broker-源码/"},{"title":"聊聊 Sharding-Jdbc 分库分表下的分页方案","url":"/2022/01/09/聊聊-Sharding-Jdbc-分库分表下的分页方案/"},{"title":"聊聊 Sharding-Jdbc 的简单原理初篇","url":"/2021/12/26/聊聊-Sharding-Jdbc-的简单原理初篇/"},{"title":"聊聊 dubbo 的线程池","url":"/2021/04/04/聊聊-dubbo-的线程池/"},{"title":"聊聊 mysql 的 MVCC 续续篇之锁分析","url":"/2020/05/10/聊聊-mysql-的-MVCC-续续篇之加锁分析/"},{"title":"聊聊 Sharding-Jdbc 的简单使用","url":"/2021/12/12/聊聊-Sharding-Jdbc-的简单使用/"},{"title":"聊聊 mysql 的 MVCC 续篇","url":"/2020/05/02/聊聊-mysql-的-MVCC-续篇/"},{"title":"聊聊 mysql 索引的一些细节","url":"/2020/12/27/聊聊-mysql-索引的一些细节/"},{"title":"聊聊 redis 缓存的应用问题","url":"/2021/01/31/聊聊-redis-缓存的应用问题/"},{"title":"聊聊Java中的单例模式","url":"/2019/12/21/聊聊Java中的单例模式/"},{"title":"聊聊 mysql 的 MVCC","url":"/2020/04/26/聊聊-mysql-的-MVCC/"},{"title":"聊聊 SpringBoot 自动装配","url":"/2021/07/11/聊聊SpringBoot-自动装配/"},{"title":"聊聊传说中的 ThreadLocal","url":"/2021/05/30/聊聊传说中的-ThreadLocal/"},{"title":"聊聊一次 brew update 
引发的血案","url":"/2020/06/13/聊聊一次-brew-update-引发的血案/"},{"title":"聊聊我理解的分布式事务","url":"/2020/05/17/聊聊我理解的分布式事务/"},{"title":"聊聊厦门旅游的好与不好","url":"/2021/04/11/聊聊厦门旅游的好与不好/"},{"title":"聊聊如何识别和意识到日常生活中的各类危险","url":"/2021/06/06/聊聊如何识别和意识到日常生活中的各类危险/"},{"title":"聊聊最近平淡的生活之又聊通勤","url":"/2021/11/07/聊聊最近平淡的生活/"},{"title":"聊聊我的远程工作体验","url":"/2022/06/26/聊聊我的远程工作体验/"},{"title":"聊聊我刚学会的应用诊断方法","url":"/2020/05/22/聊聊我刚学会的应用诊断方法/"},{"title":"聊聊最近平淡的生活之《花束般的恋爱》观后感","url":"/2021/12/31/聊聊最近平淡的生活之《花束般的恋爱》观后感/"},{"title":"聊聊最近平淡的生活之看看老剧","url":"/2021/11/21/聊聊最近平淡的生活之看看老剧/"},{"title":"聊聊最近平淡的生活之看《神探狄仁杰》","url":"/2021/12/19/聊聊最近平淡的生活之看《神探狄仁杰》/"},{"title":"聊聊那些加塞狗","url":"/2021/01/17/聊聊那些加塞狗/"},{"title":"聊聊给亲戚朋友的老电脑重装系统那些事儿","url":"/2021/05/09/聊聊给亲戚朋友的老电脑重装系统那些事儿/"},{"title":"聊聊这次换车牌及其他","url":"/2022/02/20/聊聊这次换车牌及其他/"},{"title":"聊聊部分公交车的设计bug","url":"/2021/12/05/聊聊部分公交车的设计bug/"},{"title":"记录下 phpunit 的入门使用方法之setUp和tearDown","url":"/2022/10/23/记录下-phpunit-的入门使用方法之setUp和tearDown/"},{"title":"记录下 Java Stream 的一些高效操作","url":"/2022/05/15/记录下-Java-Lambda-的一些高效操作/"},{"title":"重看了下《蛮荒记》说说感受","url":"/2021/10/10/重看了下《蛮荒记》说说感受/"},{"title":"闲话篇-也算碰到了为老不尊和坏人变老了的典型案例","url":"/2022/05/22/闲话篇-也算碰到了为老不尊和坏人变老了的典型案例/"},{"title":"闲聊下乘公交的用户体验","url":"/2021/02/28/闲聊下乘公交的用户体验/"},{"title":"这周末我又在老丈人家打了天小工","url":"/2020/08/30/这周末我又在老丈人家打了天小工/"},{"title":"闲话篇-路遇神逻辑骑车带娃爹","url":"/2022/05/08/闲话篇-路遇神逻辑骑车带娃爹/"},{"title":"难得的大扫除","url":"/2022/04/10/难得的大扫除/"},{"title":"记录下 redis 的一些使用方法","url":"/2022/10/30/记录下-redis-的一些使用方法/"},{"title":"记一个容器中 dubbo 注册的小知识点","url":"/2022/10/09/记一个容器中-dubbo-注册的小知识点/"},{"title":"记录下 zookeeper 集群迁移和易错点","url":"/2022/05/29/记录下-zookeeper-集群迁移/"},{"title":"记录下 phpunit 的入门使用方法","url":"/2022/10/16/记录下-phpunit-的入门使用方法/"}]
\ No newline at end of file
+[{"title":"村上春树《1Q84》读后感","url":"/2019/12/18/1Q84读后感/"},{"title":"2020年中总结","url":"/2020/07/11/2020年中总结/"},{"title":"2020 年终总结","url":"/2021/03/31/2020-年终总结/"},{"title":"2019年终总结","url":"/2020/02/01/2019年终总结/"},{"title":"2021 年中总结","url":"/2021/07/18/2021-年中总结/"},{"title":"2022 年终总结","url":"/2023/01/15/2022-年终总结/"},{"title":"34_Search_for_a_Range","url":"/2016/08/14/34-Search-for-a-Range/"},{"title":"2021 年终总结","url":"/2022/01/22/2021-年终总结/"},{"title":"AQS篇二 之 Condition 浅析笔记","url":"/2021/02/21/AQS-之-Condition-浅析笔记/"},{"title":"AQS篇一","url":"/2021/02/14/AQS篇一/"},{"title":"AbstractQueuedSynchronizer","url":"/2019/09/23/AbstractQueuedSynchronizer/"},{"title":"Apollo 如何获取当前环境","url":"/2022/09/04/Apollo-如何获取当前环境/"},{"title":"add-two-number","url":"/2015/04/14/Add-Two-Number/"},{"title":"Apollo 客户端启动过程分析","url":"/2022/09/18/Apollo-客户端启动过程分析/"},{"title":"Apollo 的 value 注解是怎么自动更新的","url":"/2020/11/01/Apollo-的-value-注解是怎么自动更新的/"},{"title":"Clone Graph Part I","url":"/2014/12/30/Clone-Graph-Part-I/"},{"title":"Comparator使用小记","url":"/2020/04/05/Comparator使用小记/"},{"title":"Disruptor 系列一","url":"/2022/02/13/Disruptor-系列一/"},{"title":"Dubbo 使用的几个记忆点","url":"/2022/04/02/Dubbo-使用的几个记忆点/"},{"title":"Disruptor 系列三","url":"/2022/09/25/Disruptor-系列三/"},{"title":"Filter, Interceptor, Aop, 啥, 啥, 啥? 这些都是啥?","url":"/2020/08/22/Filter-Intercepter-Aop-啥-啥-啥-这些都是啥/"},{"title":"G1收集器概述","url":"/2020/02/09/G1收集器概述/"},{"title":"Disruptor 系列二","url":"/2022/02/27/Disruptor-系列二/"},{"title":"Leetcode 021 合并两个有序链表 ( Merge Two Sorted Lists ) 题解分析","url":"/2021/10/07/Leetcode-021-合并两个有序链表-Merge-Two-Sorted-Lists-题解分析/"},{"title":"JVM源码分析之G1垃圾收集器分析一","url":"/2019/12/07/JVM-G1-Part-1/"},{"title":"Leetcode 028 实现 strStr() ( Implement strStr() ) 题解分析","url":"/2021/10/31/Leetcode-028-实现-strStr-Implement-strStr-题解分析/"},{"title":"Headscale初体验以及踩坑记","url":"/2023/01/22/Headscale初体验以及踩坑记/"},{"title":"Leetcode 053 最大子序和 ( Maximum Subarray ) 题解分析","url":"/2021/11/28/Leetcode-053-最大子序和-Maximum-Subarray-题解分析/"},{"title":"Leetcode 105 从前序与中序遍历序列构造二叉树(Construct Binary Tree from Preorder and Inorder Traversal) 题解分析","url":"/2020/12/13/Leetcode-105-从前序与中序遍历序列构造二叉树-Construct-Binary-Tree-from-Preorder-and-Inorder-Traversal-题解分析/"},{"title":"Leetcode 104 二叉树的最大深度(Maximum Depth of Binary Tree) 题解分析","url":"/2020/10/25/Leetcode-104-二叉树的最大深度-Maximum-Depth-of-Binary-Tree-题解分析/"},{"title":"Leetcode 1115 交替打印 FooBar ( Print FooBar Alternately *Medium* ) 题解分析","url":"/2022/05/01/Leetcode-1115-交替打印-FooBar-Print-FooBar-Alternately-Medium-题解分析/"},{"title":"Leetcode 121 买卖股票的最佳时机(Best Time to Buy and Sell Stock) 题解分析","url":"/2021/03/14/Leetcode-121-买卖股票的最佳时机-Best-Time-to-Buy-and-Sell-Stock-题解分析/"},{"title":"Leetcode 124 二叉树中的最大路径和(Binary Tree Maximum Path Sum) 题解分析","url":"/2021/01/24/Leetcode-124-二叉树中的最大路径和-Binary-Tree-Maximum-Path-Sum-题解分析/"},{"title":"Leetcode 1260 二维网格迁移 ( Shift 2D Grid *Easy* ) 题解分析","url":"/2022/07/22/Leetcode-1260-二维网格迁移-Shift-2D-Grid-Easy-题解分析/"},{"title":"Leetcode 155 最小栈(Min Stack) 题解分析","url":"/2020/12/06/Leetcode-155-最小栈-Min-Stack-题解分析/"},{"title":"Leetcode 160 相交链表(intersection-of-two-linked-lists) 题解分析","url":"/2021/01/10/Leetcode-160-相交链表-intersection-of-two-linked-lists-题解分析/"},{"title":"Leetcode 16 最接近的三数之和 ( 3Sum Closest *Medium* ) 题解分析","url":"/2022/08/06/Leetcode-16-最接近的三数之和-3Sum-Closest-Medium-题解分析/"},{"title":"Leetcode 2 Add Two Numbers 题解分析","url":"/2020/10/11/Leetcode-2-Add-Two-Numbers-题解分析/"},{"title":"Leetcode 1862 向下取整数对和 ( Sum of Floored Pairs *Hard* ) 
题解分析","url":"/2022/09/11/Leetcode-1862-向下取整数对和-Sum-of-Floored-Pairs-Hard-题解分析/"},{"title":"Leetcode 20 有效的括号 ( Valid Parentheses *Easy* ) 题解分析","url":"/2022/07/02/Leetcode-20-有效的括号-Valid-Parentheses-Easy-题解分析/"},{"title":"Leetcode 234 回文链表(Palindrome Linked List) 题解分析","url":"/2020/11/15/Leetcode-234-回文联表-Palindrome-Linked-List-题解分析/"},{"title":"Leetcode 236 二叉树的最近公共祖先(Lowest Common Ancestor of a Binary Tree) 题解分析","url":"/2021/05/23/Leetcode-236-二叉树的最近公共祖先-Lowest-Common-Ancestor-of-a-Binary-Tree-题解分析/"},{"title":"Leetcode 278 第一个错误的版本 ( First Bad Version *Easy* ) 题解分析","url":"/2022/08/14/Leetcode-278-第一个错误的版本-First-Bad-Version-Easy-题解分析/"},{"title":"Leetcode 349 两个数组的交集 ( Intersection of Two Arrays *Easy* ) 题解分析","url":"/2022/03/07/Leetcode-349-两个数组的交集-Intersection-of-Two-Arrays-Easy-题解分析/"},{"title":"Leetcode 4 寻找两个正序数组的中位数 ( Median of Two Sorted Arrays *Hard* ) 题解分析","url":"/2022/03/27/Leetcode-4-寻找两个正序数组的中位数-Median-of-Two-Sorted-Arrays-Hard-题解分析/"},{"title":"Leetcode 3 Longest Substring Without Repeating Characters 题解分析","url":"/2020/09/20/Leetcode-3-Longest-Substring-Without-Repeating-Characters-题解分析/"},{"title":"Leetcode 42 接雨水 (Trapping Rain Water) 题解分析","url":"/2021/07/04/Leetcode-42-接雨水-Trapping-Rain-Water-题解分析/"},{"title":"Leetcode 48 旋转图像(Rotate Image) 题解分析","url":"/2021/05/01/Leetcode-48-旋转图像-Rotate-Image-题解分析/"},{"title":"Leetcode 698 划分为k个相等的子集 ( Partition to K Equal Sum Subsets *Medium* ) 题解分析","url":"/2022/06/19/Leetcode-698-划分为k个相等的子集-Partition-to-K-Equal-Sum-Subsets-Medium-题解分析/"},{"title":"Leetcode 747 至少是其他数字两倍的最大数 ( Largest Number At Least Twice of Others *Easy* ) 题解分析","url":"/2022/10/02/Leetcode-747-至少是其他数字两倍的最大数-Largest-Number-At-Least-Twice-of-Others-Easy-题解分析/"},{"title":"Leetcode 83 删除排序链表中的重复元素 ( Remove Duplicates from Sorted List *Easy* ) 题解分析","url":"/2022/03/13/Leetcode-83-删除排序链表中的重复元素-Remove-Duplicates-from-Sorted-List-Easy-题解分析/"},{"title":"leetcode no.3","url":"/2015/04/15/Leetcode-No-3/"},{"title":"Leetcode 885 螺旋矩阵 III ( Spiral Matrix III *Medium* ) 题解分析","url":"/2022/08/23/Leetcode-885-螺旋矩阵-III-Spiral-Matrix-III-Medium-题解分析/"},{"title":"Linux 下 grep 命令的一点小技巧","url":"/2020/08/06/Linux-下-grep-命令的一点小技巧/"},{"title":"MFC 模态对话框","url":"/2014/12/24/MFC 模态对话框/"},{"title":"Maven实用小技巧","url":"/2020/02/16/Maven实用小技巧/"},{"title":"Number of 1 Bits","url":"/2015/03/11/Number-Of-1-Bits/"},{"title":"Redis_分布式锁","url":"/2019/12/10/Redis-Part-1/"},{"title":"Reverse Bits","url":"/2015/03/11/Reverse-Bits/"},{"title":"Reverse Integer","url":"/2015/03/13/Reverse-Integer/"},{"title":"two sum","url":"/2015/01/14/Two-Sum/"},{"title":"binary-watch","url":"/2016/09/29/binary-watch/"},{"title":"docker-mysql-cluster","url":"/2016/08/14/docker-mysql-cluster/"},{"title":"Path Sum","url":"/2015/01/04/Path-Sum/"},{"title":"ambari-summary","url":"/2017/05/09/ambari-summary/"},{"title":"docker比一般多一点的初学者介绍","url":"/2020/03/08/docker比一般多一点的初学者介绍/"},{"title":"docker比一般多一点的初学者介绍三","url":"/2020/03/21/docker比一般多一点的初学者介绍三/"},{"title":"docker比一般多一点的初学者介绍四","url":"/2022/12/25/docker比一般多一点的初学者介绍四/"},{"title":"dubbo 客户端配置的一个重要知识点","url":"/2022/06/11/dubbo-客户端配置的一个重要知识点/"},{"title":"docker使用中发现的echo命令的一个小技巧及其他","url":"/2020/03/29/echo命令的一个小技巧/"},{"title":"gogs使用webhook部署react单页应用","url":"/2020/02/22/gogs使用webhook部署react单页应用/"},{"title":"minimum-size-subarray-sum-209","url":"/2016/10/11/minimum-size-subarray-sum-209/"},{"title":"C++ 指针使用中的一个小问题","url":"/2014/12/23/my-new-post/"},{"title":"invert-binary-tree","url":"/2015/06/22/invert-binary-tree/"},{"title":"mybatis 的 foreach 
使用的注意点","url":"/2022/07/09/mybatis-的-foreach-使用的注意点/"},{"title":"mybatis 的 $ 和 # 是有啥区别","url":"/2020/09/06/mybatis-的-和-是有啥区别/"},{"title":"mybatis 的缓存是怎么回事","url":"/2020/10/03/mybatis-的缓存是怎么回事/"},{"title":"mybatis系列-dataSource解析","url":"/2023/01/08/mybatis系列-dataSource解析/"},{"title":"mybatis系列-mybatis是如何初始化mapper的","url":"/2022/12/04/mybatis是如何初始化mapper的/"},{"title":"mybatis系列-typeAliases系统","url":"/2023/01/01/mybatis系列-typeAliases系统/"},{"title":"docker比一般多一点的初学者介绍二","url":"/2020/03/15/docker比一般多一点的初学者介绍二/"},{"title":"mybatis系列-第一条sql的细节","url":"/2022/12/11/mybatis系列-第一条sql的细节/"},{"title":"mybatis系列-第一条sql的更多细节","url":"/2022/12/18/mybatis系列-第一条sql的更多细节/"},{"title":"mybatis系列-入门篇","url":"/2022/11/27/mybatis系列-入门篇/"},{"title":"pcre-intro-and-a-simple-package","url":"/2015/01/16/pcre-intro-and-a-simple-package/"},{"title":"powershell 初体验","url":"/2022/11/13/powershell-初体验/"},{"title":"openresty","url":"/2019/06/18/openresty/"},{"title":"php-abstract-class-and-interface","url":"/2016/11/10/php-abstract-class-and-interface/"},{"title":"rabbitmq-tips","url":"/2017/04/25/rabbitmq-tips/"},{"title":"nginx 日志小记","url":"/2022/04/17/nginx-日志小记/"},{"title":"redis 的 rdb 和 COW 介绍","url":"/2021/08/15/redis-的-rdb-和-COW-介绍/"},{"title":"redis数据结构介绍-第一部分 SDS,链表,字典","url":"/2019/12/26/redis数据结构介绍/"},{"title":"powershell 初体验二","url":"/2022/11/20/powershell-初体验二/"},{"title":"redis数据结构介绍三-第三部分 整数集合","url":"/2020/01/10/redis数据结构介绍三/"},{"title":"redis数据结构介绍二-第二部分 跳表","url":"/2020/01/04/redis数据结构介绍二/"},{"title":"redis数据结构介绍六 快表","url":"/2020/01/22/redis数据结构介绍六/"},{"title":"redis数据结构介绍五-第五部分 对象","url":"/2020/01/20/redis数据结构介绍五/"},{"title":"redis数据结构介绍四-第四部分 压缩表","url":"/2020/01/19/redis数据结构介绍四/"},{"title":"redis系列介绍七-过期策略","url":"/2020/04/12/redis系列介绍七/"},{"title":"redis淘汰策略复习","url":"/2021/08/01/redis淘汰策略复习/"},{"title":"redis过期策略复习","url":"/2021/07/25/redis过期策略复习/"},{"title":"rust学习笔记-所有权二","url":"/2021/04/18/rust学习笔记-所有权二/"},{"title":"redis系列介绍八-淘汰策略","url":"/2020/04/18/redis系列介绍八/"},{"title":"rust学习笔记-所有权一","url":"/2021/04/18/rust学习笔记/"},{"title":"spark-little-tips","url":"/2017/03/28/spark-little-tips/"},{"title":"spring event 介绍","url":"/2022/01/30/spring-event-介绍/"},{"title":"rust学习笔记-所有权三之切片","url":"/2021/05/16/rust学习笔记-所有权三之切片/"},{"title":"swoole-websocket-test","url":"/2016/07/13/swoole-websocket-test/"},{"title":"wordpress 忘记密码的一种解决方法","url":"/2021/12/05/wordpress-忘记密码的一种解决方法/"},{"title":"《垃圾回收算法手册读书》笔记之整理算法","url":"/2021/03/07/《垃圾回收算法手册读书》笔记之整理算法/"},{"title":"《长安的荔枝》读后感","url":"/2022/07/17/《长安的荔枝》读后感/"},{"title":"一个 nginx 的简单记忆点","url":"/2022/08/21/一个-nginx-的简单记忆点/"},{"title":"上次的其他 外行聊国足","url":"/2022/03/06/上次的其他-外行聊国足/"},{"title":"summary-ranges-228","url":"/2016/10/12/summary-ranges-228/"},{"title":"介绍一下 RocketMQ","url":"/2020/06/21/介绍一下-RocketMQ/"},{"title":"从丁仲礼被美国制裁聊点啥","url":"/2020/12/20/从丁仲礼被美国制裁聊点啥/"},{"title":"从清华美院学姐聊聊我们身边的恶人","url":"/2020/11/29/从清华美院学姐聊聊我们身边的恶人/"},{"title":"介绍下最近比较实用的端口转发","url":"/2021/11/14/介绍下最近比较实用的端口转发/"},{"title":"关于公共交通再吐个槽","url":"/2021/03/21/关于公共交通再吐个槽/"},{"title":"分享一次折腾老旧笔记本的体验","url":"/2023/02/05/分享一次折腾老旧笔记本的体验/"},{"title":"关于读书打卡与分享","url":"/2021/02/07/关于读书打卡与分享/"},{"title":"分享一次折腾老旧笔记本的体验-续篇","url":"/2023/02/12/分享一次折腾老旧笔记本的体验-续篇/"},{"title":"分享一次比较诡异的 Windows 下 U盘无法退出的经历","url":"/2023/01/29/分享一次比较诡异的-Windows-下-U盘无法退出的经历/"},{"title":"分享记录一下一个 git 
操作方法","url":"/2022/02/06/分享记录一下一个-git-操作方法/"},{"title":"周末我在老丈人家打了天小工","url":"/2020/08/16/周末我在老丈人家打了天小工/"},{"title":"在老丈人家的小工记五","url":"/2020/10/18/在老丈人家的小工记五/"},{"title":"在老丈人家的小工记四","url":"/2020/09/26/在老丈人家的小工记四/"},{"title":"寄生虫观后感","url":"/2020/03/01/寄生虫观后感/"},{"title":"屯菜惊魂记","url":"/2022/04/24/屯菜惊魂记/"},{"title":"分享记录一下一个 scp 操作方法","url":"/2022/02/06/分享记录一下一个-scp-操作方法/"},{"title":"我是如何走上跑步这条不归路的","url":"/2020/07/26/我是如何走上跑步这条不归路的/"},{"title":"是何原因竟让两人深夜奔袭十公里","url":"/2022/06/05/是何原因竟让两人深夜奔袭十公里/"},{"title":"搬运两个 StackOverflow 上的 Mysql 编码相关的问题解答","url":"/2022/01/16/搬运两个-StackOverflow-上的-Mysql-编码相关的问题解答/"},{"title":"看完了扫黑风暴,聊聊感想","url":"/2021/10/24/看完了扫黑风暴-聊聊感想/"},{"title":"聊一下 RocketMQ 的 DefaultMQPushConsumer 源码","url":"/2020/06/26/聊一下-RocketMQ-的-Consumer/"},{"title":"聊一下 RocketMQ 的 NameServer 源码","url":"/2020/07/05/聊一下-RocketMQ-的-NameServer-源码/"},{"title":"聊一下 RocketMQ 的消息存储之 MMAP","url":"/2021/09/04/聊一下-RocketMQ-的消息存储/"},{"title":"在老丈人家的小工记三","url":"/2020/09/13/在老丈人家的小工记三/"},{"title":"聊一下 RocketMQ 的消息存储三","url":"/2021/10/03/聊一下-RocketMQ-的消息存储三/"},{"title":"聊一下 RocketMQ 的消息存储二","url":"/2021/09/12/聊一下-RocketMQ-的消息存储二/"},{"title":"聊一下 RocketMQ 的消息存储四","url":"/2021/10/17/聊一下-RocketMQ-的消息存储四/"},{"title":"聊一下 RocketMQ 的顺序消息","url":"/2021/08/29/聊一下-RocketMQ-的顺序消息/"},{"title":"聊一下 SpringBoot 中使用的 cglib 作为动态代理中的一个注意点","url":"/2021/09/19/聊一下-SpringBoot-中使用的-cglib-作为动态代理中的一个注意点/"},{"title":"给小电驴上牌","url":"/2022/03/20/给小电驴上牌/"},{"title":"聊一下 SpringBoot 中动态切换数据源的方法","url":"/2021/09/26/聊一下-SpringBoot-中动态切换数据源的方法/"},{"title":"聊一下 SpringBoot 设置非 web 应用的方法","url":"/2022/07/31/聊一下-SpringBoot-设置非-web-应用的方法/"},{"title":"聊在东京奥运会闭幕式这天-二","url":"/2021/08/19/聊在东京奥运会闭幕式这天-二/"},{"title":"聊在东京奥运会闭幕式这天","url":"/2021/08/08/聊在东京奥运会闭幕式这天/"},{"title":"聊聊 Dubbo 的 SPI","url":"/2020/05/31/聊聊-Dubbo-的-SPI/"},{"title":"聊聊 Dubbo 的 SPI 续之自适应拓展","url":"/2020/06/06/聊聊-Dubbo-的-SPI-续之自适应拓展/"},{"title":"聊一下关于怎么陪伴学习","url":"/2022/11/06/聊一下关于怎么陪伴学习/"},{"title":"聊聊 Dubbo 的容错机制","url":"/2020/11/22/聊聊-Dubbo-的容错机制/"},{"title":"聊聊 Java 中绕不开的 Synchronized 关键字","url":"/2021/06/20/聊聊-Java-中绕不开的-Synchronized-关键字/"},{"title":"聊聊 Java 的类加载机制一","url":"/2020/11/08/聊聊-Java-的类加载机制/"},{"title":"聊聊 Java 的 equals 和 hashCode 方法","url":"/2021/01/03/聊聊-Java-的-equals-和-hashCode-方法/"},{"title":"聊聊 Java 的类加载机制二","url":"/2021/06/13/聊聊-Java-的类加载机制二/"},{"title":"聊聊 Java 自带的那些*逆天*工具","url":"/2020/08/02/聊聊-Java-自带的那些逆天工具/"},{"title":"聊聊 Linux 下的 top 命令","url":"/2021/03/28/聊聊-Linux-下的-top-命令/"},{"title":"聊聊 RocketMQ 的 Broker 源码","url":"/2020/07/19/聊聊-RocketMQ-的-Broker-源码/"},{"title":"聊聊 Sharding-Jdbc 分库分表下的分页方案","url":"/2022/01/09/聊聊-Sharding-Jdbc-分库分表下的分页方案/"},{"title":"聊聊 Sharding-Jdbc 的简单使用","url":"/2021/12/12/聊聊-Sharding-Jdbc-的简单使用/"},{"title":"聊聊 Sharding-Jdbc 的简单原理初篇","url":"/2021/12/26/聊聊-Sharding-Jdbc-的简单原理初篇/"},{"title":"聊聊 dubbo 的线程池","url":"/2021/04/04/聊聊-dubbo-的线程池/"},{"title":"聊聊 mysql 的 MVCC 续篇","url":"/2020/05/02/聊聊-mysql-的-MVCC-续篇/"},{"title":"聊聊 mysql 的 MVCC 续续篇之锁分析","url":"/2020/05/10/聊聊-mysql-的-MVCC-续续篇之加锁分析/"},{"title":"聊聊 mysql 的 MVCC","url":"/2020/04/26/聊聊-mysql-的-MVCC/"},{"title":"聊聊 mysql 索引的一些细节","url":"/2020/12/27/聊聊-mysql-索引的一些细节/"},{"title":"聊聊 redis 缓存的应用问题","url":"/2021/01/31/聊聊-redis-缓存的应用问题/"},{"title":"聊聊Java中的单例模式","url":"/2019/12/21/聊聊Java中的单例模式/"},{"title":"聊聊 SpringBoot 自动装配","url":"/2021/07/11/聊聊SpringBoot-自动装配/"},{"title":"聊聊一次 brew update 引发的血案","url":"/2020/06/13/聊聊一次-brew-update-引发的血案/"},{"title":"聊聊传说中的 
ThreadLocal","url":"/2021/05/30/聊聊传说中的-ThreadLocal/"},{"title":"聊聊厦门旅游的好与不好","url":"/2021/04/11/聊聊厦门旅游的好与不好/"},{"title":"聊聊我刚学会的应用诊断方法","url":"/2020/05/22/聊聊我刚学会的应用诊断方法/"},{"title":"聊聊如何识别和意识到日常生活中的各类危险","url":"/2021/06/06/聊聊如何识别和意识到日常生活中的各类危险/"},{"title":"聊聊我理解的分布式事务","url":"/2020/05/17/聊聊我理解的分布式事务/"},{"title":"聊聊我的远程工作体验","url":"/2022/06/26/聊聊我的远程工作体验/"},{"title":"聊聊最近平淡的生活之又聊通勤","url":"/2021/11/07/聊聊最近平淡的生活/"},{"title":"聊聊 Java 中绕不开的 Synchronized 关键字-二","url":"/2021/06/27/聊聊-Java-中绕不开的-Synchronized-关键字-二/"},{"title":"聊聊最近平淡的生活之《花束般的恋爱》观后感","url":"/2021/12/31/聊聊最近平淡的生活之《花束般的恋爱》观后感/"},{"title":"聊聊最近平淡的生活之看《神探狄仁杰》","url":"/2021/12/19/聊聊最近平淡的生活之看《神探狄仁杰》/"},{"title":"聊聊最近平淡的生活之看看老剧","url":"/2021/11/21/聊聊最近平淡的生活之看看老剧/"},{"title":"聊聊给亲戚朋友的老电脑重装系统那些事儿","url":"/2021/05/09/聊聊给亲戚朋友的老电脑重装系统那些事儿/"},{"title":"聊聊这次换车牌及其他","url":"/2022/02/20/聊聊这次换车牌及其他/"},{"title":"聊聊那些加塞狗","url":"/2021/01/17/聊聊那些加塞狗/"},{"title":"聊聊部分公交车的设计bug","url":"/2021/12/05/聊聊部分公交车的设计bug/"},{"title":"记录下 Java Stream 的一些高效操作","url":"/2022/05/15/记录下-Java-Lambda-的一些高效操作/"},{"title":"记录下 phpunit 的入门使用方法之setUp和tearDown","url":"/2022/10/23/记录下-phpunit-的入门使用方法之setUp和tearDown/"},{"title":"记录下 phpunit 的入门使用方法","url":"/2022/10/16/记录下-phpunit-的入门使用方法/"},{"title":"记录下 redis 的一些使用方法","url":"/2022/10/30/记录下-redis-的一些使用方法/"},{"title":"记录下 zookeeper 集群迁移和易错点","url":"/2022/05/29/记录下-zookeeper-集群迁移/"},{"title":"这周末我又在老丈人家打了天小工","url":"/2020/08/30/这周末我又在老丈人家打了天小工/"},{"title":"重看了下《蛮荒记》说说感受","url":"/2021/10/10/重看了下《蛮荒记》说说感受/"},{"title":"闲聊下乘公交的用户体验","url":"/2021/02/28/闲聊下乘公交的用户体验/"},{"title":"闲话篇-也算碰到了为老不尊和坏人变老了的典型案例","url":"/2022/05/22/闲话篇-也算碰到了为老不尊和坏人变老了的典型案例/"},{"title":"闲话篇-路遇神逻辑骑车带娃爹","url":"/2022/05/08/闲话篇-路遇神逻辑骑车带娃爹/"},{"title":"难得的大扫除","url":"/2022/04/10/难得的大扫除/"},{"title":"记一个容器中 dubbo 注册的小知识点","url":"/2022/10/09/记一个容器中-dubbo-注册的小知识点/"}]
\ No newline at end of file
diff --git a/page/31/index.html b/page/31/index.html
index 91d43fd7db..ed7d8845b4 100644
--- a/page/31/index.html
+++ b/page/31/index.html
@@ -48,7 +48,7 @@ constexpr size_t DATA_ROLL_PTR_LEN
/* This function is called when we are going to perform some operation
* in a given key, but such key may be already logically expired even if
* it still exists in the database. The main way this function is called
* is via lookupKey*() family of functions.
diff --git a/page/33/index.html b/page/33/index.html
index fd912a4f96..3b71a371a9 100644
--- a/page/33/index.html
+++ b/page/33/index.html
@@ -158,7 +158,7 @@ OS name: "mac os x", version: "10.14.6", arch: "x86_64", family: "mac"
This should be the last post in the redis series, covering the quicklist. The linked list introduced at the very beginning was actually used as the underlying structure of the list type in earlier redis versions, but a plain linked list has the drawbacks already mentioned: insertion is convenient, but space efficiency is low, binary search is impossible, and retrieval is slow. The ziplist arose for the same reason: the wish for better performance in both storage and access. At first I didn't understand what made the quicklist quick, and then I understood one thing: there is no silver bullet; the experts simply use the most suitable data structure at the right moment to maximize performance. One of the tricks is combining and switching between data structures; for example, Java's HashMap converts a bucket's linked list into a red-black tree once it holds more than 8 nodes, to improve access efficiency. Enough digression; back to the quicklist. This structure is mainly used for the list type, and if I said the quicklist is really just a linked list you might not believe it, but in fact the quicklist can indeed be regarded as a doubly linked list. Let's look at the code
/* quicklistNode is a 32 byte struct describing a ziplist for a quicklist.
* We use bit fields keep the quicklistNode at 32 bytes.
* count: 16 bits, max 65536 (max zl bytes is 65k, so max count actually < 32k).
* encoding: 2 bits, RAW=1, LZF=2.
diff --git a/page/34/index.html b/page/34/index.html
index 55063fe33c..2309559d96 100644
--- a/page/34/index.html
+++ b/page/34/index.html
@@ -1,4 +1,4 @@
-Nicksxs's Blog - What hurts more, the pain of hard work or the pain of regret?
* |00pppppp| - 1 byte
* String value with length less than or equal to 63 bytes (6 bits).
* "pppppp" represents the unsigned 6 bit length.
* |01pppppp|qqqqqqqq| - 2 bytes
@@ -60,7 +60,7 @@ typedef struct redisObject {
* 1 to 13 because 0000 and 1111 can not be used, so 1 should be
* subtracted from the encoded 4 bit value to obtain the right value.
* |11111111| - End of ziplist special entry.
Flexible array members were introduced in the C99 standard of the C programming language (in particular, in section §6.7.2.1, item 16, page 103). It is a member of a struct, which is an array without a given dimension. It must be the last member of such a struct and it must be accompanied by at least one other member, as in the following example:
struct vectord {
size_t len;
double arr[]; // the flexible array member must be last
-};
Work hasn't changed much; there has been a slight step up, perhaps because the projects I've done since joining count as fairly important to the company, but nothing technically stands out. The thing I'm still most satisfied with is probably the ab-testing tool I built early on with openresty+lua; after that it was mostly business-driven work. This year I gained some real depth in the technical logic around the business, but the business architecture upgrade and the general-purpose middleware I had always wanted to build are still stuck at the idea stage; the ab tool counts as half-finished, and I never managed the next step. I need to do more concrete things, such as a lightweight business framework, and develop a deep understanding of business logic and code I'm unfamiliar with, instead of always leaving particular logic to the particular colleague who owns it. Much of the time I'm lazy, habitually reaching for simple, safe solutions, and I should keep some technical ambition. I'd also like to build small toys in new languages, mainly rust and swift. Rust because I read a book on it this year and digested the last third poorly; the book itself is very good, it's just that rust's ownership model and reference wrappers are designed in a way that's hard to grasp (or my fundamentals are weak), so I want to review it and perhaps build a simple command-line tool. Swift because I'd like to write small native mac apps, since native is faster and smaller; web-based clients are mostly ugly and bloated, the rare good-looking ones are still heavy, at least 70-80 MB, where a native app could probably be a tenth of that. My overall career planning seems to have hit a period of real confusion: prospects at the current company aren't great, yet there don't seem to be opportunities outside that suit me either, and Hangzhou on the whole is very cutthroat. I think having time of your own is extremely important, not only for self-improvement but as a buffer, and for exercising and losing weight; when time is short I not only binge-eat in the little time I have, I also have no time to exercise. Health is the foundation of everything, and I can now clearly feel my condition declining: easily tired, easily anxious. So perhaps the direction later is toward foreign companies and the like. At work I still have a middling flaw: I get agitated easily and anxious easily. The first may have improved slightly, because much of the status quo at work is simply beyond my power to change; even when it seems unreasonable, the structure is what it is, so I might as well relax and just do my part well. On the second I'm still doing badly; my tolerance for pressure has always been poor, which has a lot to do with how and where I grew up, and frankly my parents gave me essentially no positive help here: they were better at applying pressure, using it from childhood to make me study hard, be a good student, and get into a good school, without ever really understanding my stress, let alone teaching or helping me to relieve it, mostly offering empty platitudes and often piling pressure back on instead. I hope to untangle this slowly; it probably affects my health too, and maybe I need to read some books on psychological counseling. That's it for the work installment; other installments to follow, to be continued, haha 😀
/**
+ * Here is the brief description for all the predefined environments:
+ * <ul>
+ * <li>LOCAL: Local Development environment, assume you are working at the beach with no network access</li>
+ * <li>DEV: Development environment</li>
+ * <li>FWS: Feature Web Service Test environment</li>
+ * <li>FAT: Feature Acceptance Test environment</li>
+ * <li>UAT: User Acceptance Test environment</li>
+ * <li>LPT: Load and Performance Test environment</li>
+ * <li>PRO: Production environment</li>
+ * <li>TOOLS: Tooling environment, a special area in production environment which allows
+ * access to test environment, e.g. Apollo Portal should be deployed in tools environment</li>
+ * </ul>
+ */
/**
- * Here is the brief description for all the predefined environments:
- * <ul>
- * <li>LOCAL: Local Development environment, assume you are working at the beach with no network access</li>
- * <li>DEV: Development environment</li>
- * <li>FWS: Feature Web Service Test environment</li>
- * <li>FAT: Feature Acceptance Test environment</li>
- * <li>UAT: User Acceptance Test environment</li>
- * <li>LPT: Load and Performance Test environment</li>
- * <li>PRO: Production environment</li>
- * <li>TOOLS: Tooling environment, a special area in production environment which allows
- * access to test environment, e.g. Apollo Portal should be deployed in tools environment</li>
- * </ul>
- */
class LhsPadding
-{
- protected long p1, p2, p3, p4, p5, p6, p7;
-}
-
-class Value extends LhsPadding
-{
- protected volatile long value;
-}
-
-class RhsPadding extends Value
-{
- protected long p9, p10, p11, p12, p13, p14, p15;
-}
-
-/**
- * <p>Concurrent sequence class used for tracking the progress of
- * the ring buffer and event processors. Support a number
- * of concurrent operations including CAS and order writes.
- *
- * <p>Also attempts to be more efficient with regards to false
- * sharing by adding padding around the volatile field.
- */
-public class Sequence extends RhsPadding
-{
-
As the code shows, the only field in Sequence that actually carries meaning is value; since it must be visible across threads it is declared volatile, while LhsPadding and RhsPadding pad value with 7 long fields each, before and after. A long occupies 8 bytes in Java, so 7 x 8 = 56 bytes of padding on each side comfortably covers a typical 64-byte cache line; no matter how the object is laid out, value always occupies a cache line by itself and can never suffer from false sharing.
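A quick way to see the effect is the toy demo below (my illustration, not a rigorous benchmark; note the JVM is free to reorder fields within a single class, which is exactly why Disruptor spreads the padding across a class hierarchy instead of relying on declaration order the way this toy does):

// Quick-and-dirty demo of false sharing: two threads increment two longs
// that either sit next to each other or are separated by 56 bytes of padding.
public class FalseSharingDemo {
    static class Adjacent { volatile long a; volatile long b; }
    static class Padded {
        volatile long a;
        long p1, p2, p3, p4, p5, p6, p7; // padding between the two hot fields
        volatile long b;
    }

    static long timeMillis(Runnable r1, Runnable r2) throws InterruptedException {
        Thread t1 = new Thread(r1), t2 = new Thread(r2);
        long start = System.nanoTime();
        t1.start(); t2.start(); t1.join(); t2.join();
        return (System.nanoTime() - start) / 1_000_000;
    }

    public static void main(String[] args) throws InterruptedException {
        final long N = 100_000_000L;
        Adjacent adj = new Adjacent();
        Padded pad = new Padded();
        System.out.println("adjacent: " + timeMillis(
                () -> { for (long i = 0; i < N; i++) adj.a++; },
                () -> { for (long i = 0; i < N; i++) adj.b++; }) + " ms");
        System.out.println("padded:   " + timeMillis(
                () -> { for (long i = 0; i < N; i++) pad.a++; },
                () -> { for (long i = 0; i < N; i++) pad.b++; }) + " ms");
    }
}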
This mainly introduces this relatively new garbage collector. The collectors before G1 were all based on the memory layout shown in the figure, with a young generation, an old generation, and a permanent generation (before jdk8), and they are all generational, e.g. serial and parallel, usually applied in pairs, originally serial with serial old. The young and old generations are collected differently: the young generation mainly uses mark-copy, hence an eden plus two survivor regions, while the old generation generally uses mark-compact; G1 handles this differently. Looking at G1's memory layout, the difference is large: G1 splits the heap into equally sized regions, each occupying a contiguous range of virtual memory, and any given region plays a role similar to Eden, Survivor, or Old Generation in the older collectors. At the code level this looks as follows
-
// We encode the value of the heap region type so the generation can be
- // determined quickly. The tag is split into two parts:
- //
- // major type (young, old, humongous, archive) : top N-1 bits
- // minor type (eden / survivor, starts / cont hum, etc.) : bottom 1 bit
- //
- // If there's need to increase the number of minor types in the
- // future, we'll have to increase the size of the latter and hence
- // decrease the size of the former.
- //
- // 00000 0 [ 0] Free
- //
- // 00001 0 [ 2] Young Mask
- // 00001 0 [ 2] Eden
- // 00001 1 [ 3] Survivor
- //
- // 00010 0 [ 4] Humongous Mask
- // 00100 0 [ 8] Pinned Mask
- // 00110 0 [12] Starts Humongous
- // 00110 1 [13] Continues Humongous
- //
- // 01000 0 [16] Old Mask
- //
- // 10000 0 [32] Archive Mask
- // 11100 0 [56] Open Archive
- // 11100 1 [57] Closed Archive
- //
- typedef enum {
- FreeTag = 0,
+ Disruptor 系列三
+ /2022/09/25/Disruptor-%E7%B3%BB%E5%88%97%E4%B8%89/
+ I had been somewhat misled here: gatingSequences marks each processor's position, but how it gets recorded and updated wasn't clear to me. The point is that gatingSequences is an array of Sequence. First look at how entries get added: they are added in the com.lmax.disruptor.RingBuffer#addGatingSequences method. The chain starts at com.lmax.disruptor.dsl.Disruptor#handleEventsWith(com.lmax.disruptor.EventHandler<? super T>...), which then executes com.lmax.disruptor.dsl.Disruptor#createEventProcessors(com.lmax.disruptor.Sequence[], com.lmax.disruptor.EventHandler<? super T>[])
+
EventHandlerGroup<T> createEventProcessors(
+ final Sequence[] barrierSequences,
+ final EventHandler<? super T>[] eventHandlers)
+ {
+ checkNotStarted();
- YoungMask = 2,
- EdenTag = YoungMask,
- SurvTag = YoungMask + 1,
+ final Sequence[] processorSequences = new Sequence[eventHandlers.length];
+ final SequenceBarrier barrier = ringBuffer.newBarrier(barrierSequences);
- HumongousMask = 4,
- PinnedMask = 8,
- StartsHumongousTag = HumongousMask | PinnedMask,
- ContinuesHumongousTag = HumongousMask | PinnedMask + 1,
+ for (int i = 0, eventHandlersLength = eventHandlers.length; i < eventHandlersLength; i++)
+ {
+ final EventHandler<? super T> eventHandler = eventHandlers[i];
- OldMask = 16,
- OldTag = OldMask,
+ // here the handler is wrapped into a BatchEventProcessor
+ final BatchEventProcessor<T> batchEventProcessor =
+ new BatchEventProcessor<>(ringBuffer, barrier, eventHandler);
- // Archive regions are regions with immutable content (i.e. not reclaimed, and
- // not allocated into during regular operation). They differ in the kind of references
- // allowed for the contained objects:
- // - Closed archive regions form a separate self-contained (closed) object graph
- // within the set of all of these regions. No references outside of closed
- // archive regions are allowed.
- // - Open archive regions have no restrictions on the references of their objects.
- // Objects within these regions are allowed to have references to objects
- // contained in any other kind of regions.
- ArchiveMask = 32,
- OpenArchiveTag = ArchiveMask | PinnedMask | OldMask,
- ClosedArchiveTag = ArchiveMask | PinnedMask | OldMask + 1
- } Tag;
-package javax.servlet;
+ consumerRepository.add(batchEventProcessor, eventHandler, barrier);
+ processorSequences[i] = batchEventProcessor.getSequence();
+ }
-import java.io.IOException;
+ updateGatingSequencesForNextInChain(barrierSequences, processorSequences);
-/**
- * Defines methods that all servlets must implement.
- *
- * <p>
- * A servlet is a small Java program that runs within a Web server. Servlets
- * receive and respond to requests from Web clients, usually across HTTP, the
- * HyperText Transfer Protocol.
- *
- * <p>
- * To implement this interface, you can write a generic servlet that extends
- * <code>javax.servlet.GenericServlet</code> or an HTTP servlet that extends
- * <code>javax.servlet.http.HttpServlet</code>.
- *
- * <p>
- * This interface defines methods to initialize a servlet, to service requests,
- * and to remove a servlet from the server. These are known as life-cycle
- * methods and are called in the following sequence:
- * <ol>
- * <li>The servlet is constructed, then initialized with the <code>init</code>
- * method.
- * <li>Any calls from clients to the <code>service</code> method are handled.
- * <li>The servlet is taken out of service, then destroyed with the
- * <code>destroy</code> method, then garbage collected and finalized.
- * </ol>
- *
- * <p>
- * In addition to the life-cycle methods, this interface provides the
- * <code>getServletConfig</code> method, which the servlet can use to get any
- * startup information, and the <code>getServletInfo</code> method, which allows
+ return new EventHandlerGroup<>(this, consumerRepository, processorSequences);
+ }
public void run()
+ {
+ if (running.compareAndSet(IDLE, RUNNING))
+ {
+ sequenceBarrier.clearAlert();
+
+ notifyStart();
+ try
+ {
+ if (running.get() == RUNNING)
+ {
+ processEvents();
+ }
+ }
+ finally
+ {
+ notifyShutdown();
+ running.set(IDLE);
+ }
+ }
+ else
+ {
+ // This is a little bit of guess work. The running state could of changed to HALTED by
+ // this point. However, Java does not have compareAndExchange which is the only way
+ // to get it exactly correct.
+ if (running.get() == RUNNING)
+ {
+ throw new IllegalStateException("Thread is already running");
+ }
+ else
+ {
+ earlyExit();
+ }
+ }
+ }
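For orientation, this is roughly how the wiring above is driven from user code, a minimal setup sketch assuming LMAX Disruptor 3.x and an invented LongEvent class; handleEventsWith() is what ends up calling createEventProcessors() and registering each handler's Sequence as a gating sequence:

import com.lmax.disruptor.dsl.Disruptor;
import com.lmax.disruptor.util.DaemonThreadFactory;

// Minimal wiring sketch: each handler passed to handleEventsWith() becomes
// a BatchEventProcessor whose Sequence gates the ring buffer's producers.
public class DisruptorWiring {
    static class LongEvent { long value; }

    public static void main(String[] args) {
        Disruptor<LongEvent> disruptor = new Disruptor<>(
                LongEvent::new, 1024, DaemonThreadFactory.INSTANCE);

        // the lambda is an EventHandler; its progress is tracked by a Sequence
        disruptor.handleEventsWith((event, sequence, endOfBatch) ->
                System.out.println("got " + event.value));

        disruptor.start();
        disruptor.getRingBuffer().publishEvent((event, seq) -> event.value = seq);
        disruptor.shutdown();
    }
}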
+package javax.servlet;
+
+import java.io.IOException;
+
+/**
+ * Defines methods that all servlets must implement.
+ *
+ * <p>
+ * A servlet is a small Java program that runs within a Web server. Servlets
+ * receive and respond to requests from Web clients, usually across HTTP, the
+ * HyperText Transfer Protocol.
+ *
+ * <p>
+ * To implement this interface, you can write a generic servlet that extends
+ * <code>javax.servlet.GenericServlet</code> or an HTTP servlet that extends
+ * <code>javax.servlet.http.HttpServlet</code>.
+ *
+ * <p>
+ * This interface defines methods to initialize a servlet, to service requests,
+ * and to remove a servlet from the server. These are known as life-cycle
+ * methods and are called in the following sequence:
+ * <ol>
+ * <li>The servlet is constructed, then initialized with the <code>init</code>
+ * method.
+ * <li>Any calls from clients to the <code>service</code> method are handled.
+ * <li>The servlet is taken out of service, then destroyed with the
+ * <code>destroy</code> method, then garbage collected and finalized.
+ * </ol>
+ *
+ * <p>
+ * In addition to the life-cycle methods, this interface provides the
+ * <code>getServletConfig</code> method, which the servlet can use to get any
+ * startup information, and the <code>getServletInfo</code> method, which allows
* the servlet to return basic information about itself, such as author,
* version, and copyright.
*
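As a minimal illustration of the life-cycle this javadoc describes (a generic example of mine, assuming the javax.servlet API is on the classpath):

import java.io.IOException;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

// The container calls init() once, service()/doGet() for each request,
// and destroy() before the servlet is garbage collected and finalized.
public class HelloServlet extends HttpServlet {
    @Override
    public void init() { /* one-time setup, e.g. open resources */ }

    @Override
    protected void doGet(HttpServletRequest req, HttpServletResponse resp)
            throws ServletException, IOException {
        resp.getWriter().println("hello");
    }

    @Override
    public void destroy() { /* release resources on shutdown */ }
}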
@@ -2417,80 +2439,202 @@ Node *clone(Node *graph) {
+ G1收集器概述
+ /2020/02/09/G1%E6%94%B6%E9%9B%86%E5%99%A8%E6%A6%82%E8%BF%B0/
+ G1: The Garbage-First Collector is aimed at machines with multi-core CPUs and large memory, and its defining feature is predictable pause times. The official description is a solution that provides low-latency behavior on large heaps, typically 6GB and above, with stable, predictable pause times below 0.5 seconds.
+
This mainly introduces this relatively new garbage collector. The collectors before G1 were all based on the memory layout shown in the figure, with a young generation, an old generation, and a permanent generation (before jdk8), and they are all generational, e.g. serial and parallel, usually applied in pairs, originally serial with serial old. The young and old generations are collected differently: the young generation mainly uses mark-copy, hence an eden plus two survivor regions, while the old generation generally uses mark-compact; G1 handles this differently. Looking at G1's memory layout, the difference is large: G1 splits the heap into equally sized regions, each occupying a contiguous range of virtual memory, and any given region plays a role similar to Eden, Survivor, or Old Generation in the older collectors. At the code level this looks as follows
+
// We encode the value of the heap region type so the generation can be
+ // determined quickly. The tag is split into two parts:
+ //
+ // major type (young, old, humongous, archive) : top N-1 bits
+ // minor type (eden / survivor, starts / cont hum, etc.) : bottom 1 bit
+ //
+ // If there's need to increase the number of minor types in the
+ // future, we'll have to increase the size of the latter and hence
+ // decrease the size of the former.
+ //
+ // 00000 0 [ 0] Free
+ //
+ // 00001 0 [ 2] Young Mask
+ // 00001 0 [ 2] Eden
+ // 00001 1 [ 3] Survivor
+ //
+ // 00010 0 [ 4] Humongous Mask
+ // 00100 0 [ 8] Pinned Mask
+ // 00110 0 [12] Starts Humongous
+ // 00110 1 [13] Continues Humongous
+ //
+ // 01000 0 [16] Old Mask
+ //
+ // 10000 0 [32] Archive Mask
+ // 11100 0 [56] Open Archive
+ // 11100 1 [57] Closed Archive
+ //
+ typedef enum {
+ FreeTag = 0,
-
Result
-]]>
-
- Java
- leetcode
-
-
- leetcode
- java
- 题解
-
-
-
- JVM源码分析之G1垃圾收集器分析一
- /2019/12/07/JVM-G1-Part-1/
+ YoungMask = 2,
+ EdenTag = YoungMask,
+ SurvTag = YoungMask + 1,
+
+ HumongousMask = 4,
+ PinnedMask = 8,
+ StartsHumongousTag = HumongousMask | PinnedMask,
+ ContinuesHumongousTag = HumongousMask | PinnedMask + 1,
+
+ OldMask = 16,
+ OldTag = OldMask,
+
+ // Archive regions are regions with immutable content (i.e. not reclaimed, and
+ // not allocated into during regular operation). They differ in the kind of references
+ // allowed for the contained objects:
+ // - Closed archive regions form a separate self-contained (closed) object graph
+ // within the set of all of these regions. No references outside of closed
+ // archive regions are allowed.
+ // - Open archive regions have no restrictions on the references of their objects.
+ // Objects within these regions are allowed to have references to objects
+ // contained in any other kind of regions.
+ ArchiveMask = 32,
+ OpenArchiveTag = ArchiveMask | PinnedMask | OldMask,
+ ClosedArchiveTag = ArchiveMask | PinnedMask | OldMask + 1
+ } Tag;
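As a sanity check on the table: StartsHumongousTag = HumongousMask | PinnedMask = 4 | 8 = 12, and since + binds tighter than | in C++, ContinuesHumongousTag = HumongousMask | (PinnedMask + 1) = 4 | 9 = 13; likewise ClosedArchiveTag = 32 | 8 | (16 + 1) = 57, all matching the encoded values listed in the comment.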
class LhsPadding
+{
+ protected long p1, p2, p3, p4, p5, p6, p7;
+}
+
+class Value extends LhsPadding
+{
+ protected volatile long value;
+}
+
+class RhsPadding extends Value
+{
+ protected long p9, p10, p11, p12, p13, p14, p15;
+}
+
+/**
+ * <p>Concurrent sequence class used for tracking the progress of
+ * the ring buffer and event processors. Support a number
+ * of concurrent operations including CAS and order writes.
+ *
+ * <p>Also attempts to be more efficient with regards to false
+ * sharing by adding padding around the volatile field.
+ */
+public class Sequence extends RhsPadding
+{
+
As the code shows, the only field in Sequence that actually carries meaning is value, which is declared volatile so that it stays visible across threads. LhsPadding and RhsPadding pad seven long fields before and after value respectively, and since a long occupies 8 bytes in Java, value always ends up on a cache line of its own, so it cannot suffer from false sharing.
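To make the effect tangible, here is a minimal self-contained sketch (my own demo, not Disruptor code; all class and field names are invented) that compares two threads updating adjacent volatile longs with two threads updating padded ones:

public class FalseSharingDemo {
    static class Plain {
        volatile long a;
        volatile long b; // very likely shares a cache line with a
    }

    static class Padded {
        volatile long a;
        long p1, p2, p3, p4, p5, p6, p7; // 56 bytes of padding between the hot fields
        volatile long b;
    }

    static long time(Runnable r1, Runnable r2) throws InterruptedException {
        Thread t1 = new Thread(r1);
        Thread t2 = new Thread(r2);
        long start = System.nanoTime();
        t1.start();
        t2.start();
        t1.join();
        t2.join();
        return (System.nanoTime() - start) / 1_000_000;
    }

    public static void main(String[] args) throws InterruptedException {
        final long N = 100_000_000L;
        Plain plain = new Plain();
        Padded padded = new Padded();
        long tPlain = time(() -> { for (long i = 0; i < N; i++) plain.a++; },
                           () -> { for (long i = 0; i < N; i++) plain.b++; });
        long tPadded = time(() -> { for (long i = 0; i < N; i++) padded.a++; },
                            () -> { for (long i = 0; i < N; i++) padded.b++; });
        System.out.println("plain: " + tPlain + " ms, padded: " + tPadded + " ms");
    }
}

One caveat: the JVM is free to reorder fields within a single class, so in-class padding like Padded above is only a best-effort trick; that is exactly why Disruptor spreads the padding across a class hierarchy (superclass fields are laid out before subclass fields). On most HotSpot builds the padded variant still runs visibly faster.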
public int maxSubArray(int[] nums) {
-    int max = nums[0];
-    int sum = nums[0];
-    for (int i = 1; i < nums.length; i++) {
-        // the key line: if the running sum so far is negative, drop it; adding it can only make things smaller
-        sum = Math.max(nums[i], sum + nums[i]);
-        // max carries the best value seen so far
-        max = Math.max(max, sum);
-    }
-    return max;
-}
Error initializing error="failed to read or create private key: failed to save private key to disk: open /etc/headscale/private.key: read-only file system"
---
+# headscale will look for a configuration file named `config.yaml` (or `config.json`) in the following order:
+#
+# - `/etc/headscale`
+# - `~/.headscale`
+# - current working directory
+# The url clients will connect to.
+# Typically this will be a domain like:
+#
+# https://myheadscale.example.com:443
+#
+server_url: http://127.0.0.1:8080
-
EventHandlerGroup<T> createEventProcessors(
-    final Sequence[] barrierSequences,
-    final EventHandler<? super T>[] eventHandlers)
-{
-    checkNotStarted();
+# Address to listen to / bind to on the server
+#
+# For production:
+# listen_addr: 0.0.0.0:8080
+listen_addr: 127.0.0.1:8080
-    final Sequence[] processorSequences = new Sequence[eventHandlers.length];
-    final SequenceBarrier barrier = ringBuffer.newBarrier(barrierSequences);
+# Address to listen to /metrics, you may want
+# to keep this endpoint private to your internal
+# network
+#
+metrics_listen_addr: 127.0.0.1:9090
-    for (int i = 0, eventHandlersLength = eventHandlers.length; i < eventHandlersLength; i++)
-    {
-        final EventHandler<? super T> eventHandler = eventHandlers[i];
+# Address to listen for gRPC.
+# gRPC is used for controlling a headscale server
+# remotely with the CLI
+# Note: Remote access _only_ works if you have
+# valid certificates.
+#
+# For production:
+# grpc_listen_addr: 0.0.0.0:50443
+grpc_listen_addr: 127.0.0.1:50443
-        // wrap each handler into a BatchEventProcessor
-        final BatchEventProcessor<T> batchEventProcessor =
-            new BatchEventProcessor<>(ringBuffer, barrier, eventHandler);
+# Allow the gRPC admin interface to run in INSECURE
+# mode. This is not recommended as the traffic will
+# be unencrypted. Only enable if you know what you
+# are doing.
+grpc_allow_insecure: false
-        if (exceptionHandler != null)
-        {
-            batchEventProcessor.setExceptionHandler(exceptionHandler);
-        }
+# Private key used to encrypt the traffic between headscale
+# and Tailscale clients.
+# The private key file will be autogenerated if it's missing.
+#
+# For production:
+# /var/lib/headscale/private.key
+private_key_path: ./private.key
-        consumerRepository.add(batchEventProcessor, eventHandler, barrier);
-        processorSequences[i] = batchEventProcessor.getSequence();
-    }
+# The Noise section includes specific configuration for the
+# TS2021 Noise protocol
+noise:
+ # The Noise private key is used to encrypt the
+ # traffic between headscale and Tailscale clients when
+ # using the new Noise-based protocol. It must be different
+ # from the legacy private key.
+ #
+ # For production:
+ # private_key_path: /var/lib/headscale/noise_private.key
+ private_key_path: ./noise_private.key
-    updateGatingSequencesForNextInChain(barrierSequences, processorSequences);
+# List of IP prefixes to allocate tailaddresses from.
+# Each prefix consists of either an IPv4 or IPv6 address,
+# and the associated prefix length, delimited by a slash.
+# While this looks like it can take arbitrary values, it
+# needs to be within IP ranges supported by the Tailscale
+# client.
+# IPv6: https://github.com/tailscale/tailscale/blob/22ebb25e833264f58d7c3f534a8b166894a89536/net/tsaddr/tsaddr.go#LL81C52-L81C71
+# IPv4: https://github.com/tailscale/tailscale/blob/22ebb25e833264f58d7c3f534a8b166894a89536/net/tsaddr/tsaddr.go#L33
+ip_prefixes:
+ - fd7a:115c:a1e0::/48
+ - 100.64.0.0/10
-    return new EventHandlerGroup<>(this, consumerRepository, processorSequences);
-}
+# DERP is a relay system that Tailscale uses when a direct
+# connection cannot be established.
+# https://tailscale.com/blog/how-tailscale-works/#encrypted-tcp-relays-derp
+#
+# headscale needs a list of DERP servers that can be presented
+# to the clients.
+derp:
+ server:
+ # If enabled, runs the embedded DERP server and merges it into the rest of the DERP config
+ # The Headscale server_url defined above MUST be using https, DERP requires TLS to be in place
+    enabled: false
-
+ # Region ID to use for the embedded DERP server.
+ # The local DERP prevails if the region ID collides with other region ID coming from
+ # the regular DERP config.
+    region_id: 999
-
public void run()
-{
-    if (running.compareAndSet(IDLE, RUNNING))
-    {
-        sequenceBarrier.clearAlert();
+    # Region code and name are displayed in the Tailscale UI to identify a DERP region
+    region_code: "headscale"
+    region_name: "Headscale Embedded DERP"
-        notifyStart();
-        try
-        {
-            if (running.get() == RUNNING)
-            {
-                processEvents();
-            }
-        }
-        finally
-        {
-            notifyShutdown();
-            running.set(IDLE);
-        }
-    }
-    else
-    {
-        // This is a little bit of guess work. The running state could have changed to HALTED by
-        // this point. However, Java does not have compareAndExchange which is the only way
-        // to get it exactly correct.
-        if (running.get() == RUNNING)
-        {
-            throw new IllegalStateException("Thread is already running");
-        }
-        else
-        {
-            earlyExit();
-        }
-    }
-}
-
Then comes processEvents:
-
private void processEvents()
-{
-    T event = null;
-    long nextSequence = sequence.get() + 1L;
+    # Listens over UDP at the configured address for STUN connections - to help with NAT traversal.
+    # When the embedded DERP server is enabled stun_listen_addr MUST be defined.
+    #
+    # For more details on how this works, check this great article: https://tailscale.com/blog/how-tailscale-works/
+    stun_listen_addr: "0.0.0.0:3478"
-    while (true)
-    {
-        try
-        {
-            final long availableSequence = sequenceBarrier.waitFor(nextSequence);
-            if (batchStartAware != null)
-            {
-                batchStartAware.onBatchStart(availableSequence - nextSequence + 1);
-            }
+    # List of externally available DERP maps encoded in JSON
+    urls:
+        - https://controlplane.tailscale.com/derpmap/default
-            while (nextSequence <= availableSequence)
-            {
-                event = dataProvider.get(nextSequence);
-                eventHandler.onEvent(event, nextSequence, nextSequence == availableSequence);
-                nextSequence++;
-            }
-            // on a normal pass, advance the sequence to availableSequence, since everything up to it has been handled
-            sequence.set(availableSequence);
-        }
-        catch (final TimeoutException e)
-        {
-            notifyTimeout(sequence.get());
-        }
-        catch (final AlertException ex)
-        {
-            if (running.get() != RUNNING)
-            {
-                break;
-            }
-        }
-        catch (final Throwable ex)
-        {
-            handleEventException(ex, nextSequence, event);
-            // on an exception, record only nextSequence, then step one past the bad event
-            sequence.set(nextSequence);
-            nextSequence++;
-        }
-    }
-}
-]]>
-
- Java
-
-
- Java
- Disruptor
-
-
-
- Leetcode 1115 Print FooBar Alternately (*Medium*): Solution Analysis
- /2022/05/01/Leetcode-1115-%E4%BA%A4%E6%9B%BF%E6%89%93%E5%8D%B0-FooBar-Print-FooBar-Alternately-Medium-%E9%A2%98%E8%A7%A3%E5%88%86%E6%9E%90/
- Bored, I rolled a random problem and saw there were concurrency questions, which is how I found this one. At first glance my idea was also semaphores, though a Condition should work as well. Problems of this kind seem a bit hard to debug locally, since the judge apparently extracts the code to run it, which differs from the usual straight-line logic. Suppose you are given the following code:
-
class FooBar {
-    public void foo() {
-        for (int i = 0; i < n; i++) {
-            print("foo");
-        }
-    }
+ # Locally available DERP map files encoded in YAML
+ #
+ # This option is mostly interesting for people hosting
+ # their own DERP servers:
+ # https://tailscale.com/kb/1118/custom-derp-servers/
+ #
+ # paths:
+ # - /etc/headscale/derp-example.yaml
+    paths: []
-    public void bar() {
-        for (int i = 0; i < n; i++) {
-            print("bar");
-        }
-    }
-}
-
The same instance of FooBar will be passed to two different threads:
-
-
thread A will call foo(), while
-
thread B will call bar(). Modify the given program to output "foobar" n times.
-
-
Examples
Example 1:
-
Input: n = 1 Output: “foobar” Explanation: There are two threads being fired asynchronously. One of them calls foo(), while the other calls bar(). “foobar” is being output 1 time.
-
-
Example 2:
-
Input: n = 2 Output: “foobarfoobar” Explanation: “foobar” is being output 2 times.
-
-
Solution
Brief analysis
Using a semaphore is quite intuitive: let the foo-printing thread hold the permit first; after printing it waits and hands bar a permit, then the bar thread runs, prints, consumes the bar permit, and hands a permit back to foo
-
code
class FooBar {
-
-    private final Semaphore foo = new Semaphore(1);
-    private final Semaphore bar = new Semaphore(0);
-    private int n;
+    # If enabled, a worker will be set up to periodically
+    # refresh the given sources and update the derpmap.
+    auto_update_enabled: true
-    public FooBar(int n) {
-        this.n = n;
-    }
+ # How often should we check for DERP updates?
+ update_frequency: 24h
-    public void foo(Runnable printFoo) throws InterruptedException {
-
-        for (int i = 0; i < n; i++) {
-            foo.acquire();
-            // printFoo.run() outputs "foo". Do not change or remove this line.
-            printFoo.run();
-            bar.release();
-        }
-    }
+# Disables the automatic check for headscale updates on startup
+disable_check_updates: false
-    public void bar(Runnable printBar) throws InterruptedException {
-
-        for (int i = 0; i < n; i++) {
-            bar.acquire();
-            // printBar.run() outputs "bar". Do not change or remove this line.
-            printBar.run();
-            foo.release();
-        }
-    }
-}
Error initializing error="failed to read or create private key: failed to save private key to disk: open /etc/headscale/private.key: read-only file system"
---
-# headscale will look for a configuration file named `config.yaml` (or `config.json`) in the following order:
-#
-# - `/etc/headscale`
-# - `~/.headscale`
-# - current working directory
+# Period to check for node updates within the tailnet. A value too low will severely affect
+# CPU consumption of Headscale. A value too high (over 60s) will cause problems
+# for the nodes, as they won't get updates or keep alive messages frequently enough.
+# In case of doubts, do not touch the default 10s.
+node_update_check_interval: 10s
-# The url clients will connect to.
-# Typically this will be a domain like:
-#
-# https://myheadscale.example.com:443
-#
-server_url: http://127.0.0.1:8080
+# SQLite config
+db_type: sqlite3
-# Address to listen to / bind to on the server
-#
-# For production:
-# listen_addr: 0.0.0.0:8080
-listen_addr: 127.0.0.1:8080
+# db_path: /var/lib/headscale/db.sqlite
+db_path: ./db.sqlite
-# Address to listen to /metrics, you may want
-# to keep this endpoint private to your internal
-# network
-#
-metrics_listen_addr: 127.0.0.1:9090
+# # Postgres config
+# If using a Unix socket to connect to Postgres, set the socket path in the 'host' field and leave 'port' blank.
+# db_type: postgres
+# db_host: localhost
+# db_port: 5432
+# db_name: headscale
+# db_user: foo
+# db_pass: bar
-# Address to listen for gRPC.
-# gRPC is used for controlling a headscale server
-# remotely with the CLI
-# Note: Remote access _only_ works if you have
-# valid certificates.
+# If other 'sslmode' is required instead of 'require(true)' and 'disabled(false)', set the 'sslmode' you need
+# in the 'db_ssl' field. Refers to https://www.postgresql.org/docs/current/libpq-ssl.html Table 34.1.
+# db_ssl: false
+
+### TLS configuration
+#
-# For production:
-# grpc_listen_addr: 0.0.0.0:50443
-grpc_listen_addr: 127.0.0.1:50443
+## Let's encrypt / ACME
+#
+# headscale supports automatically requesting and setting up
+# TLS for a domain with Let's Encrypt.
+#
+# URL to ACME directory
+acme_url: https://acme-v02.api.letsencrypt.org/directory
-# Allow the gRPC admin interface to run in INSECURE
-# mode. This is not recommended as the traffic will
-# be unencrypted. Only enable if you know what you
-# are doing.
-grpc_allow_insecure: false
+# Email to register with ACME provider
+acme_email:""
-# Private key used to encrypt the traffic between headscale
-# and Tailscale clients.
-# The private key file will be autogenerated if it's missing.
-#
+# Domain name to request a TLS certificate for:
+tls_letsencrypt_hostname:""
+
+# Path to store certificates and metadata needed by
+# letsencrypt
+# For production:
-# /var/lib/headscale/private.key
-private_key_path: ./private.key
-
-# The Noise section includes specific configuration for the
-# TS2021 Noise protocol
-noise:
- # The Noise private key is used to encrypt the
- # traffic between headscale and Tailscale clients when
- # using the new Noise-based protocol. It must be different
- # from the legacy private key.
- #
- # For production:
- # private_key_path: /var/lib/headscale/noise_private.key
- private_key_path: ./noise_private.key
-
-# List of IP prefixes to allocate tailaddresses from.
-# Each prefix consists of either an IPv4 or IPv6 address,
-# and the associated prefix length, delimited by a slash.
-# While this looks like it can take arbitrary values, it
-# needs to be within IP ranges supported by the Tailscale
-# client.
-# IPv6: https://github.com/tailscale/tailscale/blob/22ebb25e833264f58d7c3f534a8b166894a89536/net/tsaddr/tsaddr.go#LL81C52-L81C71
-# IPv4: https://github.com/tailscale/tailscale/blob/22ebb25e833264f58d7c3f534a8b166894a89536/net/tsaddr/tsaddr.go#L33
-ip_prefixes:
- - fd7a:115c:a1e0::/48
- - 100.64.0.0/10
-
-# DERP is a relay system that Tailscale uses when a direct
-# connection cannot be established.
-# https://tailscale.com/blog/how-tailscale-works/#encrypted-tcp-relays-derp
-#
-# headscale needs a list of DERP servers that can be presented
-# to the clients.
-derp:
- server:
- # If enabled, runs the embedded DERP server and merges it into the rest of the DERP config
- # The Headscale server_url defined above MUST be using https, DERP requires TLS to be in place
-    enabled: false
-
- # Region ID to use for the embedded DERP server.
- # The local DERP prevails if the region ID collides with other region ID coming from
- # the regular DERP config.
-    region_id: 999
-
- # Region code and name are displayed in the Tailscale UI to identify a DERP region
- region_code:"headscale"
- region_name:"Headscale Embedded DERP"
-
- # Listens over UDP at the configured address for STUN connections - to help with NAT traversal.
- # When the embedded DERP server is enabled stun_listen_addr MUST be defined.
- #
- # For more details on how this works, check this great article: https://tailscale.com/blog/how-tailscale-works/
- stun_listen_addr:"0.0.0.0:3478"
-
- # List of externally available DERP maps encoded in JSON
- urls:
- - https://controlplane.tailscale.com/derpmap/default
-
- # Locally available DERP map files encoded in YAML
- #
- # This option is mostly interesting for people hosting
- # their own DERP servers:
- # https://tailscale.com/kb/1118/custom-derp-servers/
- #
- # paths:
- # - /etc/headscale/derp-example.yaml
-    paths: []
-
-    # If enabled, a worker will be set up to periodically
-    # refresh the given sources and update the derpmap.
-    auto_update_enabled: true
-
- # How often should we check for DERP updates?
- update_frequency: 24h
-
-# Disables the automatic check for headscale updates on startup
-disable_check_updates: false
-
-# Time before an inactive ephemeral node is deleted?
-ephemeral_node_inactivity_timeout: 30m
-
-# Period to check for node updates within the tailnet. A value too low will severely affect
-# CPU consumption of Headscale. A value too high (over 60s) will cause problems
-# for the nodes, as they won't get updates or keep alive messages frequently enough.
-# In case of doubts, do not touch the default 10s.
-node_update_check_interval: 10s
-
-# SQLite config
-db_type: sqlite3
-
-# For production:
-# db_path: /var/lib/headscale/db.sqlite
-db_path: ./db.sqlite
-
-# # Postgres config
-# If using a Unix socket to connect to Postgres, set the socket path in the 'host' field and leave 'port' blank.
-# db_type: postgres
-# db_host: localhost
-# db_port: 5432
-# db_name: headscale
-# db_user: foo
-# db_pass: bar
-
-# If other 'sslmode' is required instead of 'require(true)' and 'disabled(false)', set the 'sslmode' you need
-# in the 'db_ssl' field. Refers to https://www.postgresql.org/docs/current/libpq-ssl.html Table 34.1.
-# db_ssl: false
-
-### TLS configuration
-#
-## Let's encrypt / ACME
-#
-# headscale supports automatically requesting and setting up
-# TLS for a domain with Let's Encrypt.
-#
-# URL to ACME directory
-acme_url: https://acme-v02.api.letsencrypt.org/directory
-
-# Email to register with ACME provider
-acme_email:""
-
-# Domain name to request a TLS certificate for:
-tls_letsencrypt_hostname:""
-
-# Path to store certificates and metadata needed by
-# letsencrypt
-# For production:
-# tls_letsencrypt_cache_dir: /var/lib/headscale/cache
-tls_letsencrypt_cache_dir: ./cache
+# tls_letsencrypt_cache_dir: /var/lib/headscale/cache
+tls_letsencrypt_cache_dir: ./cache
# Type of ACME challenge to use, currently supported types:
# HTTP-01 or TLS-ALPN-01
@@ -3949,49 +3672,97 @@ inorder = [9,3,15,20,7]<
- Leetcode 124 Binary Tree Maximum Path Sum: Solution Analysis
- /2021/01/24/Leetcode-124-%E4%BA%8C%E5%8F%89%E6%A0%91%E4%B8%AD%E7%9A%84%E6%9C%80%E5%A4%A7%E8%B7%AF%E5%BE%84%E5%92%8C-Binary-Tree-Maximum-Path-Sum-%E9%A2%98%E8%A7%A3%E5%88%86%E6%9E%90/
- Problem description
A path in a binary tree is a sequence of nodes where each pair of adjacent nodes in the sequence has an edge connecting them. A node can only appear in the sequence at most once. Note that the path does not need to pass through the root.
-
The path sum of a path is the sum of the node’s values in the path.
-
Given the root of a binary tree, return the maximum path sum of any path.
public int maxSubArray(int[] nums) {
+    int max = nums[0];
+    int sum = nums[0];
+    for (int i = 1; i < nums.length; i++) {
+        // the key line: if the running sum so far is negative, drop it; adding it can only make things smaller
+        sum = Math.max(nums[i], sum + nums[i]);
+        // max carries the best value seen so far
+        max = Math.max(max, sum);
+    }
+    return max;
+}
Given a 2D grid of size m x n and an integer k. You need to shift the grid k times.
-
In one shift operation:
-
Element at grid[i][j] moves to grid[i][j + 1]. Element at grid[i][n - 1] moves to grid[i + 1][0]. Element at grid[m - 1][n - 1] moves to grid[0][0]. Return the 2D grid after applying shift operation k times.
-
Examples
Example 1:
-
-
Input: grid = [[1,2,3],[4,5,6],[7,8,9]], k = 1 Output: [[9,1,2],[3,4,5],[6,7,8]]
// the body is a straightforward application of recursion
+public int maxDepth(TreeNode root) {
+    // one of the recursion's exit conditions
+    if (root == null) {
+        return 0;
+    }
+    int left = 0;
+    int right = 0;
+    // if there is a left subtree, recurse into it
+    if (root.left != null) {
+        left = maxDepth(root.left);
+    }
+    // if there is a right subtree, recurse into it
+    if (root.right != null) {
+        right = maxDepth(root.right);
+    }
+    // once both calls return, take the deeper side plus one
+    return Math.max(left + 1, right + 1);
+}
+
Analysis
For tree problems like this, a recursive formulation is usually the most convenient; the main thing to get right is the exit condition
+]]>
+
+ Java
+ leetcode
+ Binary Tree
+ java
+ Binary Tree
+ DFS
+
+
+ leetcode
+ java
+ Binary Tree
+ DFS
+ 二叉树
+ 题解
+
+
+
+ Leetcode 1115 Print FooBar Alternately (*Medium*): Solution Analysis
+ /2022/05/01/Leetcode-1115-%E4%BA%A4%E6%9B%BF%E6%89%93%E5%8D%B0-FooBar-Print-FooBar-Alternately-Medium-%E9%A2%98%E8%A7%A3%E5%88%86%E6%9E%90/
+ Bored, I rolled a random problem and saw there were concurrency questions, which is how I found this one. At first glance my idea was also semaphores, though a Condition should work as well. Problems of this kind seem a bit hard to debug locally, since the judge apparently extracts the code to run it, which differs from the usual straight-line logic. Suppose you are given the following code:
+
class FooBar {
+    public void foo() {
+        for (int i = 0; i < n; i++) {
+            print("foo");
+        }
+    }
+
+    public void bar() {
+        for (int i = 0; i < n; i++) {
+            print("bar");
+        }
+    }
+}
+
The same instance of FooBar will be passed to two different threads:
+
+
thread A will call foo(), while
+
thread B will call bar(). Modify the given program to output "foobar" n times.
+
+
Examples
Example 1:
+
Input: n = 1 Output: “foobar” Explanation: There are two threads being fired asynchronously. One of them calls foo(), while the other calls bar(). “foobar” is being output 1 time.
+
+
Example 2:
+
Input: n = 2 Output: “foobarfoobar” Explanation: “foobar” is being output 2 times.
+
+
Solution
Brief analysis
Using a semaphore is quite intuitive: let the foo-printing thread hold the permit first; after printing it waits and hands bar a permit, then the bar thread runs, prints, consumes the bar permit, and hands a permit back to foo
+
code
class FooBar {
+
+    private final Semaphore foo = new Semaphore(1);
+    private final Semaphore bar = new Semaphore(0);
+    private int n;
+
+    public FooBar(int n) {
+        this.n = n;
+    }
+
+    public void foo(Runnable printFoo) throws InterruptedException {
+
+        for (int i = 0; i < n; i++) {
+            foo.acquire();
+            // printFoo.run() outputs "foo". Do not change or remove this line.
+            printFoo.run();
+            bar.release();
+        }
+    }
+
+    public void bar(Runnable printBar) throws InterruptedException {
+
+        for (int i = 0; i < n; i++) {
+            bar.acquire();
+            // printBar.run() outputs "bar". Do not change or remove this line.
+            printBar.run();
+            foo.release();
+        }
+    }
+}
A path in a binary tree is a sequence of nodes where each pair of adjacent nodes in the sequence has an edge connecting them. A node can only appear in the sequence at most once. Note that the path does not need to pass through the root.
+
The path sum of a path is the sum of the node’s values in the path.
+
Given the root of a binary tree, return the maximum path sum of any path.
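A common recursive sketch of the idea (my own illustration, not the post's code; TreeNode is the usual LeetCode node type): at each node compute each child's best downward gain, clamp negatives to zero, and try bending the path at that node.

class TreeNode {
    int val;
    TreeNode left, right;
    TreeNode(int val) { this.val = val; }
}

class Solution {
    private int best = Integer.MIN_VALUE;

    public int maxPathSum(TreeNode root) {
        gain(root);
        return best;
    }

    // best sum of a path that starts at node and goes straight down
    private int gain(TreeNode node) {
        if (node == null) {
            return 0;
        }
        int left = Math.max(gain(node.left), 0);   // drop negative contributions
        int right = Math.max(gain(node.right), 0);
        best = Math.max(best, node.val + left + right); // a path bending at this node
        return node.val + Math.max(left, right);
    }
}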
Given a 2D grid of size m x n and an integer k. You need to shift the grid k times.
+
In one shift operation:
+
Element at grid[i][j] moves to grid[i][j + 1]. Element at grid[i][n - 1] moves to grid[i + 1][0]. Element at grid[m - 1][n - 1] moves to grid[0][0]. Return the 2D grid after applying shift operation k times.
+
Examples
Example 1:
+
+
Input: grid = [[1,2,3],[4,5,6],[7,8,9]], k = 1 Output: [[9,1,2],[3,4,5],[6,7,8]]
+
+
Example 2:
Input: grid = [[3,8,1,9],[19,7,2,5],[4,6,11,10],[12,0,21,13]], k = 4 Output: [[12,0,21,13],[3,8,1,9],[19,7,2,5],[4,6,11,10]]
You are given two non-empty linked lists representing two non-negative integers. The digits are stored in reverse order, and each of their nodes contains a single digit. Add the two numbers and return the sum as a linked list.
+
You may assume the two numbers do not contain any leading zero, except the number 0 itself. In other words, we are given two linked lists representing two non-negative integers stored in reverse order, one digit per node; add them up and output the result in the same linked-list form
There are only two things to watch out for here: the loop condition must keep going while a carry is still pending, and for the last node, don't new up an extra one once nothing is left. My version is written rather clumsily
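A minimal sketch of that approach (my own rewrite, not the post's code; ListNode is the usual LeetCode type): keep looping while either list or the carry has anything left, and only allocate a node when there is a digit to write.

class ListNode {
    int val;
    ListNode next;
    ListNode(int val) { this.val = val; }
}

class Solution {
    public ListNode addTwoNumbers(ListNode l1, ListNode l2) {
        ListNode dummy = new ListNode(0);
        ListNode tail = dummy;
        int carry = 0;
        // keep going while either list has digits left OR a carry is pending
        while (l1 != null || l2 != null || carry != 0) {
            int sum = carry;
            if (l1 != null) { sum += l1.val; l1 = l1.next; }
            if (l2 != null) { sum += l2.val; l2 = l2.next; }
            carry = sum / 10;
            tail.next = new ListNode(sum % 10); // node created only when needed
            tail = tail.next;
        }
        return dummy.next;
    }
}

A dummy head plus the carry-aware loop condition sidesteps both pitfalls at once.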
+]]>
+
+ Java
+ leetcode
+ java
+ linked list
+ linked list
+
+
+ leetcode
+ java
+ 题解
+ linked list
+
+
+
+ Leetcode 1862 Sum of Floored Pairs (*Hard*): Solution Analysis
+ /2022/09/11/Leetcode-1862-%E5%90%91%E4%B8%8B%E5%8F%96%E6%95%B4%E6%95%B0%E5%AF%B9%E5%92%8C-Sum-of-Floored-Pairs-Hard-%E9%A2%98%E8%A7%A3%E5%88%86%E6%9E%90/
+ Problem description
Given an integer array nums, return the sum of floor(nums[i] / nums[j]) for all pairs of indices 0 <= i, j < nums.length in the array. Since the answer may be too large, return it modulo 10^9 + 7.
+
The floor() function returns the integer part of the division.
Input: nums = [2,5,9] Output: 10 Explanation: floor(2 / 5) = floor(2 / 9) = floor(5 / 9) = 0 floor(2 / 2) = floor(5 / 5) = floor(9 / 9) = 1 floor(5 / 2) = 2 floor(9 / 2) = 4 floor(9 / 5) = 1 We calculate the floor of the division for every pair of indices in the array then sum them up.
+
+
Example 2:
+
Input: nums = [7,7,7,7,7,7,7] Output: 49
+
+
Constraints:
+
1 <= nums.length <= 10^5
+
1 <= nums[i] <= 10^5
+
+
Brief analysis
This one earns its Hard tag; without a great writeup from the discussion area I would have had to think about it from scratch for a long time. There are two key points. For any number k in the array, the naive approach divides every number by k, which is very slow. So what pattern does k have? Every number smaller than k floors to 0 and can be ignored, and all numbers from k upwards can be split into intervals [k, 2k-1), [2k, 3k-1), [3k, 4k-1), ... where dividing by k and flooring gives the same value everywhere inside one interval. So for any k we only need to know how many elements equal k and how many elements fall into each interval above it
+
Code
static final int MAXE5 = 100_000;
+
+static final int MODULUSE9 = 1_000_000_000 + 7;
+
+public int sumOfFlooredPairs(int[] nums) {
+    int[] counts = new int[MAXE5 + 1];
+    for (int num : nums) {
+        counts[num]++;
+    }
+    // the clever bit: fold each slot into the next one (prefix counts), so the
+    // difference between any two positions is the number of elements between them
+    for (int i = 1; i <= MAXE5; i++) {
+        counts[i] += counts[i - 1];
+    }
+    long total = 0;
+    for (int i = 1; i <= MAXE5; i++) {
+        long sum = 0;
+        if (counts[i] == counts[i - 1]) {
+            continue;
+        }
+        for (int j = 1; i * j <= MAXE5; j++) {
+            int min = i * j - 1;
+            int upper = i * (j + 1) - 1;
+            // how many elements fall in this interval
+            sum += (counts[Math.min(upper, MAXE5)] - counts[min]) * (long) j;
+        }
+        // times the count of the left operand, i.e. how many elements equal i
+        total = (total + (sum % MODULUSE9) * (counts[i] - counts[i - 1])) % MODULUSE9;
+    }
+    return (int) total;
+}
+]]>
+
+ Java
+ leetcode
+
+
+ leetcode
+ java
+ 题解
+
Leetcode 20 Valid Parentheses (*Easy*): Solution Analysis/2022/07/02/Leetcode-20-%E6%9C%89%E6%95%88%E7%9A%84%E6%8B%AC%E5%8F%B7-Valid-Parentheses-Easy-%E9%A2%98%E8%A7%A3%E5%88%86%E6%9E%90/
@@ -4394,68 +4517,6 @@ Input Explanation: The intersected node's value is 8 (note that this must no
java
-
- Leetcode 1862 Sum of Floored Pairs (*Hard*): Solution Analysis
- /2022/09/11/Leetcode-1862-%E5%90%91%E4%B8%8B%E5%8F%96%E6%95%B4%E6%95%B0%E5%AF%B9%E5%92%8C-Sum-of-Floored-Pairs-Hard-%E9%A2%98%E8%A7%A3%E5%88%86%E6%9E%90/
- Problem description
Given an integer array nums, return the sum of floor(nums[i] / nums[j]) for all pairs of indices 0 <= i, j < nums.length in the array. Since the answer may be too large, return it modulo 10^9 + 7.
-
The floor() function returns the integer part of the division.
Input: nums = [2,5,9] Output: 10 Explanation: floor(2 / 5) = floor(2 / 9) = floor(5 / 9) = 0 floor(2 / 2) = floor(5 / 5) = floor(9 / 9) = 1 floor(5 / 2) = 2 floor(9 / 2) = 4 floor(9 / 5) = 1 We calculate the floor of the division for every pair of indices in the array then sum them up.
-
-
Example 2:
-
Input: nums = [7,7,7,7,7,7,7] Output: 49
-
-
Constraints:
-
1 <= nums.length <= 10^5
-
1 <= nums[i] <= 10^5
-
-
Brief analysis
This one earns its Hard tag; without a great writeup from the discussion area I would have had to think about it from scratch for a long time. There are two key points. For any number k in the array, the naive approach divides every number by k, which is very slow. So what pattern does k have? Every number smaller than k floors to 0 and can be ignored, and all numbers from k upwards can be split into intervals [k, 2k-1), [2k, 3k-1), [3k, 4k-1), ... where dividing by k and flooring gives the same value everywhere inside one interval. So for any k we only need to know how many elements equal k and how many elements fall into each interval above it
-
Code
static final int MAXE5 = 100_000;
-
-static final int MODULUSE9 = 1_000_000_000 + 7;
-
-public int sumOfFlooredPairs(int[] nums) {
-    int[] counts = new int[MAXE5 + 1];
-    for (int num : nums) {
-        counts[num]++;
-    }
-    // the clever bit: fold each slot into the next one (prefix counts), so the
-    // difference between any two positions is the number of elements between them
-    for (int i = 1; i <= MAXE5; i++) {
-        counts[i] += counts[i - 1];
-    }
-    long total = 0;
-    for (int i = 1; i <= MAXE5; i++) {
-        long sum = 0;
-        if (counts[i] == counts[i - 1]) {
-            continue;
-        }
-        for (int j = 1; i * j <= MAXE5; j++) {
-            int min = i * j - 1;
-            int upper = i * (j + 1) - 1;
-            // how many elements fall in this interval
-            sum += (counts[Math.min(upper, MAXE5)] - counts[min]) * (long) j;
-        }
-        // times the count of the left operand, i.e. how many elements equal i
-        total = (total + (sum % MODULUSE9) * (counts[i] - counts[i - 1])) % MODULUSE9;
-    }
-    return (int) total;
-}
-]]>
-
- Java
- leetcode
-
-
- leetcode
- java
- 题解
-
- Leetcode 234 Palindrome Linked List: Solution Analysis/2020/11/15/Leetcode-234-%E5%9B%9E%E6%96%87%E8%81%94%E8%A1%A8-Palindrome-Linked-List-%E9%A2%98%E8%A7%A3%E5%88%86%E6%9E%90/
@@ -4607,75 +4668,6 @@ Input Explanation: The intersected node's value is 8 (note that this must no
First Bad Version
-
- Leetcode 3 Longest Substring Without Repeating Characters: Solution Analysis
- /2020/09/20/Leetcode-3-Longest-Substring-Without-Repeating-Characters-%E9%A2%98%E8%A7%A3%E5%88%86%E6%9E%90/
- Did another problem; my records show I solved it in C++ before. This time I straightened out the idea and rewrote it in Java; the approach is fairly clear, but my handling of the boundary details was rather sloppy
-
Brief description
Given a string s, find the length of the longest substring without repeating characters.
-
Examples
Example 1:
Input: s = "abcabcbb"
-Output: 3
-Explanation: The answer is "abc", with the length of 3.
-
-
Example 2:
Input: s = "bbbbb"
-Output: 1
-Explanation: The answer is "b", with the length of 1.
-
Example 3:
Input: s = "pwwkew"
-Output: 3
-Explanation: The answer is "wke", with the length of 3.
-Notice that the answer must be a substring, "pwke" is a subsequence and not a substring.
You are given two non-empty linked lists representing two non-negative integers. The digits are stored in reverse order, and each of their nodes contains a single digit. Add the two numbers and return the sum as a linked list.
-
You may assume the two numbers do not contain any leading zero, except the number 0 itself. In other words, we are given two linked lists representing two non-negative integers stored in reverse order, one digit per node; add them up and output the result in the same linked-list form
There are only two things to watch out for here: the loop condition must keep going while a carry is still pending, and for the last node, don't new up an extra one once nothing is left. My version is written rather clumsily
-]]>
-
- Java
- leetcode
- java
- linked list
- linked list
-
-
- leetcode
- java
- 题解
- linked list
-
Leetcode 4 Median of Two Sorted Arrays (*Hard*): Solution Analysis/2022/03/27/Leetcode-4-%E5%AF%BB%E6%89%BE%E4%B8%A4%E4%B8%AA%E6%AD%A3%E5%BA%8F%E6%95%B0%E7%BB%84%E7%9A%84%E4%B8%AD%E4%BD%8D%E6%95%B0-Median-of-Two-Sorted-Arrays-Hard-%E9%A2%98%E8%A7%A3%E5%88%86%E6%9E%90/
@@ -4874,6 +4805,75 @@ Output: [8,9,9,9,0,0,0,1]Median of Two Sorted Arrays
+
+ Leetcode 3 Longest Substring Without Repeating Characters: Solution Analysis
+ /2020/09/20/Leetcode-3-Longest-Substring-Without-Repeating-Characters-%E9%A2%98%E8%A7%A3%E5%88%86%E6%9E%90/
+ Did another problem; my records show I solved it in C++ before. This time I straightened out the idea and rewrote it in Java; the approach is fairly clear, but my handling of the boundary details was rather sloppy
+
Brief description
Given a string s, find the length of the longest substring without repeating characters.
+
Examples
Example 1:
Input: s = "abcabcbb"
+Output: 3
+Explanation: The answer is "abc", with the length of 3.
+
+
Example 2:
Input: s = "bbbbb"
+Output: 1
+Explanation: The answer is "b", with the length of 1.
+
Example 3:
Input: s = "pwwkew"
+Output: 3
+Explanation: The answer is "wke", with the length of 3.
+Notice that the answer must be a substring, "pwke" is a subsequence and not a substring.
You start at the cell (rStart, cStart) of an rows x cols grid facing east. The northwest corner is at the first row and column in the grid, and the southeast corner is at the last row and column.
-
You will walk in a clockwise spiral shape to visit every position in this grid. Whenever you move outside the grid’s boundary, we continue our walk outside the grid (but may return to the grid boundary later.). Eventually, we reach all rows * cols spaces of the grid.
-
Return an array of coordinates representing the positions of the grid in the order you visited them.
-]]>
-
- Java
- leetcode
-
-
- leetcode
- java
- 题解
-
- Leetcode 698 Partition to K Equal Sum Subsets (*Medium*): Solution Analysis/2022/06/19/Leetcode-698-%E5%88%92%E5%88%86%E4%B8%BAk%E4%B8%AA%E7%9B%B8%E7%AD%89%E7%9A%84%E5%AD%90%E9%9B%86-Partition-to-K-Equal-Sum-Subsets-Medium-%E9%A2%98%E8%A7%A3%E5%88%86%E6%9E%90/
@@ -5178,6 +5104,58 @@ maxR[n -java
+
+ Leetcode 747 Largest Number At Least Twice of Others (*Easy*): Solution Analysis
+ /2022/10/02/Leetcode-747-%E8%87%B3%E5%B0%91%E6%98%AF%E5%85%B6%E4%BB%96%E6%95%B0%E5%AD%97%E4%B8%A4%E5%80%8D%E7%9A%84%E6%9C%80%E5%A4%A7%E6%95%B0-Largest-Number-At-Least-Twice-of-Others-Easy-%E9%A2%98%E8%A7%A3%E5%88%86%E6%9E%90/
+ Problem description
You are given an integer array nums where the largest integer is unique.
+
Determine whether the largest element in the array is at least twice as much as every other number in the array. If it is, return the index of the largest element, or return -1 otherwise. In other words, check whether the largest number in the array is at least twice every other number; if so, return its index, otherwise return -1 (a sketch follows after the examples below)
+
Examples
Example 1:
+
Input: nums = [3,6,1,0] Output: 1 Explanation: 6 is the largest integer. For every other number in the array x, 6 is at least twice as big as x. The index of value 6 is 1, so we return 1.
+
+
Example 2:
+
Input: nums = [1,2,3,4] Output: -1 Explanation: 4 is less than twice the value of 3, so we return -1.
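As mentioned above, a single-pass sketch (my own illustration, using LeetCode's dominantIndex signature): track the index of the maximum plus the runner-up value, and compare once at the end.

class Solution {
    public int dominantIndex(int[] nums) {
        int maxIdx = 0;
        long second = Long.MIN_VALUE; // value of the runner-up
        for (int i = 1; i < nums.length; i++) {
            if (nums[i] > nums[maxIdx]) {
                second = nums[maxIdx];
                maxIdx = i;
            } else if (nums[i] > second) {
                second = nums[i];
            }
        }
        // a single element trivially dominates; otherwise compare against twice the runner-up
        return (second == Long.MIN_VALUE || nums[maxIdx] >= 2 * second) ? maxIdx : -1;
    }
}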
Given a string, find the length of the longest substring without repeating characters. For example, the longest substring without repeating letters for “abcabcbb” is “abc”, which the length is 3. For “bbbbb” the longest substring is “b”, with the length of 1.
int ct[256];
+ memset(ct, -1, sizeof(ct));
+ int tail = -1;
+ int max = 0;
+ for (int i = 0; i < s.size(); i++){
+ if (ct[s[i]] > tail)
+ tail = ct[s[i]];
+ if (i - tail > max)
+ max = i - tail;
+ ct[s[i]] = i;
+ }
+ return max;
+]]>
+
+ leetcode
+
+
+ leetcode
+ c++
+
+
+
+ Leetcode 885 Spiral Matrix III (*Medium*): Solution Analysis
+ /2022/08/23/Leetcode-885-%E8%9E%BA%E6%97%8B%E7%9F%A9%E9%98%B5-III-Spiral-Matrix-III-Medium-%E9%A2%98%E8%A7%A3%E5%88%86%E6%9E%90/
+ Problem description
You start at the cell (rStart, cStart) of an rows x cols grid facing east. The northwest corner is at the first row and column in the grid, and the southeast corner is at the last row and column.
+
You will walk in a clockwise spiral shape to visit every position in this grid. Whenever you move outside the grid’s boundary, we continue our walk outside the grid (but may return to the grid boundary later.). Eventually, we reach all rows * cols spaces of the grid.
+
Return an array of coordinates representing the positions of the grid in the order you visited them.
Given a string, find the length of the longest substring without repeating characters. For example, the longest substring without repeating letters for “abcabcbb” is “abc”, which the length is 3. For “bbbbb” the longest substring is “b”, with the length of 1.
int ct[256];
- memset(ct, -1, sizeof(ct));
- int tail = -1;
- int max = 0;
- for (int i = 0; i < s.size(); i++){
- if (ct[s[i]] > tail)
- tail = ct[s[i]];
- if (i - tail > max)
- max = i - tail;
- ct[s[i]] = i;
- }
- return max;
]]>
- leetcode
+ C++
- leetcode
- c++
-
-
-
- Number of 1 Bits
- /2015/03/11/Number-Of-1-Bits/
- Number of 1 Bits
Write a function that takes an unsigned integer and returns the number of ’1’ bits it has (also known as the Hamming weight). For example, the 32-bit integer ‘11’ has binary representation 00000000000000000000000000001011, so the function should return 3.
-
-
Analysis
Combine the bit counts step by step: from 1-bit groups into 2-bit groups, then into 4-bit groups, and so on
-
-
code
int hammingWeight(uint32_t n) {
- const uint32_t m1 = 0x55555555; //binary: 0101...
- const uint32_t m2 = 0x33333333; //binary: 00110011..
- const uint32_t m4 = 0x0f0f0f0f; //binary: 4 zeros, 4 ones ...
- const uint32_t m8 = 0x00ff00ff; //binary: 8 zeros, 8 ones ...
- const uint32_t m16 = 0x0000ffff; //binary: 16 zeros, 16 ones ...
-
- n = (n & m1 ) + ((n >> 1) & m1 ); //put count of each 2 bits into those 2 bits
- n = (n & m2 ) + ((n >> 2) & m2 ); //put count of each 4 bits into those 4 bits
- n = (n & m4 ) + ((n >> 4) & m4 ); //put count of each 8 bits into those 8 bits
- n = (n & m8 ) + ((n >> 8) & m8 ); //put count of each 16 bits into those 16 bits
- n = (n & m16) + ((n >> 16) & m16); //put count of each 32 bits into those 32 bits
- return n;
-
-}
]]>
-
- leetcode
-
-
- leetcode
- c++
+ mfc
@@ -5400,6 +5443,36 @@ OS name: "mac os x", version: "10.14.6", arch: "x86_64&
Maven
+
+ Number of 1 Bits
+ /2015/03/11/Number-Of-1-Bits/
+ Number of 1 Bits
Write a function that takes an unsigned integer and returns the number of ’1’ bits it has (also known as the Hamming weight). For example, the 32-bit integer ‘11’ has binary representation 00000000000000000000000000001011, so the function should return 3.
+
+
Analysis
Combine the bit counts step by step: from 1-bit groups into 2-bit groups, then into 4-bit groups, and so on
+
+
code
int hammingWeight(uint32_t n) {
+ const uint32_t m1 = 0x55555555; //binary: 0101...
+ const uint32_t m2 = 0x33333333; //binary: 00110011..
+ const uint32_t m4 = 0x0f0f0f0f; //binary: 4 zeros, 4 ones ...
+ const uint32_t m8 = 0x00ff00ff; //binary: 8 zeros, 8 ones ...
+ const uint32_t m16 = 0x0000ffff; //binary: 16 zeros, 16 ones ...
+
+ n = (n & m1 ) + ((n >> 1) & m1 ); //put count of each 2 bits into those 2 bits
+ n = (n & m2 ) + ((n >> 2) & m2 ); //put count of each 4 bits into those 4 bits
+ n = (n & m4 ) + ((n >> 4) & m4 ); //put count of each 8 bits into those 8 bits
+ n = (n & m8 ) + ((n >> 8) & m8 ); //put count of each 16 bits into those 16 bits
+ n = (n & m16) + ((n >> 16) & m16); //put count of each 32 bits into those 32 bits
+ return n;
+
+}
-]]>
-
- C++
-
-
- c++
- mfc
-
-
-
- Path Sum
- /2015/01/04/Path-Sum/
- problem
Given a binary tree and a sum, determine if the tree has a root-to-leaf path such that adding up all the values along the path equals the given sum.
+ two sum
+ /2015/01/14/Two-Sum/
+ problem
Given an array of integers, find two numbers such that they add up to a specific target number.
+
The function twoSum should return indices of the two numbers such that they add up to the target, where index1 must be less than index2. Please note that your returned answers (both index1 and index2) are not zero-based.
-
For example: Given the below binary tree and sum = 22,
-
      5
-     / \
-    4   8
-   /   / \
-  11  13  4
- /  \      \
-7    2      1
-
return true, as there exists a root-to-leaf path 5->4->11->2 whose sum is 22.
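A recursive sketch of the check (my own illustration, not the original post's code): subtract the current node's value from the target on the way down and test exactly at the leaves.

class TreeNode {
    int val;
    TreeNode left, right;
    TreeNode(int val) { this.val = val; }
}

class Solution {
    public boolean hasPathSum(TreeNode root, int sum) {
        if (root == null) {
            return false;
        }
        // at a leaf the remaining target must equal the leaf's value
        if (root.left == null && root.right == null) {
            return sum == root.val;
        }
        return hasPathSum(root.left, sum - root.val)
            || hasPathSum(root.right, sum - root.val);
    }
}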
+]]>
+
+ data analysis
+
+
+ hadoop
+ cluster
+
A beginner's introduction to Docker that goes a bit further than most/2020/03/08/docker%E6%AF%94%E4%B8%80%E8%88%AC%E5%A4%9A%E4%B8%80%E7%82%B9%E7%9A%84%E5%88%9D%E5%AD%A6%E8%80%85%E4%BB%8B%E7%BB%8D/
@@ -5705,130 +5844,65 @@ Run a commandproblem
Given an array of integers, find two numbers such that they add up to a specific target number.
-
The function twoSum should return indices of the two numbers such that they add up to the target, where index1 must be less than index2. Please note that your returned answers (both index1 and index2) are not zero-based.
-
-
You may assume that each input would have exactly one solution.
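A hash-map sketch for this early Two Sum variant (my own illustration, not the post's code): one pass, remembering each value's index, returning 1-based indices as the statement requires.

import java.util.HashMap;
import java.util.Map;

class Solution {
    public int[] twoSum(int[] numbers, int target) {
        Map<Integer, Integer> seen = new HashMap<>(); // value -> 0-based index
        for (int i = 0; i < numbers.length; i++) {
            Integer j = seen.get(target - numbers[i]);
            if (j != null) {
                return new int[] { j + 1, i + 1 }; // answers are not zero-based
            }
            seen.put(numbers[i], i);
        }
        return new int[0]; // unreachable: exactly one solution is guaranteed
    }
}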
Given an array of n positive integers and a positive integer s, find the minimal length of a subarray of which the sum ≥ s. If there isn’t one, return 0 instead.
+
For example, given the array [2,3,1,2,4,3] and s = 7, the subarray [4,3] has the minimal length under the problem constraint.
class Solution {
+public:
+ int minSubArrayLen(int s, vector<int>& nums) {
+ int len = nums.size();
+ if(len == 0) return 0;
+ int minlen = INT_MAX;
+ int sum = 0;
+
+ int left = 0;
+ int right = -1;
+ while(right < len)
+ {
+ while(sum < s && right < len)
+ sum += nums[++right];
+ if(sum >= s)
+ {
+ minlen = minlen < right - left + 1 ? minlen : right - left + 1;
+ sum -= nums[left++];
+ }
+ }
+ return minlen > len ? 0 : minlen;
+ }
+};
+]]>
+
+ leetcode
+
+
+ leetcode
+ c++
+
A small issue with C++ pointer usage/2014/12/23/my-new-post/
@@ -6133,41 +6183,69 @@ public:
- minimum-size-subarray-sum-209
- /2016/10/11/minimum-size-subarray-sum-209/
- problem
Given an array of n positive integers and a positive integer s, find the minimal length of a subarray of which the sum ≥ s. If there isn’t one, return 0 instead.
-
For example, given the array [2,3,1,2,4,3] and s = 7, the subarray [4,3] has the minimal length under the problem constraint.
]]>
-
- Java
- Mybatis
- Mysql
-
-
- Java
- Mysql
- Mybatis
-
-
-
- Leetcode 747 Largest Number At Least Twice of Others (*Easy*): Solution Analysis
- /2022/10/02/Leetcode-747-%E8%87%B3%E5%B0%91%E6%98%AF%E5%85%B6%E4%BB%96%E6%95%B0%E5%AD%97%E4%B8%A4%E5%80%8D%E7%9A%84%E6%9C%80%E5%A4%A7%E6%95%B0-Largest-Number-At-Least-Twice-of-Others-Easy-%E9%A2%98%E8%A7%A3%E5%88%86%E6%9E%90/
- Problem description
You are given an integer array nums where the largest integer is unique.
-
Determine whether the largest element in the array is at least twice as much as every other number in the array. If it is, return the index of the largest element, or return -1 otherwise. In other words, check whether the largest number in the array is at least twice every other number; if so, return its index, otherwise return -1
-
Examples
Example 1:
-
Input: nums = [3,6,1,0] Output: 1 Explanation: 6 is the largest integer. For every other number in the array x, 6 is at least twice as big as x. The index of value 6 is 1, so we return 1.
-
-
Example 2:
-
Input: nums = [1,2,3,4] Output: -1 Explanation: 4 is less than twice the value of 3, so we return -1.
private PooledConnection popConnection(String username, String password) throws SQLException {
+    boolean countedWait = false;
+    PooledConnection conn = null;
+    long t = System.currentTimeMillis();
+    int localBadConnectionCount = 0;
+    while (conn == null) {
+        lock.lock();
+        try {
+            if (!state.idleConnections.isEmpty()) {
+                // Pool has available connection
+                conn = state.idleConnections.remove(0);
+                if (log.isDebugEnabled()) {
+                    log.debug("Checked out connection " + conn.getRealHashCode() + " from pool.");
+                }
+            } else {
+                // Pool does not have available connection
+                if (state.activeConnections.size() < poolMaximumActiveConnections) {
+                    // Can create new connection
+                    conn = new PooledConnection(dataSource.getConnection(), this);
+                    if (log.isDebugEnabled()) {
+                        log.debug("Created connection " + conn.getRealHashCode() + ".");
+                    }
+                } else {
+                    // Cannot create new connection
+                    PooledConnection oldestActiveConnection = state.activeConnections.get(0);
+                    long longestCheckoutTime = oldestActiveConnection.getCheckoutTime();
+                    if (longestCheckoutTime > poolMaximumCheckoutTime) {
+                        // Can claim overdue connection
+                        state.claimedOverdueConnectionCount++;
+                        state.accumulatedCheckoutTimeOfOverdueConnections += longestCheckoutTime;
+                        state.accumulatedCheckoutTime += longestCheckoutTime;
+                        state.activeConnections.remove(oldestActiveConnection);
+                        if (!oldestActiveConnection.getRealConnection().getAutoCommit()) {
+                            try {
+                                oldestActiveConnection.getRealConnection().rollback();
+                            } catch (SQLException e) {
+                                /*
+                                 * Just log a message for debug and continue to execute the following
+                                 * statement like nothing happened.
+                                 * Wrap the bad connection with a new PooledConnection, this will help
+                                 * to not interrupt current executing thread and give current thread a
+                                 * chance to join the next competition for another valid/good database
+                                 * connection. At the end of this loop, bad {@link @conn} will be set as null.
+                                 */
+                                log.debug("Bad connection. Could not roll back");
+                            }
+                        }
+                        conn = new PooledConnection(oldestActiveConnection.getRealConnection(), this);
+                        conn.setCreatedTimestamp(oldestActiveConnection.getCreatedTimestamp());
+                        conn.setLastUsedTimestamp(oldestActiveConnection.getLastUsedTimestamp());
+                        oldestActiveConnection.invalidate();
+                        if (log.isDebugEnabled()) {
+                            log.debug("Claimed overdue connection " + conn.getRealHashCode() + ".");
+                        }
+                    } else {
+                        // Must wait
+                        try {
+                            if (!countedWait) {
+                                state.hadToWaitCount++;
+                                countedWait = true;
+                            }
+                            if (log.isDebugEnabled()) {
+                                log.debug("Waiting as long as " + poolTimeToWait + " milliseconds for connection.");
+                            }
+                            long wt = System.currentTimeMillis();
+                            condition.await(poolTimeToWait, TimeUnit.MILLISECONDS);
+                            state.accumulatedWaitTime += System.currentTimeMillis() - wt;
+                        } catch (InterruptedException e) {
+                            // set interrupt flag
+                            Thread.currentThread().interrupt();
+                            break;
+                        }
+                    }
+                }
+            }
+            if (conn != null) {
+                // ping to server and check the connection is valid or not
+                if (conn.isValid()) {
+                    if (!conn.getRealConnection().getAutoCommit()) {
+                        conn.getRealConnection().rollback();
+                    }
+                    conn.setConnectionTypeCode(assembleConnectionTypeCode(dataSource.getUrl(), username, password));
+                    conn.setCheckoutTimestamp(System.currentTimeMillis());
+                    conn.setLastUsedTimestamp(System.currentTimeMillis());
+                    state.activeConnections.add(conn);
+                    state.requestCount++;
+                    state.accumulatedRequestTime += System.currentTimeMillis() - t;
+                } else {
+                    if (log.isDebugEnabled()) {
+                        log.debug("A bad connection (" + conn.getRealHashCode() + ") was returned from the pool, getting another connection.");
+                    }
+                    state.badConnectionCount++;
+                    localBadConnectionCount++;
+                    conn = null;
+                    if (localBadConnectionCount > (poolMaximumIdleConnections + poolMaximumLocalBadConnectionTolerance)) {
+                        if (log.isDebugEnabled()) {
+                            log.debug("PooledDataSource: Could not get a good connection to the database.");
+                        }
+                        throw new SQLException("PooledDataSource: Could not get a good connection to the database.");
+                    }
+                }
+            }
+        } finally {
+            lock.unlock();
+        }
-
public <T> T selectOne(String statement, Object parameter) {
-    // Popular vote was to return null on 0 results and throw exception on too many.
-    List<T> list = this.selectList(statement, parameter);
-    if (list.size() == 1) {
-        return list.get(0);
-    } else if (list.size() > 1) {
-        throw new TooManyResultsException("Expected one result (or null) to be returned by selectOne(), but found: " + list.size());
-    } else {
-        return null;
-    }
-}
+ }
+    if (conn == null) {
+        if (log.isDebugEnabled()) {
+            log.debug("PooledDataSource: Unknown severe error condition. The connection pool returned a null connection.");
+        }
+        throw new SQLException("PooledDataSource: Unknown severe error condition. The connection pool returned a null connection.");
+    }
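For context, a minimal usage sketch (my own example, not from the post; the driver, URL, and credentials are placeholders): everything above happens behind an ordinary getConnection() call.

import java.sql.Connection;
import java.sql.SQLException;
import org.apache.ibatis.datasource.pooled.PooledDataSource;

public class PoolDemo {
    public static void main(String[] args) throws SQLException {
        PooledDataSource ds = new PooledDataSource(
                "com.mysql.cj.jdbc.Driver",         // placeholder driver
                "jdbc:mysql://localhost:3306/test", // placeholder URL
                "root", "password");                // placeholder credentials
        ds.setPoolMaximumActiveConnections(10); // the bound popConnection checks
        ds.setPoolTimeToWait(20_000);           // how long each "Must wait" round blocks
        try (Connection conn = ds.getConnection()) { // internally ends up in popConnection
            System.out.println(conn.isValid(1));
        }
    }
}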
-
private PooledConnection popConnection(String username, String password) throws SQLException {
-    boolean countedWait = false;
-    PooledConnection conn = null;
-    long t = System.currentTimeMillis();
-    int localBadConnectionCount = 0;
-    while (conn == null) {
-        lock.lock();
-        try {
-            if (!state.idleConnections.isEmpty()) {
-                // Pool has available connection
-                conn = state.idleConnections.remove(0);
-                if (log.isDebugEnabled()) {
-                    log.debug("Checked out connection " + conn.getRealHashCode() + " from pool.");
-                }
-            } else {
-                // Pool does not have available connection
-                if (state.activeConnections.size() < poolMaximumActiveConnections) {
-                    // Can create new connection
-                    conn = new PooledConnection(dataSource.getConnection(), this);
-                    if (log.isDebugEnabled()) {
-                        log.debug("Created connection " + conn.getRealHashCode() + ".");
-                    }
-                } else {
-                    // Cannot create new connection
-                    PooledConnection oldestActiveConnection = state.activeConnections.get(0);
-                    long longestCheckoutTime = oldestActiveConnection.getCheckoutTime();
-                    if (longestCheckoutTime > poolMaximumCheckoutTime) {
-                        // Can claim overdue connection
-                        state.claimedOverdueConnectionCount++;
-                        state.accumulatedCheckoutTimeOfOverdueConnections += longestCheckoutTime;
-                        state.accumulatedCheckoutTime += longestCheckoutTime;
-                        state.activeConnections.remove(oldestActiveConnection);
-                        if (!oldestActiveConnection.getRealConnection().getAutoCommit()) {
-                            try {
-                                oldestActiveConnection.getRealConnection().rollback();
-                            } catch (SQLException e) {
-                                /*
-                                 * Just log a message for debug and continue to execute the following
-                                 * statement like nothing happened.
-                                 * Wrap the bad connection with a new PooledConnection, this will help
-                                 * to not interrupt current executing thread and give current thread a
-                                 * chance to join the next competition for another valid/good database
-                                 * connection. At the end of this loop, bad {@link @conn} will be set as null.
-                                 */
-                                log.debug("Bad connection. Could not roll back");
-                            }
-                        }
-                        conn = new PooledConnection(oldestActiveConnection.getRealConnection(), this);
-                        conn.setCreatedTimestamp(oldestActiveConnection.getCreatedTimestamp());
-                        conn.setLastUsedTimestamp(oldestActiveConnection.getLastUsedTimestamp());
-                        oldestActiveConnection.invalidate();
-                        if (log.isDebugEnabled()) {
-                            log.debug("Claimed overdue connection " + conn.getRealHashCode() + ".");
-                        }
-                    } else {
-                        // Must wait
-                        try {
-                            if (!countedWait) {
-                                state.hadToWaitCount++;
-                                countedWait = true;
-                            }
-                            if (log.isDebugEnabled()) {
-                                log.debug("Waiting as long as " + poolTimeToWait + " milliseconds for connection.");
-                            }
-                            long wt = System.currentTimeMillis();
-                            condition.await(poolTimeToWait, TimeUnit.MILLISECONDS);
-                            state.accumulatedWaitTime += System.currentTimeMillis() - wt;
-                        } catch (InterruptedException e) {
-                            // set interrupt flag
-                            Thread.currentThread().interrupt();
-                            break;
-                        }
-                    }
-                }
-            }
-            if (conn != null) {
-                // ping to server and check the connection is valid or not
-                if (conn.isValid()) {
-                    if (!conn.getRealConnection().getAutoCommit()) {
-                        conn.getRealConnection().rollback();
-                    }
-                    conn.setConnectionTypeCode(assembleConnectionTypeCode(dataSource.getUrl(), username, password));
-                    conn.setCheckoutTimestamp(System.currentTimeMillis());
-                    conn.setLastUsedTimestamp(System.currentTimeMillis());
-                    state.activeConnections.add(conn);
-                    state.requestCount++;
-                    state.accumulatedRequestTime += System.currentTimeMillis() - t;
-                } else {
-                    if (log.isDebugEnabled()) {
-                        log.debug("A bad connection (" + conn.getRealHashCode() + ") was returned from the pool, getting another connection.");
-                    }
-                    state.badConnectionCount++;
-                    localBadConnectionCount++;
-                    conn = null;
-                    if (localBadConnectionCount > (poolMaximumIdleConnections + poolMaximumLocalBadConnectionTolerance)) {
-                        if (log.isDebugEnabled()) {
-                            log.debug("PooledDataSource: Could not get a good connection to the database.");
-                        }
-                        throw new SQLException("PooledDataSource: Could not get a good connection to the database.");
-                    }
-                }
-            }
-        } finally {
-            lock.unlock();
-        }
+    // parse the statement's main attributes
+    SqlSource sqlSource = langDriver.createSqlSource(configuration, context, parameterTypeClass);
+    StatementType statementType = StatementType.valueOf(context.getStringAttribute("statementType", StatementType.PREPARED.toString()));
+    Integer fetchSize = context.getIntAttribute("fetchSize");
+    Integer timeout = context.getIntAttribute("timeout");
+    String parameterMap = context.getStringAttribute("parameterMap");
+    String resultType = context.getStringAttribute("resultType");
+    Class<?> resultTypeClass = resolveClass(resultType);
+    String resultMap = context.getStringAttribute("resultMap");
+    String resultSetType = context.getStringAttribute("resultSetType");
+    ResultSetType resultSetTypeEnum = resolveResultSetType(resultSetType);
+    if (resultSetTypeEnum == null) {
+        resultSetTypeEnum = configuration.getDefaultResultSetType();
+    }
+    String keyProperty = context.getStringAttribute("keyProperty");
+    String keyColumn = context.getStringAttribute("keyColumn");
+    String resultSets = context.getStringAttribute("resultSets");
- }
+    // --------> register the mapped statement
+    builderAssistant.addMappedStatement(id, sqlSource, statementType, sqlCommandType,
+        fetchSize, timeout, parameterMap, parameterTypeClass, resultMap, resultTypeClass,
+        resultSetTypeEnum, flushCache, useCache, resultOrdered,
+        keyGenerator, keyProperty, keyColumn, databaseId, langDriver, resultSets);
+}
-    if (conn == null) {
-        if (log.isDebugEnabled()) {
-            log.debug("PooledDataSource: Unknown severe error condition. The connection pool returned a null connection.");
-        }
-        throw new SQLException("PooledDataSource: Unknown severe error condition. The connection pool returned a null connection.");
-    }
-    return conn;
-}
public <T> T selectOne(String statement, Object parameter) {
+    // Popular vote was to return null on 0 results and throw exception on too many.
+    List<T> list = this.selectList(statement, parameter);
+    if (list.size() == 1) {
+        return list.get(0);
+    } else if (list.size() > 1) {
+        throw new TooManyResultsException("Expected one result (or null) to be returned by selectOne(), but found: " + list.size());
+    } else {
+        return null;
+    }
+}
-    while (conn == null) {
-        lock.lock();
-        try {
-            if (!state.idleConnections.isEmpty()) {
-                // Pool has available connection
-                conn = state.idleConnections.remove(0);
-                if (log.isDebugEnabled()) {
-                    log.debug("Checked out connection " + conn.getRealHashCode() + " from pool.");
-                }
-            } else {
-                // Pool does not have available connection
-                if (state.activeConnections.size() < poolMaximumActiveConnections) {
-                    // Can create new connection
-                    // ------------> this is where the PooledConnection gets created, but note it first calls dataSource.getConnection() inside
-                    conn = new PooledConnection(dataSource.getConnection(), this);
-                    if (log.isDebugEnabled()) {
-                        log.debug("Created connection " + conn.getRealHashCode() + ".");
-                    }
-                } else {
-                    // Cannot create new connection
-                    PooledConnection oldestActiveConnection = state.activeConnections.get(0);
-                    long longestCheckoutTime = oldestActiveConnection.getCheckoutTime();
-                    if (longestCheckoutTime > poolMaximumCheckoutTime) {
-                        // Can claim overdue connection
-                        state.claimedOverdueConnectionCount++;
-                        state.accumulatedCheckoutTimeOfOverdueConnections += longestCheckoutTime;
-                        state.accumulatedCheckoutTime += longestCheckoutTime;
-                        state.activeConnections.remove(oldestActiveConnection);
-                        if (!oldestActiveConnection.getRealConnection().getAutoCommit()) {
-                            try {
-                                oldestActiveConnection.getRealConnection().rollback();
-                            } catch (SQLException e) {
-                                /*
-                                 * Just log a message for debug and continue to execute the following
-                                 * statement like nothing happened.
-                                 * Wrap the bad connection with a new PooledConnection, this will help
-                                 * to not interrupt current executing thread and give current thread a
-                                 * chance to join the next competition for another valid/good database
-                                 * connection. At the end of this loop, bad {@link @conn} will be set as null.
-                                 */
-                                log.debug("Bad connection. Could not roll back");
-                            }
-                        }
-                        conn = new PooledConnection(oldestActiveConnection.getRealConnection(), this);
-                        conn.setCreatedTimestamp(oldestActiveConnection.getCreatedTimestamp());
-                        conn.setLastUsedTimestamp(oldestActiveConnection.getLastUsedTimestamp());
-                        oldestActiveConnection.invalidate();
-                        if (log.isDebugEnabled()) {
-                            log.debug("Claimed overdue connection " + conn.getRealHashCode() + ".");
-                        }
-                    } else {
-                        // Must wait
-                        try {
-                            if (!countedWait) {
-                                state.hadToWaitCount++;
-                                countedWait = true;
-                            }
-                            if (log.isDebugEnabled()) {
-                                log.debug("Waiting as long as " + poolTimeToWait + " milliseconds for connection.");
-                            }
-                            long wt = System.currentTimeMillis();
-                            condition.await(poolTimeToWait, TimeUnit.MILLISECONDS);
-                            state.accumulatedWaitTime += System.currentTimeMillis() - wt;
-                        } catch (InterruptedException e) {
-                            // set interrupt flag
-                            Thread.currentThread().interrupt();
-                            break;
-                        }
-                    }
-                }
-            }
-            if (conn != null) {
-                // ping to server and check the connection is valid or not
-                if (conn.isValid()) {
-                    if (!conn.getRealConnection().getAutoCommit()) {
-                        conn.getRealConnection().rollback();
-                    }
-                    conn.setConnectionTypeCode(assembleConnectionTypeCode(dataSource.getUrl(), username, password));
-                    conn.setCheckoutTimestamp(System.currentTimeMillis());
-                    conn.setLastUsedTimestamp(System.currentTimeMillis());
-                    state.activeConnections.add(conn);
-                    state.requestCount++;
-                    state.accumulatedRequestTime += System.currentTimeMillis() - t;
-                } else {
-                    if (log.isDebugEnabled()) {
-                        log.debug("A bad connection (" + conn.getRealHashCode() + ") was returned from the pool, getting another connection.");
-                    }
-                    state.badConnectionCount++;
-                    localBadConnectionCount++;
-                    conn = null;
-                    if (localBadConnectionCount > (poolMaximumIdleConnections + poolMaximumLocalBadConnectionTolerance)) {
-                        if (log.isDebugEnabled()) {
-                            log.debug("PooledDataSource: Could not get a good connection to the database.");
-                        }
-                        throw new SQLException("PooledDataSource: Could not get a good connection to the database.");
-                    }
-                }
-            }
-        } finally {
-            lock.unlock();
-        }
+
public BoundSql getBoundSql(Object parameterObject) {
+    BoundSql boundSql = sqlSource.getBoundSql(parameterObject);
+    List<ParameterMapping> parameterMappings = boundSql.getParameterMappings();
+    if (parameterMappings == null || parameterMappings.isEmpty()) {
+        boundSql = new BoundSql(configuration, boundSql.getSql(), parameterMap.getParameterMappings(), parameterObject);
+    }
-private static Connection getConnection(
-    String url, java.util.Properties info, Class<?> caller) throws SQLException {
-    /*
-     * When callerCl is null, we should check the application's
-     * (which is invoking this class indirectly)
-     * classloader, so that the JDBC driver class outside rt.jar
-     * can be loaded from here.
-     */
-    ClassLoader callerCL = caller != null ? caller.getClassLoader() : null;
-    synchronized (DriverManager.class) {
-        // synchronize loading of the correct classloader.
-        if (callerCL == null) {
-            callerCL = Thread.currentThread().getContextClassLoader();
-        }
-    }
-    if (url == null) {
-        throw new SQLException("The url cannot be null", "08001");
-    }
+    // check for nested result maps in parameter mappings (issue #30)
+    for (ParameterMapping pm : boundSql.getParameterMappings()) {
+        String rmId = pm.getResultMapId();
+        if (rmId != null) {
+            ResultMap rm = configuration.getResultMap(rmId);
+            if (rm != null) {
+                hasNestedResultMaps |= rm.hasNestedResultMaps();
+            }
+        }
+    }
- println("DriverManager.getConnection(\""+ url +"\")");
+ return boundSql;
+ }
-    // Walk through the loaded registeredDrivers attempting to make a connection.
-    // Remember the first exception that gets raised so we can reraise it.
-    SQLException reason = null;
+
$request_time
-request processing time in seconds with a milliseconds resolution;
-time elapsed between the first bytes were read from the client and the log write after the last bytes were sent to the client
Later I hit another pitfall: nginx has a client_body_buffer_size parameter, which defaults to 8K on 32-bit and 16K on 64-bit systems. When the request body is larger than that, nginx writes it to a temporary file, and ngx.req.get_post_args() in OpenResty then fails with "failed to get post args: request body in temp file not supported". Raising client_body_buffer_size a little fixes it
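For reference, a minimal sketch of where the directive lives (128k is just an example value, not from the post; size it to your typical request bodies):

http {
    # keep request bodies in memory instead of spilling to a temp file
    client_body_buffer_size 128k;
}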
local json =require("cjson.safe")
-local str =[[ {"key:"value"} ]]
+
That is, it calls
+
@Override
+public Connection getConnection() throws SQLException {
+    if (connection == null) {
+        openConnection();
+    }
+    return connection;
+}
-local t = json.decode(str)
-if t then
- ngx.say(" --> ",type(t))
-end
-
The cjson.safe package returns nil when parsing fails
-
-
Another pitfall: when the Redis host is a domain name, the connection fails with "failed to connect: no resolver defined to resolve "redis.xxxxxx.com"", and you need nginx's resolver directive, e.g. resolver 8.8.8.8 valid=3600s;
Perl Compatible Regular Expressions (PCRE) is a regular expression C library inspired by the regular expression capabilities in the Perl programming language, written by Philip Hazel, starting in summer 1997.
#include <pcre.h>
-int pcre_exec(const pcre *code, const pcre_extra *extra, const char *subject, int length, int startoffset, int options, int *ovector, int ovecsize)
<?php
-interface int1{
- const INTER1 = 111;
- function inter1();
-}
-interface int2{
- const INTER1 = 222;
- function inter2();
-}
-abstract class abst1{
- public function abstr1(){
- echo 1111;
- }
- abstract function abstra1(){
- echo 'ahahahha';
- }
-}
-abstract class abst2{
- public function abstr2(){
- echo 1111;
- }
- abstract function abstra2();
-}
-class normal1 extends abst1{
- protected function abstr2(){
- echo 222;
- }
-}
+    while (conn == null) {
+        lock.lock();
+        try {
+            if (!state.idleConnections.isEmpty()) {
+                // Pool has available connection
+                conn = state.idleConnections.remove(0);
+                if (log.isDebugEnabled()) {
+                    log.debug("Checked out connection " + conn.getRealHashCode() + " from pool.");
+                }
+            } else {
+                // Pool does not have available connection
+                if (state.activeConnections.size() < poolMaximumActiveConnections) {
+                    // Can create new connection
+                    // ------------> this is where the PooledConnection gets created, but note it first calls dataSource.getConnection() inside
+                    conn = new PooledConnection(dataSource.getConnection(), this);
+                    if (log.isDebugEnabled()) {
+                        log.debug("Created connection " + conn.getRealHashCode() + ".");
+                    }
+                } else {
+                    // Cannot create new connection
+                    PooledConnection oldestActiveConnection = state.activeConnections.get(0);
+                    long longestCheckoutTime = oldestActiveConnection.getCheckoutTime();
+                    if (longestCheckoutTime > poolMaximumCheckoutTime) {
+                        // Can claim overdue connection
+                        state.claimedOverdueConnectionCount++;
+                        state.accumulatedCheckoutTimeOfOverdueConnections += longestCheckoutTime;
+                        state.accumulatedCheckoutTime += longestCheckoutTime;
+                        state.activeConnections.remove(oldestActiveConnection);
+                        if (!oldestActiveConnection.getRealConnection().getAutoCommit()) {
+                            try {
+                                oldestActiveConnection.getRealConnection().rollback();
+                            } catch (SQLException e) {
+                                /*
+                                 * Just log a message for debug and continue to execute the following
+                                 * statement like nothing happened.
+                                 * Wrap the bad connection with a new PooledConnection, this will help
+                                 * to not interrupt current executing thread and give current thread a
+                                 * chance to join the next competition for another valid/good database
+                                 * connection. At the end of this loop, bad {@link @conn} will be set as null.
+                                 */
+                                log.debug("Bad connection. Could not roll back");
+                            }
+                        }
+                        conn = new PooledConnection(oldestActiveConnection.getRealConnection(), this);
+                        conn.setCreatedTimestamp(oldestActiveConnection.getCreatedTimestamp());
+                        conn.setLastUsedTimestamp(oldestActiveConnection.getLastUsedTimestamp());
+                        oldestActiveConnection.invalidate();
+                        if (log.isDebugEnabled()) {
+                            log.debug("Claimed overdue connection " + conn.getRealHashCode() + ".");
+                        }
+                    } else {
+                        // Must wait
+                        try {
+                            if (!countedWait) {
+                                state.hadToWaitCount++;
+                                countedWait = true;
+                            }
+                            if (log.isDebugEnabled()) {
+                                log.debug("Waiting as long as " + poolTimeToWait + " milliseconds for connection.");
+                            }
+                            long wt = System.currentTimeMillis();
+                            condition.await(poolTimeToWait, TimeUnit.MILLISECONDS);
+                            state.accumulatedWaitTime += System.currentTimeMillis() - wt;
+                        } catch (InterruptedException e) {
+                            // set interrupt flag
+                            Thread.currentThread().interrupt();
+                            break;
+                        }
+                    }
+                }
+            }
+            if (conn != null) {
+                // ping to server and check the connection is valid or not
+                if (conn.isValid()) {
+                    if (!conn.getRealConnection().getAutoCommit()) {
+                        conn.getRealConnection().rollback();
+                    }
+                    conn.setConnectionTypeCode(assembleConnectionTypeCode(dataSource.getUrl(), username, password));
+                    conn.setCheckoutTimestamp(System.currentTimeMillis());
+                    conn.setLastUsedTimestamp(System.currentTimeMillis());
+                    state.activeConnections.add(conn);
+                    state.requestCount++;
+                    state.accumulatedRequestTime += System.currentTimeMillis() - t;
+                } else {
+                    if (log.isDebugEnabled()) {
+                        log.debug("A bad connection (" + conn.getRealHashCode() + ") was returned from the pool, getting another connection.");
+                    }
+                    state.badConnectionCount++;
+                    localBadConnectionCount++;
+                    conn = null;
+                    if (localBadConnectionCount > (poolMaximumIdleConnections + poolMaximumLocalBadConnectionTolerance)) {
+                        if (log.isDebugEnabled()) {
+                            log.debug("PooledDataSource: Could not get a good connection to the database.");
+                        }
+                        throw new SQLException("PooledDataSource: Could not get a good connection to the database.");
+                    }
+                }
+            }
+        } finally {
+            lock.unlock();
+        }
-
result
PHP Fatal error: Abstract function abst1::abstra1() cannot contain body in new.php on line 17
+ }
-Fatal error: Abstract function abst1::abstra1() cannot contain body in php on line 17
public class TypeAliasRegistry {
+ if (conn == null) {
+     if (log.isDebugEnabled()) {
+         log.debug("PooledDataSource: Unknown severe error condition. The connection pool returned a null connection.");
+     }
+     throw new SQLException("PooledDataSource: Unknown severe error condition. The connection pool returned a null connection.");
+ }
- private final Map<String, Class<?>> typeAliases = new HashMap<>();
]]>Java
@@ -8334,1373 +8374,795 @@ Fatal error: Abstract function abst1::abstra1() cannot contain body in php on li
- rabbitmq-tips
- /2017/04/25/rabbitmq-tips/
- rabbitmq introduction
Cluster status of node rabbit@rabbit1 ...
-[{nodes,[{disc,[rabbit@rabbit1,rabbit@rabbit2,rabbit@rabbit3]}]},
- {running_nodes,[rabbit@rabbit2,rabbit@rabbit1]}]
-...done.
A pitfall I ran into here: when an exchange does the routing, a message whose routing_key has no queue bound to it is simply dropped. As the docs put it: "What happens if we break our contract and send a message with one or four words, like "orange" or "quick.orange.male.rabbit"? Well, these messages won't match any bindings and will be lost." (see the linked page) With the default (empty) exchange, by contrast, the message is kept, and a consumer that shows up later still receives what the producer pushed before (see the linked page), which is why the empty exchange is used here.
This quotes the earliest 2.2 release of redis on github, at https://github.com/antirez/redis/blob/2.2/src/sds.h. The struct has just three members, two ints and a char array, and the two ints are exactly the optimization I mentioned. Plain C strings have two problems: to know how much of the array is actually used you have to traverse it, and while traversing it is easy to trip over the fact that it is terminated by '\0'. With the two int fields, one records the string's current length and the other how much space is left, so both operations drop from O(N) to O(1). The free field also plays a second important role: it guards against C string buffer overflows, because you can check free before writing and grow the buffer first if there is not enough room. That's it for now; there is plenty to write about in this series, so I'll take it one piece at a time.
-
Linked list
Linked lists are a common enough data structure, but since redis is written in C, it has to implement its own rather than rely on a third-party library. Redis's list has a dedicated head and is non-circular; the structure below is again from the earliest version on github.
The dictionary is another everyday data structure that simply goes by different names: hash table in data-structure terms, Map in Java, array in PHP, dict in Python. Pure C ships with none of them, which is the bittersweet part of the language: you enjoy C's performance while accepting that it only provides the basics, so every wheel has to be built by hand. Redis implements its own dictionary as well; let's look at the code.
-
typedef struct dictEntry {
- void *key;
- void *val;
- struct dictEntry *next;
-} dictEntry;
-
-typedef struct dictType {
- unsigned int (*hashFunction)(const void *key);
- void *(*keyDup)(void *privdata, const void *key);
- void *(*valDup)(void *privdata, const void *obj);
- int (*keyCompare)(void *privdata, const void *key1, const void *key2);
- void (*keyDestructor)(void *privdata, void *key);
- void (*valDestructor)(void *privdata, void *obj);
-} dictType;
-
-/* This is our hash table structure. Every dictionary has two of this as we
- * implement incremental rehashing, for the old to the new table. */
-typedef struct dictht {
- dictEntry **table;
- unsigned long size;
- unsigned long sizemask;
- unsigned long used;
-} dictht;
+ pcre-intro-and-a-simple-package
+ /2015/01/16/pcre-intro-and-a-simple-package/
+ Pcre
+
Perl Compatible Regular Expressions (PCRE) is a regular expression C library inspired by the regular expression capabilities in the Perl programming language, written by Philip Hazel, starting in summer 1997.
+
-typedef struct dict {
- dictType *type;
- void *privdata;
- dictht ht[2];
- int rehashidx; /* rehashing not in progress if rehashidx == -1 */
- int iterators; /* number of iterators currently running */
-} dict;
#include <pcre.h>
+int pcre_exec(const pcre *code, const pcre_extra *extra, const char *subject, int length, int startoffset, int options, int *ovector, int ovecsize)
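A minimal, hypothetical usage sketch of pcre_exec (the pattern and subject are invented for illustration; ovecsize must be a multiple of 3):
#include <stdio.h>
#include <string.h>
#include <pcre.h>

int main(void) {
    const char *err;
    int erroffset;
    int ovector[30]; /* substring offsets; the size must be a multiple of 3 */
    pcre *re = pcre_compile("(\\w+) (\\w+)", 0, &err, &erroffset, NULL);
    if (re == NULL) return 1;
    const char *subject = "hello pcre";
    int rc = pcre_exec(re, NULL, subject, (int)strlen(subject), 0, 0, ovector, 30);
    if (rc > 0) /* rc is 1 + the number of captured groups */
        printf("matched: %.*s\n", ovector[1] - ovector[0], subject + ovector[0]);
    pcre_free(re);
    return 0;
}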
Flexible array members [1] were introduced in the C99 standard of the C programming language (in particular, in section §6.7.2.1, item 16, page 103). [2] It is a member of a struct, which is an array without a given dimension. It must be the last member of such a struct and it must be accompanied by at least one other member, as in the following example:
-
-
struct vectord {
- size_t len;
- double arr[]; // the flexible array member must be last
-};
Later I hit another pitfall: nginx has a client_body_buffer_size parameter whose default is 8K on 32-bit systems and 16K on 64-bit ones. When the request body exceeds that, nginx spools it to a temporary file, and ngx.req.get_post_args() in OpenResty then fails with "failed to get post args: request body in temp file not supported". Raising client_body_buffer_size makes the error go away.
local decode = require("cjson").decode
+
+function json_decode(str)
+    local ok, t = pcall(decode, str)
+    if not ok then
+        return nil
+    end
+
+    return t
+end
local json = require("cjson.safe")
+local str = [[ {"key:"value"} ]]
+
+local t = json.decode(str)
+if t then
+    ngx.say(" --> ", type(t))
+end
+
The cjson.safe module returns nil when decoding fails.
+
+
Another pitfall: when connecting to redis, if the host is a domain name you get "failed to connect: no resolver defined to resolve "redis.xxxxxx.com"". This needs nginx's resolver directive, e.g. resolver 8.8.8.8 valid=3600s;
PHP Fatal error: Abstract function abst1::abstra1() cannot contain body in new.php on line 17
+
+Fatal error: Abstract function abst1::abstra1() cannot contain body in php on line 17
/* The actual Redis Object */
-#define OBJ_STRING 0 /* String object. */
-#define OBJ_LIST 1 /* List object. */
-#define OBJ_SET 2 /* Set object. */
-#define OBJ_ZSET 3 /* Sorted set object. */
-#define OBJ_HASH 4 /* Hash object. */
-/*
- * Objects encoding. Some kind of objects like Strings and Hashes can be
- * internally represented in multiple ways. The 'encoding' field of the object
- * is set to one of this fields for this object. */
-#define OBJ_ENCODING_RAW 0 /* Raw representation */
-#define OBJ_ENCODING_INT 1 /* Encoded as integer */
-#define OBJ_ENCODING_HT 2 /* Encoded as hash table */
-#define OBJ_ENCODING_ZIPMAP 3 /* Encoded as zipmap */
-#define OBJ_ENCODING_LINKEDLIST 4 /* No longer used: old list encoding. */
-#define OBJ_ENCODING_ZIPLIST 5 /* Encoded as ziplist */
-#define OBJ_ENCODING_INTSET 6 /* Encoded as intset */
-#define OBJ_ENCODING_SKIPLIST 7 /* Encoded as skiplist */
-#define OBJ_ENCODING_EMBSTR 8 /* Embedded sds string encoding */
-#define OBJ_ENCODING_QUICKLIST 9 /* Encoded as linked list of ziplists */
-#define OBJ_ENCODING_STREAM 10 /* Encoded as a radix tree of listpacks */
-
-#define LRU_BITS 24
-#define LRU_CLOCK_MAX ((1<<LRU_BITS)-1) /* Max value of obj->lru */
-#define LRU_CLOCK_RESOLUTION 1000 /* LRU clock resolution in ms */
-
-#define OBJ_SHARED_REFCOUNT INT_MAX
-typedef struct redisObject {
- unsigned type:4;
- unsigned encoding:4;
- unsigned lru:LRU_BITS; /* LRU time (relative to global lru_clock) or
- * LFU data (least significant 8 bits frequency
- * and most significant 16 bits access time). */
- int refcount;
- void *ptr;
-} robj;
Cluster status of node rabbit@rabbit1 ...
+[{nodes,[{disc,[rabbit@rabbit1,rabbit@rabbit2,rabbit@rabbit3]}]},
+ {running_nodes,[rabbit@rabbit2,rabbit@rabbit1]}]
+...done.
A pitfall I ran into here: when an exchange does the routing, a message whose routing_key has no queue bound to it is simply dropped. As the docs put it: "What happens if we break our contract and send a message with one or four words, like "orange" or "quick.orange.male.rabbit"? Well, these messages won't match any bindings and will be lost." (see the linked page) With the default (empty) exchange, by contrast, the message is kept, and a consumer that shows up later still receives what the producer pushed before (see the linked page), which is why the empty exchange is used here.
$request_time
+request processing time in seconds with a milliseconds resolution;
+time elapsed between the first bytes were read from the client and the log write after the last bytes were sent to the client
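A sketch of surfacing it in an access log; the format name and field list here are made up for illustration:
log_format timed '$remote_addr "$request" $status $request_time';
access_log /var/log/nginx/access.log timed;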
/* quicklistNode is a 32 byte struct describing a ziplist for a quicklist.
- * We use bit fields keep the quicklistNode at 32 bytes.
- * count: 16 bits, max 65536 (max zl bytes is 65k, so max count actually < 32k).
- * encoding: 2 bits, RAW=1, LZF=2.
- * container: 2 bits, NONE=1, ZIPLIST=2.
- * recompress: 1 bit, bool, true if node is temporarry decompressed for usage.
- * attempted_compress: 1 bit, boolean, used for verifying during testing.
- * extra: 10 bits, free for future use; pads out the remainder of 32 bits */
-typedef struct quicklistNode {
- struct quicklistNode *prev;
- struct quicklistNode *next;
- unsigned char *zl;
- unsigned int sz; /* ziplist size in bytes */
- unsigned int count : 16; /* count of items in ziplist */
- unsigned int encoding : 2; /* RAW==1 or LZF==2 */
- unsigned int container : 2; /* NONE==1 or ZIPLIST==2 */
- unsigned int recompress : 1; /* was this node previous compressed? */
- unsigned int attempted_compress : 1; /* node can't compress; too small */
- unsigned int extra : 10; /* more bits to steal for future usage */
-} quicklistNode;
-
-/* quicklistLZF is a 4+N byte struct holding 'sz' followed by 'compressed'.
- * 'sz' is byte length of 'compressed' field.
- * 'compressed' is LZF data with total (compressed) length 'sz'
- * NOTE: uncompressed length is stored in quicklistNode->sz.
- * When quicklistNode->zl is compressed, node->zl points to a quicklistLZF */
-typedef struct quicklistLZF {
- unsigned int sz; /* LZF size in bytes*/
- char compressed[];
-} quicklistLZF;
-
-/* quicklist is a 40 byte struct (on 64-bit systems) describing a quicklist.
- * 'count' is the number of total entries.
- * 'len' is the number of quicklist nodes.
- * 'compress' is: -1 if compression disabled, otherwise it's the number
- * of quicklistNodes to leave uncompressed at ends of quicklist.
- * 'fill' is the user-requested (or default) fill factor. */
-typedef struct quicklist {
- quicklistNode *head;
- quicklistNode *tail;
- unsigned long count; /* total count of all entries in all ziplists */
- unsigned long len; /* number of quicklistNodes */
- int fill : 16; /* fill factor for individual nodes */
- unsigned int compress : 16; /* depth of end nodes not to compress;0=off */
-} quicklist;
* |00pppppp| - 1 byte
-* String value with length less than or equal to 63 bytes (6 bits).
-* "pppppp" represents the unsigned 6 bit length.
-* |01pppppp|qqqqqqqq| - 2 bytes
-* String value with length less than or equal to 16383 bytes (14 bits).
-* IMPORTANT: The 14 bit number is stored in big endian.
-* |10000000|qqqqqqqq|rrrrrrrr|ssssssss|tttttttt| - 5 bytes
-* String value with length greater than or equal to 16384 bytes.
-* Only the 4 bytes following the first byte represents the length
-* up to 32^2-1. The 6 lower bits of the first byte are not used and
-* are set to zero.
-* IMPORTANT: The 32 bit number is stored in big endian.
-* |11000000| - 3 bytes
-* Integer encoded as int16_t (2 bytes).
-* |11010000| - 5 bytes
-* Integer encoded as int32_t (4 bytes).
-* |11100000| - 9 bytes
-* Integer encoded as int64_t (8 bytes).
-* |11110000| - 4 bytes
-* Integer encoded as 24 bit signed (3 bytes).
-* |11111110| - 2 bytes
-* Integer encoded as 8 bit signed (1 byte).
-* |1111xxxx| - (with xxxx between 0000 and 1101) immediate 4 bit integer.
-* Unsigned integer from 0 to 12. The encoded value is actually from
-* 1 to 13 because 0000 and 1111 can not be used, so 1 should be
-* subtracted from the encoded 4 bit value to obtain the right value.
-* |11111111| - End of ziplist special entry.
/* This function is called when we are going to perform some operation
- * in a given key, but such key may be already logically expired even if
- * it still exists in the database. The main way this function is called
- * is via lookupKey*() family of functions.
- *
- * The behavior of the function depends on the replication role of the
- * instance, because slave instances do not expire keys, they wait
- * for DELs from the master for consistency matters. However even
- * slaves will try to have a coherent return value for the function,
- * so that read commands executed in the slave side will be able to
- * behave like if the key is expired even if still present (because the
- * master has yet to propagate the DEL).
- *
- * In masters as a side effect of finding a key which is expired, such
- * key will be evicted from the database. Also this may trigger the
- * propagation of a DEL/UNLINK command in AOF / replication stream.
- *
- * The return value of the function is 0 if the key is still valid,
- * otherwise the function returns 1 if the key is expired. */
-int expireIfNeeded(redisDb *db, robj *key) {
- if (!keyIsExpired(db,key)) return 0;
-
- /* If we are running in the context of a slave, instead of
- * evicting the expired key from the database, we return ASAP:
- * the slave key expiration is controlled by the master that will
- * send us synthesized DEL operations for expired keys.
- *
- * Still we try to return the right information to the caller,
- * that is, 0 if we think the key should be still valid, 1 if
- * we think the key is expired at this time. */
- if (server.masterhost != NULL) return 1;
-
- /* Delete the key */
- server.stat_expiredkeys++;
- propagateExpire(db,key,server.lazyfree_lazy_expire);
- notifyKeyspaceEvent(NOTIFY_EXPIRED,
- "expired",key,db->id);
- return server.lazyfree_lazy_expire ? dbAsyncDelete(db,key) :
- dbSyncDelete(db,key);
-}
-
-/* Check if the key is expired. */
-int keyIsExpired(redisDb *db, robj *key) {
- mstime_t when = getExpire(db,key);
- mstime_t now;
-
- if (when < 0) return 0; /* No expire for this key */
-
- /* Don't expire anything while loading. It will be done later. */
- if (server.loading) return 0;
-
- /* If we are in the context of a Lua script, we pretend that time is
- * blocked to when the Lua script started. This way a key can expire
- * only the first time it is accessed and not in the middle of the
- * script execution, making propagation to slaves / AOF consistent.
- * See issue #1525 on Github for more information. */
- if (server.lua_caller) {
- now = server.lua_time_start;
- }
- /* If we are in the middle of a command execution, we still want to use
- * a reference time that does not change: in that case we just use the
- * cached time, that we update before each call in the call() function.
- * This way we avoid that commands such as RPOPLPUSH or similar, that
- * may re-open the same key multiple times, can invalidate an already
- * open object in a next call, if the next call will see the key expired,
- * while the first did not. */
- else if (server.fixed_time_expire > 0) {
- now = server.mstime;
- }
- /* For the other cases, we want to use the most fresh time we have. */
- else {
- now = mstime();
- }
-
- /* The key expired if the current (virtual or real) time is greater
- * than the expire time of the key. */
- return now > when;
-}
-/* Return the expire time of the specified key, or -1 if no expire
- * is associated with this key (i.e. the key is non volatile) */
-long long getExpire(redisDb *db, robj *key) {
- dictEntry *de;
-
- /* No expire? return ASAP */
- if (dictSize(db->expires) == 0 ||
- (de = dictFind(db->expires,key->ptr)) == NULL) return -1;
-
- /* The entry was found in the expire dict, this means it should also
- * be present in the main dict (safety check). */
- serverAssertWithInfo(NULL,key,dictFind(db->dict,key->ptr) != NULL);
- return dictGetSignedIntegerVal(de);
-}
/* This function handles 'background' operations we are required to do
- * incrementally in Redis databases, such as active key expiring, resizing,
- * rehashing. */
-void databasesCron(void) {
- /* Expire keys by random sampling. Not required for slaves
- * as master will synthesize DELs for us. */
- if (server.active_expire_enabled) {
- if (server.masterhost == NULL) {
- activeExpireCycle(ACTIVE_EXPIRE_CYCLE_SLOW);
- } else {
- expireSlaveKeys();
- }
- }
-
- /* Defrag keys gradually. */
- activeDefragCycle();
-
- /* Perform hash tables rehashing if needed, but only if there are no
- * other processes saving the DB on disk. Otherwise rehashing is bad
- * as will cause a lot of copy-on-write of memory pages. */
- if (!hasActiveChildProcess()) {
- /* We use global counters so if we stop the computation at a given
- * DB we'll be able to start from the successive in the next
- * cron loop iteration. */
- static unsigned int resize_db = 0;
- static unsigned int rehash_db = 0;
- int dbs_per_call = CRON_DBS_PER_CALL;
- int j;
-
- /* Don't test more DBs than we have. */
- if (dbs_per_call > server.dbnum) dbs_per_call = server.dbnum;
-
- /* Resize */
- for (j = 0; j < dbs_per_call; j++) {
- tryResizeHashTables(resize_db % server.dbnum);
- resize_db++;
- }
-
- /* Rehash */
- if (server.activerehashing) {
- for (j = 0; j < dbs_per_call; j++) {
- int work_done = incrementallyRehash(rehash_db);
- if (work_done) {
- /* If the function did some work, stop here, we'll do
- * more at the next cron loop. */
- break;
- } else {
- /* If this db didn't need rehash, we'll try the next one. */
- rehash_db++;
- rehash_db %= server.dbnum;
- }
- }
- }
- }
-}
-/* Try to expire a few timed out keys. The algorithm used is adaptive and
- * will use few CPU cycles if there are few expiring keys, otherwise
- * it will get more aggressive to avoid that too much memory is used by
- * keys that can be removed from the keyspace.
- *
- * Every expire cycle tests multiple databases: the next call will start
- * again from the next db, with the exception of exists for time limit: in that
- * case we restart again from the last database we were processing. Anyway
- * no more than CRON_DBS_PER_CALL databases are tested at every iteration.
- *
- * The function can perform more or less work, depending on the "type"
- * argument. It can execute a "fast cycle" or a "slow cycle". The slow
- * cycle is the main way we collect expired cycles: this happens with
- * the "server.hz" frequency (usually 10 hertz).
- *
- * However the slow cycle can exit for timeout, since it used too much time.
- * For this reason the function is also invoked to perform a fast cycle
- * at every event loop cycle, in the beforeSleep() function. The fast cycle
- * will try to perform less work, but will do it much more often.
- *
- * The following are the details of the two expire cycles and their stop
- * conditions:
- *
- * If type is ACTIVE_EXPIRE_CYCLE_FAST the function will try to run a
- * "fast" expire cycle that takes no longer than EXPIRE_FAST_CYCLE_DURATION
- * microseconds, and is not repeated again before the same amount of time.
- * The cycle will also refuse to run at all if the latest slow cycle did not
- * terminate because of a time limit condition.
- *
- * If type is ACTIVE_EXPIRE_CYCLE_SLOW, that normal expire cycle is
- * executed, where the time limit is a percentage of the REDIS_HZ period
- * as specified by the ACTIVE_EXPIRE_CYCLE_SLOW_TIME_PERC define. In the
- * fast cycle, the check of every database is interrupted once the number
- * of already expired keys in the database is estimated to be lower than
- * a given percentage, in order to avoid doing too much work to gain too
- * little memory.
- *
- * The configured expire "effort" will modify the baseline parameters in
- * order to do more work in both the fast and slow expire cycles.
- */
-
-#define ACTIVE_EXPIRE_CYCLE_KEYS_PER_LOOP 20 /* Keys for each DB loop. */
-#define ACTIVE_EXPIRE_CYCLE_FAST_DURATION 1000 /* Microseconds. */
-#define ACTIVE_EXPIRE_CYCLE_SLOW_TIME_PERC 25 /* Max % of CPU to use. */
-#define ACTIVE_EXPIRE_CYCLE_ACCEPTABLE_STALE 10 /* % of stale keys after which
- we do extra efforts. */
-void activeExpireCycle(int type) {
- /* Adjust the running parameters according to the configured expire
- * effort. The default effort is 1, and the maximum configurable effort
- * is 10. */
- unsigned long
- effort = server.active_expire_effort-1, /* Rescale from 0 to 9. */
- config_keys_per_loop = ACTIVE_EXPIRE_CYCLE_KEYS_PER_LOOP +
- ACTIVE_EXPIRE_CYCLE_KEYS_PER_LOOP/4*effort,
- config_cycle_fast_duration = ACTIVE_EXPIRE_CYCLE_FAST_DURATION +
- ACTIVE_EXPIRE_CYCLE_FAST_DURATION/4*effort,
- config_cycle_slow_time_perc = ACTIVE_EXPIRE_CYCLE_SLOW_TIME_PERC +
- 2*effort,
- config_cycle_acceptable_stale = ACTIVE_EXPIRE_CYCLE_ACCEPTABLE_STALE-
- effort;
-
- /* This function has some global state in order to continue the work
- * incrementally across calls. */
- static unsigned int current_db = 0; /* Last DB tested. */
- static int timelimit_exit = 0; /* Time limit hit in previous call? */
- static long long last_fast_cycle = 0; /* When last fast cycle ran. */
-
- int j, iteration = 0;
- int dbs_per_call = CRON_DBS_PER_CALL;
- long long start = ustime(), timelimit, elapsed;
-
- /* When clients are paused the dataset should be static not just from the
- * POV of clients not being able to write, but also from the POV of
- * expires and evictions of keys not being performed. */
- if (clientsArePaused()) return;
-
- if (type == ACTIVE_EXPIRE_CYCLE_FAST) {
- /* Don't start a fast cycle if the previous cycle did not exit
- * for time limit, unless the percentage of estimated stale keys is
- * too high. Also never repeat a fast cycle for the same period
- * as the fast cycle total duration itself. */
- if (!timelimit_exit &&
- server.stat_expired_stale_perc < config_cycle_acceptable_stale)
- return;
-
- if (start < last_fast_cycle + (long long)config_cycle_fast_duration*2)
- return;
-
- last_fast_cycle = start;
- }
-
- /* We usually should test CRON_DBS_PER_CALL per iteration, with
- * two exceptions:
- *
- * 1) Don't test more DBs than we have.
- * 2) If last time we hit the time limit, we want to scan all DBs
- * in this iteration, as there is work to do in some DB and we don't want
- * expired keys to use memory for too much time. */
- if (dbs_per_call > server.dbnum || timelimit_exit)
- dbs_per_call = server.dbnum;
+
When $loggable is 0 or empty the if condition is false. With the defaults above it is 1, and it only becomes 0 when the response status is 2xx or 3xx, in which case the request need not be logged. This feature makes access logging much more flexible to configure.
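The map block referred to above is not shown here; it is presumably the canonical example from the nginx docs, roughly:
map $status $loggable {
    ~^[23]  0;
    default 1;
}
access_log /var/log/nginx/access.log combined if=$loggable;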
This quotes the earliest 2.2 release of redis on github, at https://github.com/antirez/redis/blob/2.2/src/sds.h. The struct has just three members, two ints and a char array, and the two ints are exactly the optimization I mentioned. Plain C strings have two problems: to know how much of the array is actually used you have to traverse it, and while traversing it is easy to trip over the fact that it is terminated by '\0'. With the two int fields, one records the string's current length and the other how much space is left, so both operations drop from O(N) to O(1). The free field also plays a second important role: it guards against C string buffer overflows, because you can check free before writing and grow the buffer first if there is not enough room. That's it for now; there is plenty to write about in this series, so I'll take it one piece at a time.
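For reference, a rough sketch of that struct as it appears in the 2.2-era sds.h (member names from the original, comments mine):
struct sdshdr {
    int len;     /* bytes of buf in use, so length queries are O(1) */
    int free;    /* bytes still available, checked before appends to avoid overflow */
    char buf[];  /* the characters themselves, still '\0'-terminated */
};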
+
Linked list
Linked lists are a common enough data structure, but since redis is written in C, it has to implement its own rather than rely on a third-party library. Redis's list has a dedicated head and is non-circular; the structure below is again from the earliest version on github.
+
typedef struct listNode {
+ // previous node
+ struct listNode *prev;
+ // next node
+ struct listNode *next;
+ // value
+ void *value;
+} listNode;
- /* We can use at max 'config_cycle_slow_time_perc' percentage of CPU
- * time per iteration. Since this function gets called with a frequency of
- * server.hz times per second, the following is the max amount of
- * microseconds we can spend in this function. */
- timelimit = config_cycle_slow_time_perc*1000000/server.hz/100;
- timelimit_exit = 0;
- if (timelimit <= 0) timelimit = 1;
+typedef struct list {
+ // head of the list
+ listNode *head;
+ // current node, i.e. the last node
+ listNode *tail;
+ // node copy function
+ void *(*dup)(void *ptr);
+ // node value free function
+ void (*free)(void *ptr);
+ // node value compare function
+ int (*match)(void *ptr, void *key);
+ // number of nodes in the list
+ unsigned int len;
+} list;
The dictionary is another everyday data structure that simply goes by different names: hash table in data-structure terms, Map in Java, array in PHP, dict in Python. Pure C ships with none of them, which is the bittersweet part of the language: you enjoy C's performance while accepting that it only provides the basics, so every wheel has to be built by hand. Redis implements its own dictionary as well; let's look at the code.
+
typedef struct dictEntry {
+ void *key;
+ void *val;
+ struct dictEntry *next;
+} dictEntry;
- if (type == ACTIVE_EXPIRE_CYCLE_FAST)
- timelimit = config_cycle_fast_duration; /* in microseconds. */
+typedef struct dictType {
+ unsigned int (*hashFunction)(const void *key);
+ void *(*keyDup)(void *privdata, const void *key);
+ void *(*valDup)(void *privdata, const void *obj);
+ int (*keyCompare)(void *privdata, const void *key1, const void *key2);
+ void (*keyDestructor)(void *privdata, void *key);
+ void (*valDestructor)(void *privdata, void *obj);
+} dictType;
- /* Accumulate some global stats as we expire keys, to have some idea
- * about the number of keys that are already logically expired, but still
- * existing inside the database. */
- long total_sampled = 0;
- long total_expired = 0;
+/* This is our hash table structure. Every dictionary has two of this as we
+ * implement incremental rehashing, for the old to the new table. */
+typedef struct dictht {
+ dictEntry **table;
+ unsigned long size;
+ unsigned long sizemask;
+ unsigned long used;
+} dictht;
- for (j = 0; j < dbs_per_call && timelimit_exit == 0; j++) {
- /* Expired and checked in a single loop. */
- unsigned long expired, sampled;
+typedef struct dict {
+ dictType *type;
+ void *privdata;
+ dictht ht[2];
+ int rehashidx; /* rehashing not in progress if rehashidx == -1 */
+ int iterators; /* number of iterators currently running */
+} dict;
Flexible array members [1] were introduced in the C99 standard of the C programming language (in particular, in section §6.7.2.1, item 16, page 103). [2] It is a member of a struct, which is an array without a given dimension. It must be the last member of such a struct and it must be accompanied by at least one other member, as in the following example:
+
+
struct vectord {
+ size_t len;
+ double arr[]; // the flexible array member must be last
+};
/* ZSETs use a specialized version of Skiplists */
+typedef struct zskiplistNode {
+ sds ele;
+ double score;
+ struct zskiplistNode *backward;
+ struct zskiplistLevel {
+ struct zskiplistNode *forward;
+ unsigned long span;
+ } level[];
+} zskiplistNode;
- /* Increment the DB now so we are sure if we run out of time
- * in the current DB we'll restart from the next. This allows to
- * distribute the time evenly across DBs. */
- current_db++;
+typedef struct zskiplist {
+ struct zskiplistNode *header, *tail;
+ unsigned long length;
+ int level;
+} zskiplist;
- /* Continue to expire if at the end of the cycle more than 25%
- * of the keys were expired. */
- do {
- unsigned long num, slots;
- long long now, ttl_sum;
- int ttl_samples;
- iteration++;
+typedef struct zset {
+ dict *dict;
+ zskiplist *zsl;
+} zset;
/* quicklistNode is a 32 byte struct describing a ziplist for a quicklist.
+ * We use bit fields keep the quicklistNode at 32 bytes.
+ * count: 16 bits, max 65536 (max zl bytes is 65k, so max count actually < 32k).
+ * encoding: 2 bits, RAW=1, LZF=2.
+ * container: 2 bits, NONE=1, ZIPLIST=2.
+ * recompress: 1 bit, bool, true if node is temporarry decompressed for usage.
+ * attempted_compress: 1 bit, boolean, used for verifying during testing.
+ * extra: 10 bits, free for future use; pads out the remainder of 32 bits */
+typedef struct quicklistNode {
+ struct quicklistNode *prev;
+ struct quicklistNode *next;
+ unsigned char *zl;
+ unsigned int sz; /* ziplist size in bytes */
+ unsigned int count : 16; /* count of items in ziplist */
+ unsigned int encoding : 2; /* RAW==1 or LZF==2 */
+ unsigned int container : 2; /* NONE==1 or ZIPLIST==2 */
+ unsigned int recompress : 1; /* was this node previous compressed? */
+ unsigned int attempted_compress : 1; /* node can't compress; too small */
+ unsigned int extra : 10; /* more bits to steal for future usage */
+} quicklistNode;
- /* If there is nothing to expire try next DB ASAP. */
- if ((num = dictSize(db->expires)) == 0) {
- db->avg_ttl = 0;
- break;
- }
- slots = dictSlots(db->expires);
- now = mstime();
+/* quicklistLZF is a 4+N byte struct holding 'sz' followed by 'compressed'.
+ * 'sz' is byte length of 'compressed' field.
+ * 'compressed' is LZF data with total (compressed) length 'sz'
+ * NOTE: uncompressed length is stored in quicklistNode->sz.
+ * When quicklistNode->zl is compressed, node->zl points to a quicklistLZF */
+typedef struct quicklistLZF {
+ unsigned int sz; /* LZF size in bytes*/
+ char compressed[];
+} quicklistLZF;
- /* When there are less than 1% filled slots, sampling the key
- * space is expensive, so stop here waiting for better times...
- * The dictionary will be resized asap. */
- if (num && slots > DICT_HT_INITIAL_SIZE &&
- (num*100/slots < 1)) break;
+/* quicklist is a 40 byte struct (on 64-bit systems) describing a quicklist.
+ * 'count' is the number of total entries.
+ * 'len' is the number of quicklist nodes.
+ * 'compress' is: -1 if compression disabled, otherwise it's the number
+ * of quicklistNodes to leave uncompressed at ends of quicklist.
+ * 'fill' is the user-requested (or default) fill factor. */
+typedef struct quicklist {
+ quicklistNode *head;
+ quicklistNode *tail;
+ unsigned long count; /* total count of all entries in all ziplists */
+ unsigned long len; /* number of quicklistNodes */
+ int fill : 16; /* fill factor for individual nodes */
+ unsigned int compress : 16; /* depth of end nodes not to compress;0=off */
+} quicklist;
size_t offset = (-fill) - 1;
+if (offset < (sizeof(optimization_level) / sizeof(*optimization_level))) {
+ if (sz <= optimization_level[offset]) {
+ return 1;
+ } else {
+ return 0;
+ }
+} else {
+ return 0;
+}
- /* The main collection cycle. Sample random keys among keys
- * with an expire set, checking for expired ones. */
- expired = 0;
- sampled = 0;
- ttl_sum = 0;
- ttl_samples = 0;
+/* Optimization levels for size-based filling */
+static const size_t optimization_level[] = {4096, 8192, 16384, 32768, 65536};
- if (num > config_keys_per_loop)
- num = config_keys_per_loop;
+/* Create a new quicklist.
+ * Free with quicklistRelease(). */
+quicklist *quicklistCreate(void) {
+ struct quicklist *quicklist;
- /* Here we access the low level representation of the hash table
- * for speed concerns: this makes this code coupled with dict.c,
- * but it hardly changed in ten years.
- *
- * Note that certain places of the hash table may be empty,
- * so we want also a stop condition about the number of
- * buckets that we scanned. However scanning for free buckets
- * is very fast: we are in the cache line scanning a sequential
- * array of NULL pointers, so we can scan a lot more buckets
- * than keys in the same time. */
- long max_buckets = num*20;
- long checked_buckets = 0;
+ quicklist = zmalloc(sizeof(*quicklist));
+ quicklist->head = quicklist->tail = NULL;
+ quicklist->len = 0;
+ quicklist->count = 0;
+ quicklist->compress = 0;
+ quicklist->fill = -2;
+ return quicklist;
+}
/* The actual Redis Object */
+#define OBJ_STRING 0 /* String object. */
+#define OBJ_LIST 1 /* List object. */
+#define OBJ_SET 2 /* Set object. */
+#define OBJ_ZSET 3 /* Sorted set object. */
+#define OBJ_HASH 4 /* Hash object. */
+/*
+ * Objects encoding. Some kind of objects like Strings and Hashes can be
+ * internally represented in multiple ways. The 'encoding' field of the object
+ * is set to one of this fields for this object. */
+#define OBJ_ENCODING_RAW 0 /* Raw representation */
+#define OBJ_ENCODING_INT 1 /* Encoded as integer */
+#define OBJ_ENCODING_HT 2 /* Encoded as hash table */
+#define OBJ_ENCODING_ZIPMAP 3 /* Encoded as zipmap */
+#define OBJ_ENCODING_LINKEDLIST 4 /* No longer used: old list encoding. */
+#define OBJ_ENCODING_ZIPLIST 5 /* Encoded as ziplist */
+#define OBJ_ENCODING_INTSET 6 /* Encoded as intset */
+#define OBJ_ENCODING_SKIPLIST 7 /* Encoded as skiplist */
+#define OBJ_ENCODING_EMBSTR 8 /* Embedded sds string encoding */
+#define OBJ_ENCODING_QUICKLIST 9 /* Encoded as linked list of ziplists */
+#define OBJ_ENCODING_STREAM 10 /* Encoded as a radix tree of listpacks */
+
+#define LRU_BITS 24
+#define LRU_CLOCK_MAX ((1<<LRU_BITS)-1) /* Max value of obj->lru */
+#define LRU_CLOCK_RESOLUTION 1000 /* LRU clock resolution in ms */
+
+#define OBJ_SHARED_REFCOUNT INT_MAX
+typedef struct redisObject {
unsigned type:4;
unsigned encoding:4;
unsigned lru:LRU_BITS; /* LRU time (relative to global lru_clock) or
@@ -9721,521 +9210,568 @@ timelimit = config_cycle_slow_time_perc*1000000/server.hz/100;
* |00pppppp| - 1 byte
+* String value with length less than or equal to 63 bytes (6 bits).
+* "pppppp" represents the unsigned 6 bit length.
+* |01pppppp|qqqqqqqq| - 2 bytes
+* String value with length less than or equal to 16383 bytes (14 bits).
+* IMPORTANT: The 14 bit number is stored in big endian.
+* |10000000|qqqqqqqq|rrrrrrrr|ssssssss|tttttttt| - 5 bytes
+* String value with length greater than or equal to 16384 bytes.
+* Only the 4 bytes following the first byte represents the length
+* up to 32^2-1. The 6 lower bits of the first byte are not used and
+* are set to zero.
+* IMPORTANT: The 32 bit number is stored in big endian.
+* |11000000| - 3 bytes
+* Integer encoded as int16_t (2 bytes).
+* |11010000| - 5 bytes
+* Integer encoded as int32_t (4 bytes).
+* |11100000| - 9 bytes
+* Integer encoded as int64_t (8 bytes).
+* |11110000| - 4 bytes
+* Integer encoded as 24 bit signed (3 bytes).
+* |11111110| - 2 bytes
+* Integer encoded as 8 bit signed (1 byte).
+* |1111xxxx| - (with xxxx between 0000 and 1101) immediate 4 bit integer.
+* Unsigned integer from 0 to 12. The encoded value is actually from
+* 1 to 13 because 0000 and 1111 can not be used, so 1 should be
+* subtracted from the encoded 4 bit value to obtain the right value.
+* |11111111| - End of ziplist special entry.
/* This function is called when we are going to perform some operation
+ * in a given key, but such key may be already logically expired even if
+ * it still exists in the database. The main way this function is called
+ * is via lookupKey*() family of functions.
+ *
+ * The behavior of the function depends on the replication role of the
+ * instance, because slave instances do not expire keys, they wait
+ * for DELs from the master for consistency matters. However even
+ * slaves will try to have a coherent return value for the function,
+ * so that read commands executed in the slave side will be able to
+ * behave like if the key is expired even if still present (because the
+ * master has yet to propagate the DEL).
+ *
+ * In masters as a side effect of finding a key which is expired, such
+ * key will be evicted from the database. Also this may trigger the
+ * propagation of a DEL/UNLINK command in AOF / replication stream.
+ *
+ * The return value of the function is 0 if the key is still valid,
+ * otherwise the function returns 1 if the key is expired. */
+int expireIfNeeded(redisDb *db, robj *key) {
+ if (!keyIsExpired(db,key)) return 0;
- /* Update the access time for the ageing algorithm.
- * Don't do it if we have a saving child, as this will trigger
- * a copy on write madness. */
- if (!hasActiveChildProcess() && !(flags & LOOKUP_NOTOUCH)){
- if (server.maxmemory_policy & MAXMEMORY_FLAG_LFU) {
- // this is covered in a later section
- updateLFU(val);
- } else {
- // in this branch, each access updates the lru value
- val->lru = LRU_CLOCK();
- }
- }
- return val;
- } else {
- return NULL;
+ /* If we are running in the context of a slave, instead of
+ * evicting the expired key from the database, we return ASAP:
+ * the slave key expiration is controlled by the master that will
+ * send us synthesized DEL operations for expired keys.
+ *
+ * Still we try to return the right information to the caller,
+ * that is, 0 if we think the key should be still valid, 1 if
+ * we think the key is expired at this time. */
+ if (server.masterhost != NULL) return 1;
+
+ /* Delete the key */
+ server.stat_expiredkeys++;
+ propagateExpire(db,key,server.lazyfree_lazy_expire);
+ notifyKeyspaceEvent(NOTIFY_EXPIRED,
+ "expired",key,db->id);
+ return server.lazyfree_lazy_expire ? dbAsyncDelete(db,key) :
+ dbSyncDelete(db,key);
+}
+
+/* Check if the key is expired. */
+int keyIsExpired(redisDb *db, robj *key) {
+ mstime_t when = getExpire(db,key);
+ mstime_t now;
+
+ if (when < 0) return 0; /* No expire for this key */
+
+ /* Don't expire anything while loading. It will be done later. */
+ if (server.loading) return 0;
+
+ /* If we are in the context of a Lua script, we pretend that time is
+ * blocked to when the Lua script started. This way a key can expire
+ * only the first time it is accessed and not in the middle of the
+ * script execution, making propagation to slaves / AOF consistent.
+ * See issue #1525 on Github for more information. */
+ if (server.lua_caller) {
+ now = server.lua_time_start;
+ }
+ /* If we are in the middle of a command execution, we still want to use
+ * a reference time that does not change: in that case we just use the
+ * cached time, that we update before each call in the call() function.
+ * This way we avoid that commands such as RPOPLPUSH or similar, that
+ * may re-open the same key multiple times, can invalidate an already
+ * open object in a next call, if the next call will see the key expired,
+ * while the first did not. */
+ else if (server.fixed_time_expire > 0) {
+ now = server.mstime;
+ }
+ /* For the other cases, we want to use the most fresh time we have. */
+ else {
+ now = mstime();
}
+
+ /* The key expired if the current (virtual or real) time is greater
+ * than the expire time of the key. */
+ return now > when;
}
-/* This function is used to obtain the current LRU clock.
- * If the current resolution is lower than the frequency we refresh the
- * LRU clock (as it should be in production servers) we return the
- * precomputed value, otherwise we need to resort to a system call. */
-unsigned int LRU_CLOCK(void) {
- unsigned int lruclock;
- if (1000/server.hz <= LRU_CLOCK_RESOLUTION) {
- // when the server frequency server.hz is greater than 1, use the precomputed lruclock
- lruclock = server.lruclock;
- } else {
- lruclock = getLRUClock();
+/* Return the expire time of the specified key, or -1 if no expire
+ * is associated with this key (i.e. the key is non volatile) */
+long long getExpire(redisDb *db, robj *key) {
+ dictEntry *de;
+
+ /* No expire? return ASAP */
+ if (dictSize(db->expires) == 0 ||
+ (de = dictFind(db->expires,key->ptr)) == NULL) return -1;
+
+ /* The entry was found in the expire dict, this means it should also
+ * be present in the main dict (safety check). */
+ serverAssertWithInfo(NULL,key,dictFind(db->dict,key->ptr) != NULL);
+ return dictGetSignedIntegerVal(de);
+}
/* This function handles 'background' operations we are required to do
+ * incrementally in Redis databases, such as active key expiring, resizing,
+ * rehashing. */
+void databasesCron(void) {
+ /* Expire keys by random sampling. Not required for slaves
+ * as master will synthesize DELs for us. */
+ if (server.active_expire_enabled) {
+ if (server.masterhost == NULL) {
+ activeExpireCycle(ACTIVE_EXPIRE_CYCLE_SLOW);
+ } else {
+ expireSlaveKeys();
+ }
}
- return lruclock;
-}
-/* Return the LRU clock, based on the clock resolution. This is a time
- * in a reduced-bits format that can be used to set and check the
- * object->lru field of redisObject structures. */
-unsigned int getLRUClock(void) {
- return (mstime()/LRU_CLOCK_RESOLUTION) & LRU_CLOCK_MAX;
-}
/* If this function gets called we already read a whole
- * command, arguments are in the client argv/argc fields.
- * processCommand() execute the command or prepare the
- * server for a bulk read from the client.
- *
- * If C_OK is returned the client is still alive and valid and
- * other operations can be performed by the caller. Otherwise
- * if C_ERR is returned the client was destroyed (i.e. after QUIT). */
-int processCommand(client *c) {
- moduleCallCommandFilters(c);
-
+ /* Defrag keys gradually. */
+ activeDefragCycle();
- /* Handle the maxmemory directive.
- *
- * Note that we do not want to reclaim memory if we are here re-entering
- * the event loop since there is a busy Lua script running in timeout
- * condition, to avoid mixing the propagation of scripts with the
- * propagation of DELs due to eviction. */
- if (server.maxmemory && !server.lua_timedout) {
- int out_of_memory = freeMemoryIfNeededAndSafe() == C_ERR;
- /* freeMemoryIfNeeded may flush slave output buffers. This may result
- * into a slave, that may be the active client, to be freed. */
- if (server.current_client == NULL) return C_ERR;
+ /* Perform hash tables rehashing if needed, but only if there are no
+ * other processes saving the DB on disk. Otherwise rehashing is bad
+ * as will cause a lot of copy-on-write of memory pages. */
+ if (!hasActiveChildProcess()) {
+ /* We use global counters so if we stop the computation at a given
+ * DB we'll be able to start from the successive in the next
+ * cron loop iteration. */
+ static unsigned int resize_db = 0;
+ static unsigned int rehash_db = 0;
+ int dbs_per_call = CRON_DBS_PER_CALL;
+ int j;
- /* It was impossible to free enough memory, and the command the client
- * is trying to execute is denied during OOM conditions or the client
- * is in MULTI/EXEC context? Error. */
- if (out_of_memory &&
- (c->cmd->flags & CMD_DENYOOM ||
- (c->flags & CLIENT_MULTI &&
- c->cmd->proc != execCommand &&
- c->cmd->proc != discardCommand)))
- {
- flagTransaction(c);
- addReply(c, shared.oomerr);
- return C_OK;
+ /* Don't test more DBs than we have. */
+ if (dbs_per_call > server.dbnum) dbs_per_call = server.dbnum;
+
+ /* Resize */
+ for (j = 0; j < dbs_per_call; j++) {
+ tryResizeHashTables(resize_db % server.dbnum);
+ resize_db++;
+ }
+
+ /* Rehash */
+ if (server.activerehashing) {
+ for (j = 0; j < dbs_per_call; j++) {
+ int work_done = incrementallyRehash(rehash_db);
+ if (work_done) {
+ /* If the function did some work, stop here, we'll do
+ * more at the next cron loop. */
+ break;
+ } else {
+ /* If this db didn't need rehash, we'll try the next one. */
+ rehash_db++;
+ rehash_db %= server.dbnum;
+ }
+ }
}
}
-}
/* This is a wrapper for freeMemoryIfNeeded() that only really calls the
- * function if right now there are the conditions to do so safely:
+}
+/* Try to expire a few timed out keys. The algorithm used is adaptive and
+ * will use few CPU cycles if there are few expiring keys, otherwise
+ * it will get more aggressive to avoid that too much memory is used by
+ * keys that can be removed from the keyspace.
*
- * - There must be no script in timeout condition.
- * - Nor we are loading data right now.
+ * Every expire cycle tests multiple databases: the next call will start
+ * again from the next db, with the exception of exists for time limit: in that
+ * case we restart again from the last database we were processing. Anyway
+ * no more than CRON_DBS_PER_CALL databases are tested at every iteration.
*
- */
-int freeMemoryIfNeededAndSafe(void) {
- if (server.lua_timedout || server.loading) return C_OK;
- return freeMemoryIfNeeded();
-}
-/* This function is periodically called to see if there is memory to free
- * according to the current "maxmemory" settings. In case we are over the
- * memory limit, the function will try to free some memory to return back
- * under the limit.
+ * The function can perform more or less work, depending on the "type"
+ * argument. It can execute a "fast cycle" or a "slow cycle". The slow
+ * cycle is the main way we collect expired cycles: this happens with
+ * the "server.hz" frequency (usually 10 hertz).
*
- * The function returns C_OK if we are under the memory limit or if we
- * were over the limit, but the attempt to free memory was successful.
- * Otherwise if we are over the memory limit, but not enough memory
- * was freed to return back under the limit, the function returns C_ERR. */
-int freeMemoryIfNeeded(void) {
- int keys_freed = 0;
- /* By default replicas should ignore maxmemory
- * and just be masters exact copies. */
- if (server.masterhost && server.repl_slave_ignore_maxmemory) return C_OK;
+ * However the slow cycle can exit for timeout, since it used too much time.
+ * For this reason the function is also invoked to perform a fast cycle
+ * at every event loop cycle, in the beforeSleep() function. The fast cycle
+ * will try to perform less work, but will do it much more often.
+ *
+ * The following are the details of the two expire cycles and their stop
+ * conditions:
+ *
+ * If type is ACTIVE_EXPIRE_CYCLE_FAST the function will try to run a
+ * "fast" expire cycle that takes no longer than EXPIRE_FAST_CYCLE_DURATION
+ * microseconds, and is not repeated again before the same amount of time.
+ * The cycle will also refuse to run at all if the latest slow cycle did not
+ * terminate because of a time limit condition.
+ *
+ * If type is ACTIVE_EXPIRE_CYCLE_SLOW, that normal expire cycle is
+ * executed, where the time limit is a percentage of the REDIS_HZ period
+ * as specified by the ACTIVE_EXPIRE_CYCLE_SLOW_TIME_PERC define. In the
+ * fast cycle, the check of every database is interrupted once the number
+ * of already expired keys in the database is estimated to be lower than
+ * a given percentage, in order to avoid doing too much work to gain too
+ * little memory.
+ *
+ * The configured expire "effort" will modify the baseline parameters in
+ * order to do more work in both the fast and slow expire cycles.
+ */
- size_t mem_reported, mem_tofree, mem_freed;
- mstime_t latency, eviction_latency;
- long long delta;
- int slaves = listLength(server.slaves);
+#define ACTIVE_EXPIRE_CYCLE_KEYS_PER_LOOP 20 /* Keys for each DB loop. */
+#define ACTIVE_EXPIRE_CYCLE_FAST_DURATION 1000 /* Microseconds. */
+#define ACTIVE_EXPIRE_CYCLE_SLOW_TIME_PERC 25 /* Max % of CPU to use. */
+#define ACTIVE_EXPIRE_CYCLE_ACCEPTABLE_STALE 10 /* % of stale keys after which
+ we do extra efforts. */
+void activeExpireCycle(int type) {
+ /* Adjust the running parameters according to the configured expire
+ * effort. The default effort is 1, and the maximum configurable effort
+ * is 10. */
+ unsigned long
+ effort = server.active_expire_effort-1, /* Rescale from 0 to 9. */
+ config_keys_per_loop = ACTIVE_EXPIRE_CYCLE_KEYS_PER_LOOP +
+ ACTIVE_EXPIRE_CYCLE_KEYS_PER_LOOP/4*effort,
+ config_cycle_fast_duration = ACTIVE_EXPIRE_CYCLE_FAST_DURATION +
+ ACTIVE_EXPIRE_CYCLE_FAST_DURATION/4*effort,
+ config_cycle_slow_time_perc = ACTIVE_EXPIRE_CYCLE_SLOW_TIME_PERC +
+ 2*effort,
+ config_cycle_acceptable_stale = ACTIVE_EXPIRE_CYCLE_ACCEPTABLE_STALE-
+ effort;
+
+ /* This function has some global state in order to continue the work
+ * incrementally across calls. */
+ static unsigned int current_db = 0; /* Last DB tested. */
+ static int timelimit_exit = 0; /* Time limit hit in previous call? */
+ static long long last_fast_cycle = 0; /* When last fast cycle ran. */
+
+ int j, iteration = 0;
+ int dbs_per_call = CRON_DBS_PER_CALL;
+ long long start = ustime(), timelimit, elapsed;
/* When clients are paused the dataset should be static not just from the
* POV of clients not being able to write, but also from the POV of
* expires and evictions of keys not being performed. */
- if (clientsArePaused()) return C_OK;
- if (getMaxmemoryState(&mem_reported,NULL,&mem_tofree,NULL) == C_OK)
- return C_OK;
-
- mem_freed = 0;
-
- if (server.maxmemory_policy == MAXMEMORY_NO_EVICTION)
- goto cant_free; /* We need to free memory, but policy forbids. */
-
- latencyStartMonitor(latency);
- while (mem_freed < mem_tofree) {
- int j, k, i;
- static unsigned int next_db = 0;
- sds bestkey = NULL;
- int bestdbid;
- redisDb *db;
- dict *dict;
- dictEntry *de;
-
- if (server.maxmemory_policy & (MAXMEMORY_FLAG_LRU|MAXMEMORY_FLAG_LFU) ||
- server.maxmemory_policy == MAXMEMORY_VOLATILE_TTL)
- {
- struct evictionPoolEntry *pool = EvictionPoolLRU;
-
- while(bestkey == NULL) {
- unsigned long total_keys = 0, keys;
-
- /* We don't want to make local-db choices when expiring keys,
- * so to start populate the eviction pool sampling keys from
- * every DB. */
- for (i = 0; i < server.dbnum; i++) {
- db = server.db+i;
- dict = (server.maxmemory_policy & MAXMEMORY_FLAG_ALLKEYS) ?
- db->dict : db->expires;
- if ((keys = dictSize(dict)) != 0) {
- evictionPoolPopulate(i, dict, db->dict, pool);
- total_keys += keys;
- }
- }
- if (!total_keys) break; /* No keys to evict. */
+ if (clientsArePaused()) return;
- /* Go backward from best to worst element to evict. */
- for (k = EVPOOL_SIZE-1; k >= 0; k--) {
- if (pool[k].key == NULL) continue;
- bestdbid = pool[k].dbid;
+ if (type == ACTIVE_EXPIRE_CYCLE_FAST) {
+ /* Don't start a fast cycle if the previous cycle did not exit
+ * for time limit, unless the percentage of estimated stale keys is
+ * too high. Also never repeat a fast cycle for the same period
+ * as the fast cycle total duration itself. */
+ if (!timelimit_exit &&
+ server.stat_expired_stale_perc < config_cycle_acceptable_stale)
+ return;
- if (server.maxmemory_policy & MAXMEMORY_FLAG_ALLKEYS) {
- de = dictFind(server.db[pool[k].dbid].dict,
- pool[k].key);
- } else {
- de = dictFind(server.db[pool[k].dbid].expires,
- pool[k].key);
- }
+ if (start < last_fast_cycle + (long long)config_cycle_fast_duration*2)
+ return;
- /* Remove the entry from the pool. */
- if (pool[k].key != pool[k].cached)
- sdsfree(pool[k].key);
- pool[k].key = NULL;
- pool[k].idle = 0;
+ last_fast_cycle = start;
+ }
- /* If the key exists, is our pick. Otherwise it is
- * a ghost and we need to try the next element. */
- if (de) {
- bestkey = dictGetKey(de);
- break;
- } else {
- /* Ghost... Iterate again. */
- }
- }
- }
- }
+ /* We usually should test CRON_DBS_PER_CALL per iteration, with
+ * two exceptions:
+ *
+ * 1) Don't test more DBs than we have.
+ * 2) If last time we hit the time limit, we want to scan all DBs
+ * in this iteration, as there is work to do in some DB and we don't want
+ * expired keys to use memory for too much time. */
+ if (dbs_per_call > server.dbnum || timelimit_exit)
+ dbs_per_call = server.dbnum;
- /* volatile-random and allkeys-random policy */
- else if (server.maxmemory_policy == MAXMEMORY_ALLKEYS_RANDOM ||
- server.maxmemory_policy == MAXMEMORY_VOLATILE_RANDOM)
- {
- /* When evicting a random key, we try to evict a key for
- * each DB, so we use the static 'next_db' variable to
- * incrementally visit all DBs. */
- for (i = 0; i < server.dbnum; i++) {
- j = (++next_db) % server.dbnum;
- db = server.db+j;
- dict = (server.maxmemory_policy == MAXMEMORY_ALLKEYS_RANDOM) ?
- db->dict : db->expires;
- if (dictSize(dict) != 0) {
- de = dictGetRandomKey(dict);
- bestkey = dictGetKey(de);
- bestdbid = j;
- break;
- }
- }
- }
+ /* We can use at max 'config_cycle_slow_time_perc' percentage of CPU
+ * time per iteration. Since this function gets called with a frequency of
+ * server.hz times per second, the following is the max amount of
+ * microseconds we can spend in this function. */
+ timelimit = config_cycle_slow_time_perc*1000000/server.hz/100;
+ timelimit_exit = 0;
+ if (timelimit <= 0) timelimit = 1;
- /* Finally remove the selected key. */
- if (bestkey) {
- db = server.db+bestdbid;
- robj *keyobj = createStringObject(bestkey,sdslen(bestkey));
- propagateExpire(db,keyobj,server.lazyfree_lazy_eviction);
- /* We compute the amount of memory freed by db*Delete() alone.
- * It is possible that actually the memory needed to propagate
- * the DEL in AOF and replication link is greater than the one
- * we are freeing removing the key, but we can't account for
- * that otherwise we would never exit the loop.
- *
- * AOF and Output buffer memory will be freed eventually so
- * we only care about memory used by the key space. */
- delta = (long long) zmalloc_used_memory();
- latencyStartMonitor(eviction_latency);
- if (server.lazyfree_lazy_eviction)
- dbAsyncDelete(db,keyobj);
- else
- dbSyncDelete(db,keyobj);
- latencyEndMonitor(eviction_latency);
- latencyAddSampleIfNeeded("eviction-del",eviction_latency);
- latencyRemoveNestedEvent(latency,eviction_latency);
- delta -= (long long) zmalloc_used_memory();
- mem_freed += delta;
- server.stat_evictedkeys++;
- notifyKeyspaceEvent(NOTIFY_EVICTED, "evicted",
- keyobj, db->id);
- decrRefCount(keyobj);
- keys_freed++;
+ if (type == ACTIVE_EXPIRE_CYCLE_FAST)
+ timelimit = config_cycle_fast_duration; /* in microseconds. */
- /* When the memory to free starts to be big enough, we may
- * start spending so much time here that is impossible to
- * deliver data to the slaves fast enough, so we force the
- * transmission here inside the loop. */
- if (slaves) flushSlavesOutputBuffers();
+ /* Accumulate some global stats as we expire keys, to have some idea
+ * about the number of keys that are already logically expired, but still
+ * existing inside the database. */
+ long total_sampled = 0;
+ long total_expired = 0;
- /* Normally our stop condition is the ability to release
- * a fixed, pre-computed amount of memory. However when we
- * are deleting objects in another thread, it's better to
- * check, from time to time, if we already reached our target
- * memory, since the "mem_freed" amount is computed only
- * across the dbAsyncDelete() call, while the thread can
- * release the memory all the time. */
- if (server.lazyfree_lazy_eviction && !(keys_freed % 16)) {
- if (getMaxmemoryState(NULL,NULL,NULL,NULL) == C_OK) {
- /* Let's satisfy our stop condition. */
- mem_freed = mem_tofree;
- }
- }
- } else {
- latencyEndMonitor(latency);
- latencyAddSampleIfNeeded("eviction-cycle",latency);
- goto cant_free; /* nothing to free... */
- }
- }
- latencyEndMonitor(latency);
- latencyAddSampleIfNeeded("eviction-cycle",latency);
- return C_OK;
+ for (j = 0; j < dbs_per_call && timelimit_exit == 0; j++) {
+ /* Expired and checked in a single loop. */
+ unsigned long expired, sampled;
-cant_free:
- /* We are here if we are not able to reclaim memory. There is only one
- * last thing we can try: check if the lazyfree thread has jobs in queue
- * and wait... */
- while(bioPendingJobsOfType(BIO_LAZY_FREE)) {
- if (((mem_reported - zmalloc_used_memory()) + mem_freed) >= mem_tofree)
- break;
- usleep(1000);
- }
- return C_ERR;
-}
void evictionPoolPopulate(int dbid, dict *sampledict, dict *keydict, struct evictionPoolEntry *pool) {
- int j, k, count;
- dictEntry *samples[server.maxmemory_samples];
+ redisDb *db = server.db+(current_db % server.dbnum);
- count = dictGetSomeKeys(sampledict,samples,server.maxmemory_samples);
- for (j = 0; j < count; j++) {
- unsigned long long idle;
- sds key;
- robj *o;
- dictEntry *de;
+ /* Increment the DB now so we are sure if we run out of time
+ * in the current DB we'll restart from the next. This allows to
+ * distribute the time evenly across DBs. */
+ current_db++;
- de = samples[j];
- key = dictGetKey(de);
+ /* Continue to expire if at the end of the cycle more than 25%
+ * of the keys were expired. */
+ do {
+ unsigned long num, slots;
+ long long now, ttl_sum;
+ int ttl_samples;
+ iteration++;
- /* If the dictionary we are sampling from is not the main
- * dictionary (but the expires one) we need to lookup the key
- * again in the key dictionary to obtain the value object. */
- if (server.maxmemory_policy != MAXMEMORY_VOLATILE_TTL) {
- if (sampledict != keydict) de = dictFind(keydict, key);
- o = dictGetVal(de);
- }
+ /* If there is nothing to expire try next DB ASAP. */
+ if ((num = dictSize(db->expires)) == 0) {
+ db->avg_ttl = 0;
+ break;
+ }
+ slots = dictSlots(db->expires);
+ now = mstime();
- /* Calculate the idle time according to the policy. This is called
- * idle just because the code initially handled LRU, but is in fact
- * just a score where a higher score means a better candidate. */
- if (server.maxmemory_policy & MAXMEMORY_FLAG_LRU) {
- idle = estimateObjectIdleTime(o);
- } else if (server.maxmemory_policy & MAXMEMORY_FLAG_LFU) {
- /* When we use an LRU policy, we sort the keys by idle time
- * so that we expire keys starting from greater idle time.
- * However when the policy is an LFU one, we have a frequency
- * estimation, and we want to evict keys with lower frequency
- * first. So inside the pool we put objects using the inverted
- * frequency subtracting the actual frequency to the maximum
- * frequency of 255. */
- idle = 255-LFUDecrAndReturn(o);
- } else if (server.maxmemory_policy == MAXMEMORY_VOLATILE_TTL) {
- /* In this case the sooner the expire the better. */
- idle = ULLONG_MAX - (long)dictGetVal(de);
- } else {
- serverPanic("Unknown eviction policy in evictionPoolPopulate()");
- }
+ /* When there are less than 1% filled slots, sampling the key
+ * space is expensive, so stop here waiting for better times...
+ * The dictionary will be resized asap. */
+ if (num && slots > DICT_HT_INITIAL_SIZE &&
+ (num*100/slots < 1)) break;
- /* Insert the element inside the pool.
- * First, find the first empty bucket or the first populated
- * bucket that has an idle time smaller than our idle time. */
- k = 0;
- while (k < EVPOOL_SIZE &&
- pool[k].key &&
- pool[k].idle < idle) k++;
- if (k == 0 && pool[EVPOOL_SIZE-1].key != NULL) {
- /* Can't insert if the element is < the worst element we have
- * and there are no empty buckets. */
- continue;
- } else if (k < EVPOOL_SIZE && pool[k].key == NULL) {
- /* Inserting into empty position. No setup needed before insert. */
- } else {
- /* Inserting in the middle. Now k points to the first element
- * greater than the element to insert. */
- if (pool[EVPOOL_SIZE-1].key == NULL) {
- /* Free space on the right? Insert at k shifting
- * all the elements from k to end to the right. */
+ /* The main collection cycle. Sample random keys among keys
+ * with an expire set, checking for expired ones. */
+ expired = 0;
+ sampled = 0;
+ ttl_sum = 0;
+ ttl_samples = 0;
+
+ if (num > config_keys_per_loop)
+ num = config_keys_per_loop;
+
+ /* Here we access the low level representation of the hash table
+ * for speed concerns: this makes this code coupled with dict.c,
+ * but it hardly changed in ten years.
+ *
+ * Note that certain places of the hash table may be empty,
+ * so we want also a stop condition about the number of
+ * buckets that we scanned. However scanning for free buckets
+ * is very fast: we are in the cache line scanning a sequential
+ * array of NULL pointers, so we can scan a lot more buckets
+ * than keys in the same time. */
+ long max_buckets = num*20;
+ long checked_buckets = 0;
- /* Save SDS before overwriting. */
- sds cached = pool[EVPOOL_SIZE-1].cached;
- memmove(pool+k+1,pool+k,
- sizeof(pool[0])*(EVPOOL_SIZE-k-1));
- pool[k].cached = cached;
- } else {
- /* No free space on right? Insert at k-1 */
- k--;
- /* Shift all elements on the left of k (included) to the
- * left, so we discard the element with smaller idle time. */
- sds cached = pool[0].cached; /* Save SDS before overwriting. */
- if (pool[0].key != pool[0].cached) sdsfree(pool[0].key);
- memmove(pool,pool+1,sizeof(pool[0])*k);
- pool[k].cached = cached;
+ while (sampled < num && checked_buckets < max_buckets) {
+ for (int table = 0; table < 2; table++) {
+ if (table == 1 && !dictIsRehashing(db->expires)) break;
+
+ unsigned long idx = db->expires_cursor;
+ idx &= db->expires->ht[table].sizemask;
+ dictEntry *de = db->expires->ht[table].table[idx];
+ long long ttl;
+
+ /* Scan the current bucket of the current table. */
+ checked_buckets++;
+ while(de) {
+ /* Get the next entry now since this entry may get
+ * deleted. */
+ dictEntry *e = de;
+ de = de->next;
+
+ ttl = dictGetSignedIntegerVal(e)-now;
+ if (activeExpireCycleTryExpire(db,e,now)) expired++;
+ if (ttl > 0) {
+ /* We want the average TTL of keys yet
+ * not expired. */
+ ttl_sum += ttl;
+ ttl_samples++;
+ }
+ sampled++;
+ }
+ }
+ db->expires_cursor++;
}
- }
+ total_expired += expired;
+ total_sampled += sampled;
- /* Try to reuse the cached SDS string allocated in the pool entry,
- * because allocating and deallocating this object is costly
- * (according to the profiler, not my fantasy. Remember:
- * premature optimizbla bla bla bla. */
- int klen = sdslen(key);
- if (klen > EVPOOL_CACHED_SDS_SIZE) {
- pool[k].key = sdsdup(key);
- } else {
- memcpy(pool[k].cached,key,klen+1);
- sdssetlen(pool[k].cached,klen);
- pool[k].key = pool[k].cached;
- }
- pool[k].idle = idle;
- pool[k].dbid = dbid;
- }
-}
/* Given an object returns the min number of milliseconds the object was never
- * requested, using an approximated LRU algorithm. */
-unsigned long long estimateObjectIdleTime(robj *o) {
- unsigned long long lruclock = LRU_CLOCK();
- if (lruclock >= o->lru) {
- return (lruclock - o->lru) * LRU_CLOCK_RESOLUTION;
- } else {
- return (lruclock + (LRU_CLOCK_MAX - o->lru)) *
- LRU_CLOCK_RESOLUTION;
- }
-}
lfu-log-factor 10
-lfu-decay-time 1
-```
-`lfu-log-factor` tunes how fast the counter grows: the larger lfu-log-factor is, the more slowly the counter grows.
+ /* Update the average TTL stats for this database. */
+ if (ttl_samples) {
+ long long avg_ttl = ttl_sum/ttl_samples;
-`lfu-decay-time` is a value in minutes that tunes how fast the counter decays.
-One question: is 8 bits enough for the count? If every access simply added 1 it certainly would not be, but the masters are masters precisely because they don't just add one. Read the code below.
-```C
-/* Low level key lookup API, not actually called directly from commands
- * implementations that should instead rely on lookupKeyRead(),
- * lookupKeyWrite() and lookupKeyReadWithFlags(). */
-robj *lookupKey(redisDb *db, robj *key, int flags) {
- dictEntry *de = dictFind(db->dict,key->ptr);
- if (de) {
- robj *val = dictGetVal(de);
+ /* Do a simple running average with a few samples.
+ * We just use the current estimate with a weight of 2%
+ * and the previous estimate with a weight of 98%. */
+ if (db->avg_ttl == 0) db->avg_ttl = avg_ttl;
+ db->avg_ttl = (db->avg_ttl/50)*49 + (avg_ttl/50);
+ }
- /* Update the access time for the ageing algorithm.
- * Don't do it if we have a saving child, as this will trigger
- * a copy on write madness. */
- if (!hasActiveChildProcess() && !(flags & LOOKUP_NOTOUCH)){
- if (server.maxmemory_policy & MAXMEMORY_FLAG_LFU) {
- // when the eviction policy is LFU, updateLFU is called here
- updateLFU(val);
- } else {
- val->lru = LRU_CLOCK();
+ /* We can't block forever here even if there are many keys to
+ * expire. So after a given amount of milliseconds return to the
+ * caller waiting for the other active expire cycle. */
+ if ((iteration & 0xf) == 0) { /* check once every 16 iterations. */
+ elapsed = ustime()-start;
+ if (elapsed > timelimit) {
+ timelimit_exit = 1;
+ server.stat_expired_time_cap_reached_count++;
+ break;
+ }
}
- }
- return val;
- } else {
- return NULL;
+ /* We don't repeat the cycle for the current database if there are
+ * an acceptable amount of stale keys (logically expired but yet
+ * not reclaimed). */
+ } while ((expired*100/sampled) > config_cycle_acceptable_stale);
}
-}
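To make the `do/while` stop condition above concrete, here is a tiny self-contained version of the check, using the 25% figure from the comment above (the actual `config_cycle_acceptable_stale` value may differ):
```C
#include <stdio.h>

/* Mirrors the loop guard: keep expiring the same DB while more than
 * acceptable_stale percent of the sampled keys turned out to be expired. */
static int should_repeat(unsigned long expired, unsigned long sampled,
                         unsigned long acceptable_stale) {
    if (sampled == 0) return 0; /* guard not shown in the excerpt above */
    return (expired * 100 / sampled) > acceptable_stale;
}

int main(void) {
    printf("%d\n", should_repeat(6, 20, 25)); /* 30% stale -> 1, repeat  */
    printf("%d\n", should_repeat(4, 20, 25)); /* 20% stale -> 0, next DB */
    return 0;
}
```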
/* Update LFU when an object is accessed.
- * Firstly, decrement the counter if the decrement time is reached.
- * Then logarithmically increment the counter, and update the access time. */
-void updateLFU(robj *val) {
- unsigned long counter = LFUDecrAndReturn(val);
- counter = LFULogIncr(counter);
- val->lru = (LFUGetTimeInMinutes()<<8) | counter;
-}
/* If the object decrement time is reached decrement the LFU counter but
- * do not update LFU fields of the object, we update the access time
- * and counter in an explicit way when the object is really accessed.
- * The counter is decremented by one for each server.lfu_decay_time
- * period that has elapsed.
- * Return the object frequency counter.
- *
- * This function is used in order to scan the dataset for the best object
- * to fit: as we check for the candidate, we incrementally decrement the
- * counter of the scanned objects if needed. */
-unsigned long LFUDecrAndReturn(robj *o) {
- // shift right by 8 bits to get the last decay time
- unsigned long ldt = o->lru >> 8;
- // AND with 255 to get the counter value
- unsigned long counter = o->lru & 255;
- // use lfu_decay_time to work out how many decay periods have elapsed
- unsigned long num_periods = server.lfu_decay_time ? LFUTimeElapsed(ldt) / server.lfu_decay_time : 0;
- if (num_periods)
- counter = (num_periods > counter) ? 0 : counter - num_periods;
- return counter;
-}
fn main() {
+    let s1 = String::from("hello");
+    let len = calculate_length(&s1);
+
+    println!("The length of '{}' is {}", s1, len);
+}
+fn calculate_length(s: &String) -> usize {
+    s.len()
+}
/* Low level key lookup API, not actually called directly from commands
+ * implementations that should instead rely on lookupKeyRead(),
+ * lookupKeyWrite() and lookupKeyReadWithFlags(). */
+robj *lookupKey(redisDb *db, robj *key, int flags) {
+ dictEntry *de = dictFind(db->dict,key->ptr);
+ if (de) {
+ robj *val = dictGetVal(de);
+
+ /* Update the access time for the ageing algorithm.
+ * Don't do it if we have a saving child, as this will trigger
+ * a copy on write madness. */
+ if (!hasActiveChildProcess() && !(flags & LOOKUP_NOTOUCH)){
+ if (server.maxmemory_policy & MAXMEMORY_FLAG_LFU) {
+ // this is covered in the next section
+ updateLFU(val);
+ } else {
+ // in this branch, the lru field is refreshed on every access
+ val->lru = LRU_CLOCK();
+ }
+ }
+ return val;
+ } else {
+ return NULL;
+ }
+}
+/* This function is used to obtain the current LRU clock.
+ * If the current resolution is lower than the frequency we refresh the
+ * LRU clock (as it should be in production servers) we return the
+ * precomputed value, otherwise we need to resort to a system call. */
+unsigned int LRU_CLOCK(void) {
+ unsigned int lruclock;
+ if (1000/server.hz <= LRU_CLOCK_RESOLUTION) {
+ // if the server frequency server.hz is high enough, use the precomputed server.lruclock
+ lruclock = server.lruclock;
+ } else {
+ lruclock = getLRUClock();
+ }
+ return lruclock;
+}
+/* Return the LRU clock, based on the clock resolution. This is a time
+ * in a reduced-bits format that can be used to set and check the
+ * object->lru field of redisObject structures. */
+unsigned int getLRUClock(void) {
+ return (mstime()/LRU_CLOCK_RESOLUTION) & LRU_CLOCK_MAX;
+}
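A quick worked example of the branch condition, using what I believe are the defaults (treat the concrete numbers as assumptions): with LRU_CLOCK_RESOLUTION at 1000 ms and server.hz at 10, serverCron refreshes server.lruclock every 1000/10 = 100 ms; since 100 <= 1000, the cached clock is always fresh at the chosen resolution, so LRU_CLOCK() can skip the mstime() system call on every key lookup.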
/* If this function gets called we already read a whole
+ * command, arguments are in the client argv/argc fields.
+ * processCommand() execute the command or prepare the
+ * server for a bulk read from the client.
+ *
+ * If C_OK is returned the client is still alive and valid and
+ * other operations can be performed by the caller. Otherwise
+ * if C_ERR is returned the client was destroyed (i.e. after QUIT). */
+int processCommand(client *c) {
+ moduleCallCommandFilters(c);
+
+
+
+ /* Handle the maxmemory directive.
+ *
+ * Note that we do not want to reclaim memory if we are here re-entering
+ * the event loop since there is a busy Lua script running in timeout
+ * condition, to avoid mixing the propagation of scripts with the
+ * propagation of DELs due to eviction. */
+ if (server.maxmemory && !server.lua_timedout) {
+ int out_of_memory = freeMemoryIfNeededAndSafe() == C_ERR;
+ /* freeMemoryIfNeeded may flush slave output buffers. This may result
+ * into a slave, that may be the active client, to be freed. */
+ if (server.current_client == NULL) return C_ERR;
+
+ /* It was impossible to free enough memory, and the command the client
+ * is trying to execute is denied during OOM conditions or the client
+ * is in MULTI/EXEC context? Error. */
+ if (out_of_memory &&
+ (c->cmd->flags & CMD_DENYOOM ||
+ (c->flags & CLIENT_MULTI &&
+ c->cmd->proc != execCommand &&
+ c->cmd->proc != discardCommand)))
+ {
+ flagTransaction(c);
+ addReply(c, shared.oomerr);
+ return C_OK;
+ }
+ }
+}
/* This is a wrapper for freeMemoryIfNeeded() that only really calls the
+ * function if right now there are the conditions to do so safely:
+ *
+ * - There must be no script in timeout condition.
+ * - Nor we are loading data right now.
+ *
+ */
+int freeMemoryIfNeededAndSafe(void) {
+ if (server.lua_timedout || server.loading) return C_OK;
+ return freeMemoryIfNeeded();
+}
+/* This function is periodically called to see if there is memory to free
+ * according to the current "maxmemory" settings. In case we are over the
+ * memory limit, the function will try to free some memory to return back
+ * under the limit.
+ *
+ * The function returns C_OK if we are under the memory limit or if we
+ * were over the limit, but the attempt to free memory was successful.
+ * Otherwise if we are over the memory limit, but not enough memory
+ * was freed to return back under the limit, the function returns C_ERR. */
+int freeMemoryIfNeeded(void) {
+ int keys_freed = 0;
+ /* By default replicas should ignore maxmemory
+ * and just be masters exact copies. */
+ if (server.masterhost && server.repl_slave_ignore_maxmemory) return C_OK;
+
+ size_t mem_reported, mem_tofree, mem_freed;
+ mstime_t latency, eviction_latency;
+ long long delta;
+ int slaves = listLength(server.slaves);
- let word = first_word(&s);
+ /* When clients are paused the dataset should be static not just from the
+ * POV of clients not being able to write, but also from the POV of
+ * expires and evictions of keys not being performed. */
+ if (clientsArePaused()) return C_OK;
+ if (getMaxmemoryState(&mem_reported,NULL,&mem_tofree,NULL) == C_OK)
+ return C_OK;
- s.clear();
+ mem_freed = 0;
- // word is still 5 here, but s has already been cleared, so the value no longer means anything
-}
-
Here we have to track whether s is still valid ourselves, and keeping the code logically consistent becomes extra maintenance; this is where we can use slices
-
let s = String::from("hello world");
+ if (server.maxmemory_policy == MAXMEMORY_NO_EVICTION)
+ goto cant_free; /* We need to free memory, but policy forbids. */
-let hello = &s[0..5];
-let world = &s[6..11];
+ /* If the key exists, is our pick. Otherwise it is
+ * a ghost and we need to try the next element. */
+ if (de) {
+ bestkey = dictGetKey(de);
+ break;
+ } else {
+ /* Ghost... Iterate again. */
+ }
+ }
+ }
+ }
+ /* volatile-random and allkeys-random policy */
+ else if (server.maxmemory_policy == MAXMEMORY_ALLKEYS_RANDOM ||
+ server.maxmemory_policy == MAXMEMORY_VOLATILE_RANDOM)
+ {
+ /* When evicting a random key, we try to evict a key for
+ * each DB, so we use the static 'next_db' variable to
+ * incrementally visit all DBs. */
+ for (i = 0; i < server.dbnum; i++) {
+ j = (++next_db) % server.dbnum;
+ db = server.db+j;
+ dict = (server.maxmemory_policy == MAXMEMORY_ALLKEYS_RANDOM) ?
+ db->dict : db->expires;
+ if (dictSize(dict) != 0) {
+ de = dictGetRandomKey(dict);
+ bestkey = dictGetKey(de);
+ bestdbid = j;
+ break;
+ }
+ }
+ }
-
-public class CustomSpringEvent extends ApplicationEvent {
+ /* Finally remove the selected key. */
+ if (bestkey) {
+ db = server.db+bestdbid;
+ robj *keyobj = createStringObject(bestkey,sdslen(bestkey));
+ propagateExpire(db,keyobj,server.lazyfree_lazy_eviction);
+ /* We compute the amount of memory freed by db*Delete() alone.
+ * It is possible that actually the memory needed to propagate
+ * the DEL in AOF and replication link is greater than the one
+ * we are freeing removing the key, but we can't account for
+ * that otherwise we would never exit the loop.
+ *
+ * AOF and Output buffer memory will be freed eventually so
+ * we only care about memory used by the key space. */
+ delta = (long long) zmalloc_used_memory();
+ latencyStartMonitor(eviction_latency);
+ if (server.lazyfree_lazy_eviction)
+ dbAsyncDelete(db,keyobj);
+ else
+ dbSyncDelete(db,keyobj);
+ latencyEndMonitor(eviction_latency);
+ latencyAddSampleIfNeeded("eviction-del",eviction_latency);
+ latencyRemoveNestedEvent(latency,eviction_latency);
+ delta -= (long long) zmalloc_used_memory();
+ mem_freed += delta;
+ server.stat_evictedkeys++;
+ notifyKeyspaceEvent(NOTIFY_EVICTED, "evicted",
+ keyobj, db->id);
+ decrRefCount(keyobj);
+ keys_freed++;
-    private String message;
+ /* When the memory to free starts to be big enough, we may
+ * start spending so much time here that is impossible to
+ * deliver data to the slaves fast enough, so we force the
+ * transmission here inside the loop. */
+ if (slaves) flushSlavesOutputBuffers();
-    public CustomSpringEvent(Object source, String message) {
-        super(source);
-        this.message = message;
-    }
-    public String getMessage() {
-        return message;
-    }
-}
public abstract class ApplicationEvent extends EventObject {
-    private static final long serialVersionUID = 7099057708183571937L;
-    private final long timestamp;
+ /* Normally our stop condition is the ability to release
+ * a fixed, pre-computed amount of memory. However when we
+ * are deleting objects in another thread, it's better to
+ * check, from time to time, if we already reached our target
+ * memory, since the "mem_freed" amount is computed only
+ * across the dbAsyncDelete() call, while the thread can
+ * release the memory all the time. */
+ if (server.lazyfree_lazy_eviction && !(keys_freed % 16)) {
+ if (getMaxmemoryState(NULL,NULL,NULL,NULL) == C_OK) {
+ /* Let's satisfy our stop condition. */
+ mem_freed = mem_tofree;
+ }
+ }
+ } else {
+ latencyEndMonitor(latency);
+ latencyAddSampleIfNeeded("eviction-cycle",latency);
+ goto cant_free; /* nothing to free... */
+ }
+ }
+ latencyEndMonitor(latency);
+ latencyAddSampleIfNeeded("eviction-cycle",latency);
+ return C_OK;
-    public ApplicationEvent(Object source) {
-        super(source);
-        this.timestamp = System.currentTimeMillis();
- }
+cant_free:
+ /* We are here if we are not able to reclaim memory. There is only one
+ * last thing we can try: check if the lazyfree thread has jobs in queue
+ * and wait... */
+ while(bioPendingJobsOfType(BIO_LAZY_FREE)) {
+ if (((mem_reported - zmalloc_used_memory()) + mem_freed) >= mem_tofree)
+ break;
+ usleep(1000);
+ }
+ return C_ERR;
+}
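A worked example of the cant_free wait condition (numbers invented for illustration): say mem_reported was 120 MB when the cycle started, mem_tofree is 20 MB, and the synchronous part only managed mem_freed = 8 MB. The loop then sleeps in 1 ms steps while lazy-free jobs are pending, until (120 MB - current usage) + 8 MB >= 20 MB, i.e. until the background thread has actually returned the remaining 12 MB to the allocator.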
@Component
-public class CustomSpringEventPublisher {
+ /* If the dictionary we are sampling from is not the main
+ * dictionary (but the expires one) we need to lookup the key
+ * again in the key dictionary to obtain the value object. */
+ if (server.maxmemory_policy != MAXMEMORY_VOLATILE_TTL) {
+ if (sampledict != keydict) de = dictFind(keydict, key);
+ o = dictGetVal(de);
+ }
- @Resource
-    private ApplicationEventPublisher applicationEventPublisher;
+ /* Calculate the idle time according to the policy. This is called
+ * idle just because the code initially handled LRU, but is in fact
+ * just a score where a higher score means a better candidate. */
+ if (server.maxmemory_policy & MAXMEMORY_FLAG_LRU) {
+ idle = estimateObjectIdleTime(o);
+ } else if (server.maxmemory_policy & MAXMEMORY_FLAG_LFU) {
+ /* When we use an LRU policy, we sort the keys by idle time
+ * so that we expire keys starting from greater idle time.
+ * However when the policy is an LFU one, we have a frequency
+ * estimation, and we want to evict keys with lower frequency
+ * first. So inside the pool we put objects using the inverted
+ * frequency subtracting the actual frequency to the maximum
+ * frequency of 255. */
+ idle = 255-LFUDecrAndReturn(o);
+ } else if (server.maxmemory_policy == MAXMEMORY_VOLATILE_TTL) {
+ /* In this case the sooner the expire the better. */
+ idle = ULLONG_MAX - (long)dictGetVal(de);
+ } else {
+ serverPanic("Unknown eviction policy in evictionPoolPopulate()");
+ }
-    public void publishCustomEvent(final String message) {
-        System.out.println("Publishing custom event. ");
-        CustomSpringEvent customSpringEvent = new CustomSpringEvent(this, message);
-        applicationEventPublisher.publishEvent(customSpringEvent);
-    }
-}
-
Here, ApplicationEventPublisher is a Spring functional interface
-
@FunctionalInterface
-public interface ApplicationEventPublisher {
-    default void publishEvent(ApplicationEvent event) {
-        this.publishEvent((Object) event);
-    }
+ /* Insert the element inside the pool.
+ * First, find the first empty bucket or the first populated
+ * bucket that has an idle time smaller than our idle time. */
+ k = 0;
+ while (k < EVPOOL_SIZE &&
+ pool[k].key &&
+ pool[k].idle < idle) k++;
+ if (k == 0 && pool[EVPOOL_SIZE-1].key != NULL) {
+ /* Can't insert if the element is < the worst element we have
+ * and there are no empty buckets. */
+ continue;
+ } else if (k < EVPOOL_SIZE && pool[k].key == NULL) {
+ /* Inserting into empty position. No setup needed before insert. */
+ } else {
+ /* Inserting in the middle. Now k points to the first element
+ * greater than the element to insert. */
+ if (pool[EVPOOL_SIZE-1].key == NULL) {
+ /* Free space on the right? Insert at k shifting
+ * all the elements from k to end to the right. */
-    void publishEvent(Object var1);
-}
@FunctionalInterface
-public interface ApplicationListener<E extends ApplicationEvent> extends EventListener {
-    void onApplicationEvent(E var1);
+ /* Save SDS before overwriting. */
+ sds cached = pool[EVPOOL_SIZE-1].cached;
+ memmove(pool+k+1,pool+k,
+ sizeof(pool[0])*(EVPOOL_SIZE-k-1));
+ pool[k].cached = cached;
+ } else {
+ /* No free space on right? Insert at k-1 */
+ k--;
+ /* Shift all elements on the left of k (included) to the
+ * left, so we discard the element with smaller idle time. */
+ sds cached = pool[0].cached; /* Save SDS before overwriting. */
+ if (pool[0].key != pool[0].cached) sdsfree(pool[0].key);
+ memmove(pool,pool+1,sizeof(pool[0])*k);
+ pool[k].cached = cached;
+ }
+ }
-    static <T> ApplicationListener<PayloadApplicationEvent<T>> forPayload(Consumer<T> consumer) {
-        return (event) -> {
-            consumer.accept(event.getPayload());
-        };
-    }
-}
+ /* Try to reuse the cached SDS string allocated in the pool entry,
+ * because allocating and deallocating this object is costly
+ * (according to the profiler, not my fantasy. Remember:
+ * premature optimizbla bla bla bla. */
+ int klen = sdslen(key);
+ if (klen > EVPOOL_CACHED_SDS_SIZE) {
+ pool[k].key = sdsdup(key);
+ } else {
+ memcpy(pool[k].cached,key,klen+1);
+ sdssetlen(pool[k].cached,klen);
+ pool[k].key = pool[k].cached;
+ }
+ pool[k].idle = idle;
+ pool[k].dbid = dbid;
+ }
+}
+
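The insertion dance above (find the slot, then shift left or right) is easier to see in isolation. Below is a minimal standalone sketch of the same policy; the int keys and the `used` flag standing in for Redis's NULL-key check are my simplifications, not Redis code:
```C
#include <stdio.h>
#include <string.h>

#define POOL_SIZE 4

/* Entries are kept in ascending idle order; pool[POOL_SIZE-1] is the best
 * eviction candidate. */
struct entry { unsigned long long idle; int key; int used; };

static void pool_insert(struct entry *pool, int key, unsigned long long idle) {
    int k = 0;
    while (k < POOL_SIZE && pool[k].used && pool[k].idle < idle) k++;
    if (k == 0 && pool[POOL_SIZE-1].used) {
        return;                          /* worse than everything, pool full */
    } else if (k < POOL_SIZE && !pool[k].used) {
        /* empty slot: insert directly */
    } else if (!pool[POOL_SIZE-1].used) {
        /* free space on the right: shift k..end one step right */
        memmove(pool+k+1, pool+k, sizeof(pool[0])*(POOL_SIZE-k-1));
    } else {
        /* pool full: discard the entry with the smallest idle on the left */
        k--;
        memmove(pool, pool+1, sizeof(pool[0])*k);
    }
    pool[k].key = key; pool[k].idle = idle; pool[k].used = 1;
}

int main(void) {
    struct entry pool[POOL_SIZE] = {0};
    unsigned long long idles[] = {50, 10, 99, 30, 70};
    for (int i = 0; i < 5; i++) pool_insert(pool, i, idles[i]);
    for (int k = 0; k < POOL_SIZE; k++)
        if (pool[k].used) printf("key=%d idle=%llu\n", pool[k].key, pool[k].idle);
    return 0;
}
```
Running it inserts idle times 50, 10, 99, 30, 70 into a 4-slot pool and ends with 30, 50, 70, 99: the best candidate (largest idle) sits at the right, and the weakest entry is the one discarded when the pool is full.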
/* Given an object returns the min number of milliseconds the object was never
+ * requested, using an approximated LRU algorithm. */
+unsigned long long estimateObjectIdleTime(robj *o) {
+ unsigned long long lruclock = LRU_CLOCK();
+ if (lruclock >= o->lru) {
+ return (lruclock - o->lru) * LRU_CLOCK_RESOLUTION;
+ } else {
+ return (lruclock + (LRU_CLOCK_MAX - o->lru)) *
+ LRU_CLOCK_RESOLUTION;
+ }
+}
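Since the lru field is only LRU_BITS = 24 bits wide, the clock wraps around, which is what the else branch handles. Here is a self-contained sketch of the same wraparound arithmetic on plain integers (the constants mirror what I understand the Redis defaults to be; this is an illustration, not server code):
```C
#include <stdio.h>

#define LRU_BITS 24
#define LRU_CLOCK_MAX ((1 << LRU_BITS) - 1) /* 16777215 */
#define LRU_CLOCK_RESOLUTION 1000           /* milliseconds per tick */

/* Same arithmetic as estimateObjectIdleTime(), minus the robj plumbing. */
static unsigned long long idle_ms(unsigned int lruclock, unsigned int obj_lru) {
    if (lruclock >= obj_lru)
        return (unsigned long long)(lruclock - obj_lru) * LRU_CLOCK_RESOLUTION;
    return ((unsigned long long)lruclock + (LRU_CLOCK_MAX - obj_lru)) *
           LRU_CLOCK_RESOLUTION;
}

int main(void) {
    /* Normal case: clock at 5000, object touched at 4000 -> 1000000 ms. */
    printf("%llu ms\n", idle_ms(5000, 4000));
    /* Wraparound: object touched just before the 24-bit clock wrapped. */
    printf("%llu ms\n", idle_ms(10, LRU_CLOCK_MAX - 5)); /* 15000 ms */
    return 0;
}
```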
+`lfu-decay-time` is a value in minutes that tunes how fast the counter decays.
+One question: is 8 bits enough for the count? If every access simply added 1 it certainly would not be, but the masters are masters precisely because they don't just add one. Read the code below (and the increment sketch after LFUDecrAndReturn).
+```C
+/* Low level key lookup API, not actually called directly from commands
+ * implementations that should instead rely on lookupKeyRead(),
+ * lookupKeyWrite() and lookupKeyReadWithFlags(). */
+robj *lookupKey(redisDb *db, robj *key, int flags) {
+ dictEntry *de = dictFind(db->dict,key->ptr);
+ if (de) {
+ robj *val = dictGetVal(de);
-
…and then you can see the message being received.
+ /* Update the access time for the ageing algorithm.
+ * Don't do it if we have a saving child, as this will trigger
+ * a copy on write madness. */
+ if (!hasActiveChildProcess() && !(flags & LOOKUP_NOTOUCH)){
+ if (server.maxmemory_policy & MAXMEMORY_FLAG_LFU) {
+ // when the eviction policy is LFU, updateLFU is called here
+ updateLFU(val);
+ } else {
+ val->lru = LRU_CLOCK();
+ }
+ }
+ return val;
+ } else {
+ return NULL;
+ }
+}
/* Update LFU when an object is accessed.
+ * Firstly, decrement the counter if the decrement time is reached.
+ * Then logarithmically increment the counter, and update the access time. */
+void updateLFU(robj *val) {
+ unsigned long counter = LFUDecrAndReturn(val);
+ counter = LFULogIncr(counter);
+ val->lru = (LFUGetTimeInMinutes()<<8) | counter;
+}
/* If the object decrement time is reached decrement the LFU counter but
+ * do not update LFU fields of the object, we update the access time
+ * and counter in an explicit way when the object is really accessed.
+ * The counter is decremented by one for each server.lfu_decay_time
+ * period that has elapsed.
+ * Return the object frequency counter.
+ *
+ * This function is used in order to scan the dataset for the best object
+ * to fit: as we check for the candidate, we incrementally decrement the
+ * counter of the scanned objects if needed. */
+unsigned long LFUDecrAndReturn(robj *o) {
+ // shift right by 8 bits to get the last decay time
+ unsigned long ldt = o->lru >> 8;
+ // AND with 255 to get the counter value
+ unsigned long counter = o->lru & 255;
+ // use lfu_decay_time to work out how many decay periods have elapsed
+ unsigned long num_periods = server.lfu_decay_time ? LFUTimeElapsed(ldt) / server.lfu_decay_time : 0;
+ if (num_periods)
+ counter = (num_periods > counter) ? 0 : counter - num_periods;
+ return counter;
+}
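Back to the 8-bit question: the counter is not bumped by 1 per access; the increment is probabilistic and logarithmic, which is how 255 can stand for an enormous frequency. A self-contained sketch following the logic of Redis's LFULogIncr (my adaptation: `lfu_log_factor` is a parameter rather than a field read from the server config, and LFU_INIT_VAL mirrors the default of 5):
```C
#include <stdint.h>
#include <stdlib.h>

#define LFU_INIT_VAL 5 /* counter assigned to newly created objects */

/* The higher the current counter (and the larger lfu_log_factor), the
 * smaller the probability that one more access actually increments it. */
uint8_t lfu_log_incr(uint8_t counter, unsigned long lfu_log_factor) {
    if (counter == 255) return 255; /* saturated */
    double r = (double)rand() / RAND_MAX;
    double baseval = (double)counter - LFU_INIT_VAL;
    if (baseval < 0) baseval = 0;
    double p = 1.0 / (baseval * lfu_log_factor + 1);
    if (r < p) counter++;
    return counter;
}
```
With the default factor of 10, the comments in redis.conf suggest it takes on the order of a million accesses to drive the counter to 255, so 8 bits go a very long way.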
]]>
- Java
- Spring
+ Redis
+ 数据结构
+ C
+ 源码
+ Redis
- Java
- Spring
- Spring Event
+ redis
+ 数据结构
+ 源码
- rust学习笔记-所有权二
- /2021/04/18/rust%E5%AD%A6%E4%B9%A0%E7%AC%94%E8%AE%B0-%E6%89%80%E6%9C%89%E6%9D%83%E4%BA%8C/
- Now we need to talk about functions and return values; see this example from the book. In this situation, when entering a function the passed-in variable's ownership moves into the function; if that variable still has to be returned at the end, but some other computed result also needs returning, you may end up clumsily using a tuple
-
References
This is where we can use a reference to solve the problem
-
fn main() {
-    let s1 = String::from("hello");
-    let len = calculate_length(&s1);
-
-    println!("The length of '{}' is {}", s1, len);
-}
-fn calculate_length(s: &String) -> usize {
-    s.len()
-}
This one really broadened my horizons. As shown, objects A, B and C all reference N. During the first pass the collector reverses these references: N's object header records A's address to mark the reference, B is chained on when it is visited, and so on, until every object referencing N is threaded together through this chain. During the second pass the collector updates the N-references held by A, B and C to N's new address and then moves N
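A toy model of that two-pass "reference reversal" may help; this is my own loose illustration of the threading idea, not the code of any particular collector. Each object here holds a single reference, and the header is abused as the head of a chain through the referring fields:
```C
#include <stdio.h>

struct obj {
    const char *name;
    void *header;       /* normally mark bits; here the chain head */
    struct obj *field;  /* the one reference this object holds */
};

/* Pass 1: "reverse" a reference to target -- push the referring slot onto
 * the chain rooted at target->header; the slot temporarily stops holding
 * a real object pointer. */
static void thread_ref(struct obj *target, struct obj **slot) {
    *slot = (struct obj *)target->header;
    target->header = slot;
}

/* Pass 2: unthread -- walk the chain and point every recorded slot at the
 * object's new address. */
static void unthread(struct obj *target, struct obj *new_addr) {
    struct obj **slot = (struct obj **)target->header;
    while (slot) {
        struct obj **next = (struct obj **)*slot;
        *slot = new_addr;
        slot = next;
    }
    target->header = NULL;
}

int main(void) {
    struct obj N = {"N", NULL, NULL};
    struct obj A = {"A", NULL, &N}, B = {"B", NULL, &N}, C = {"C", NULL, &N};

    thread_ref(&N, &A.field); /* the first traversal meets A, then B, then C */
    thread_ref(&N, &B.field);
    thread_ref(&N, &C.field);

    struct obj N2 = N;        /* "move" N to its new location */
    unthread(&N, &N2);

    printf("A -> %s, B -> %s, C -> %s\n",
           A.field->name, B.field->name, C.field->name);
    return 0;
}
```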
]]>
- 读后感
- 生活
+ Java
+ Spring
- 生活
- 读后感
+ Java
+ Spring
+ Spring Event
- 一个 nginx 的简单记忆点
- /2022/08/21/%E4%B8%80%E4%B8%AA-nginx-%E7%9A%84%E7%AE%80%E5%8D%95%E8%AE%B0%E5%BF%86%E7%82%B9/
- Last week, while working on an nginx configuration, I ran into a small point I hadn't understood before. The usual setup is one domain per ip:port, e.g. https://nicksxs.me mapped to 127.0.0.1:443; if I also want https://nicksxs.com to resolve to the same server and be forwarded to a different upstream, I need the so-called SNI feature
-
Server Name Indication
A more generic solution for running several HTTPS servers on a single IP address is TLS Server Name Indication extension (SNI, RFC 6066), which allows a browser to pass a requested server name during the SSL handshake and, therefore, the server will know which certificate it should use for the connection. SNI is currently supported by most modern browsers, though may not be used by some old or special clients. (from the nginx documentation)
The second approach is the Resource Monitor opened from the Performance tab of Windows Task Manager: if my USB drive is F:, I can search for the processes holding files on that drive. Be very careful here ‼️‼️: do not kill these processes casually; killing some system processes can blue-screen the machine, so don't try it unless you know exactly what the process does. Neither of the first two approaches worked for me,
+
The third approach
so I tried the third approach, taking the disk offline: right-click "Computer" > Manage, open "Disk Management", find the USB drive, right-click it, choose "Offline", then "Eject". That didn't work for me either
+
The fourth approach
This was the only one that worked for me. Search "event" in the Start menu to find the Event Viewer, which shows recent Windows events. With it open, try ejecting the USB drive; since a failed eject is itself logged as an error event, a refresh shows exactly why the eject failed and which process blocked it. It turned out to be a process of Intel's driver-management program; after closing it the drive ejected. So although I called a certain vendor's processes hooligans earlier, this time they were wrongly accused
The first part is the core: how to switch data sources with Spring JDBC and Druid. We extend org.springframework.jdbc.datasource.lookup.AbstractRoutingDataSource; its determineCurrentLookupKey method is called to obtain the object used to decide which data source to pick, the lookupKey, and from the class you can see the data source is routed by this lookupKey.
+
public class DynamicDataSource extends AbstractRoutingDataSource {
- /**
- * The application should not run as a web application and should not start an
- * embedded web server.
- */
- NONE,
+ @Override
+    protected Object determineCurrentLookupKey() {
+        if (DatabaseContextHolder.getDatabaseType() != null) {
+            return DatabaseContextHolder.getDatabaseType().getName();
+        }
+        return DatabaseType.MASTER1.getName();
+    }
+}
public class DatabaseContextHolder {
+    public static final ThreadLocal<DatabaseType> databaseTypeThreadLocal = new ThreadLocal<>();
- /**
- * The application should run as a servlet-based web application and should start an
- * embedded servlet web server.
- */
- SERVLET,
+    public static DatabaseType getDatabaseType() {
+ return databaseTypeThreadLocal.get();
+ }
- /**
- * The application should run as a reactive web application and should start an
- * embedded reactive web server.
- */
- REACTIVE
+    public static void putDatabaseType(DatabaseType databaseType) {
+ databaseTypeThreadLocal.set(databaseType);
+ }
-}
-        if (extName == null) {
-            throw new IllegalStateException(
-                "Fail to get extension(com.alibaba.dubbo.rpc.Protocol) name from url(" +
-                url.toString() + ") use keys([protocol])");
-        }
+
Ran into a fairly odd problem this time: a unified deploy script passed -Dserver.port=xxxx to the application's startup parameters. That port is actually meant to be the dubbo service port, and the application serves no web traffic, yet on startup it failed with "embedded servlet container failed to start. port xxxx was already in use". That seemed strange; after looking at the startup parameters, I guessed that some second- or third-party dependency pulled in spring-web, which Spring Boot's auto configuration then loaded on its own. Reproducing it locally confirmed exactly that.
public enum WebApplicationType {
-    public com.alibaba.dubbo.rpc.Invoker refer(java.lang.Class arg0,
-        com.alibaba.dubbo.common.URL arg1)
-        throws com.alibaba.dubbo.rpc.RpcException {
-        if (arg1 == null) {
-            throw new IllegalArgumentException("url == null");
-        }
+ /**
+ * The application should not run as a web application and should not start an
+ * embedded web server.
+ */
+ NONE,
- com.alibaba.dubbo.common.URL url = arg1;
-        // the logic described earlier shows up right here
-        String extName = ((url.getProtocol() == null) ? "dubbo"
-            : url.getProtocol());
+ /**
+ * The application should run as a servlet-based web application and should start an
+ * embedded servlet web server.
+ */
+ SERVLET,
-        if (extName == null) {
-            throw new IllegalStateException(
-                "Fail to get extension(com.alibaba.dubbo.rpc.Protocol) name from url(" +
-                url.toString() + ") use keys([protocol])");
-        }
-        // here the matching extension is actually loaded through dubbo's SPI
-        com.alibaba.dubbo.rpc.Protocol extension = (com.alibaba.dubbo.rpc.Protocol) ExtensionLoader.getExtensionLoader(com.alibaba.dubbo.rpc.Protocol.class)
-            .getExtension(extName);
+ /**
+ * The application should run as a reactive web application and should start an
+ * embedded reactive web server.
+ */
+ REACTIVE
- return extension.refer(arg0, arg1);
- }
-}
-
@Override
-public final ResultSet executeQuery(final String sql) throws SQLException {
-    throw new SQLFeatureNotSupportedException("executeQuery with SQL for PreparedStatement");
-}
SELECT ... FROM is a consistent read, reading a snapshot of the database and setting no locks unless the transaction isolation level is set to SERIALIZABLE. For SERIALIZABLE level, the search sets shared next-key locks on the index records it encounters. However, only an index record lock is required for statements that lock rows using a unique index to search for a unique row.
Apart from the first statement, which takes an S lock, all the others take X (exclusive) locks. As a side note, S means shared lock and X means exclusive lock: S does not conflict with S, but S/X, X/S and X/X all conflict, i.e. once the former is held, the latter cannot be acquired.

We know phantom reads occur at the RC level but not at RR; the main difference is that at RR the locking reads above also take gap locks where appropriate. Does that mean everything above always needs a gap lock at RR? Of course not. An example: at RR, table1 has a primary key column id; does `select * from table1 where id = 10 for update` need a gap lock? No. This is my own takeaway after reading about this for a long while: a gap lock is needed exactly when, without it, the number of rows my query returns could change. For this query, could I read one row the first time and two rows the second? Impossible, because id is the primary key.

Now change the setup: id is not the primary key, but it is a unique index. How should locking work? (Note the question is how to lock, not whether a gap lock is needed.) This is a small detour into clustered (primary key) versus secondary indexes. If id is not the primary key, it is a secondary index, but a unique one. First, the secondary index entry for id = 10 must of course be locked. Does it need a gap lock? No, because the index is unique: only one record can ever have id = 10. Is that enough? Not yet. Because it is a secondary index, the real data lives in the corresponding primary key record; what if that record gets updated? So the primary key record that id = 10 points to also needs a lock (a record lock by default). Does the primary key index need a gap lock? No, it too pins down exactly one record.

Finally, when id is neither the primary key nor unique, just an ordinary index, the famous gap lock comes in; time to draw the picture. The core goal is still to keep id = 10 free of phantoms, so three gap locks go onto the id index; the primary key index needs none, because the id index already guarantees no phantoms for id = 10, and the two matching primary key records are already locked. And with that everything is OK
@Override
+public final ResultSet executeQuery(final String sql) throws SQLException {
+    throw new SQLFeatureNotSupportedException("executeQuery with SQL for PreparedStatement");
+}
+]]>
+
+ Mysql
+ C
+ 数据结构
+ 源码
+ Mysql
+
+
+ mysql
+ 数据结构
+ 源码
+ mvcc
+ read view
+ gap lock
+ next-key lock
+ 幻读
+
+
+
+ 聊聊 mysql 的 MVCC
+ /2020/04/26/%E8%81%8A%E8%81%8A-mysql-%E7%9A%84-MVCC/
+ A long time ago an interviewer asked me whether I knew MySQL's transaction isolation levels. "Er, O__O, not really." Afterwards I went looking for articles and found "MySQL 四种事务隔离级的说明", which is excellent; reading it you understand what each isolation level is. But if you think a bit further, a question remains: how are these magical isolation levels actually implemented?
+
With a straight face I'll say that although this approach isn't feasible, the idea is right; making it work needs a (very large) series of changes. As I understand it, "copy a table" becomes "copy a row", but with multiple transactions you would have to copy multiple times. Version control systems help explain this: before git and friends, the primitive approach was to finish a feature and make an archive named with the date; later came svn and git, centralized and distributed version control, which track changes at file and line granularity. Correspondingly, can MySQL transactions be refined from the table level first imagined down to the row level? As many people know, besides the user-defined columns, a row carries some extra system columns; let's fish them out of the source in data0type.h
+
/* Precise data types for system columns and the length of those columns;
+NOTE: the values must run from 0 up in the order given! All codes must
+be less than 256 */
+#define DATA_ROW_ID 0     /* row id: a 48-bit integer */
+#define DATA_ROW_ID_LEN 6 /* stored length for row id */
+
+/** Transaction id: 6 bytes */
+constexpr size_t DATA_TRX_ID = 1;
+
+/** Transaction ID type size in bytes. */
+constexpr size_t DATA_TRX_ID_LEN = 6;
+
+/** Rollback data pointer: 7 bytes */
+constexpr size_t DATA_ROLL_PTR = 2;
+
+/** Rollback data pointer type size in bytes. */
+constexpr size_t DATA_ROLL_PTR_LEN = 7;
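Laying those three system columns out as a C struct makes the sizes easier to read; this is purely illustrative (a real InnoDB record is a packed byte format, not a struct):
```C
#include <stdint.h>

struct innodb_hidden_cols {
    uint8_t row_id[6];   /* DATA_ROW_ID_LEN   = 6: 48-bit row id            */
    uint8_t trx_id[6];   /* DATA_TRX_ID_LEN   = 6: id of the last writer    */
    uint8_t roll_ptr[7]; /* DATA_ROLL_PTR_LEN = 7: points into the undo log */
};
```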
If the record's transaction id lies between m_low_limit_id and m_up_limit_id, we must check whether it is in m_ids: if it is, the change is invisible; if not, that transaction has committed and the change is visible. Let's fish out the concrete code and take a look
/** Check whether the changes by id are visible.
+ @param[in] id transaction id to check against the view
+ @param[in] name table name
+ @return whether the view sees the modifications of id. */
+ bool changes_visible(trx_id_t id, const table_name_t &name) const
+ MY_ATTRIBUTE((warn_unused_result)) {
+ ut_ad(id > 0);
+
+ if (id < m_up_limit_id || id == m_creator_trx_id) {
+ return (true);
+ }
+
+ check_trx_id_sanity(id, name);
+
+ if (id >= m_low_limit_id) {
+ return (false);
+
+ } else if (m_ids.empty()) {
+ return (true);
+ }
+
+ const ids_t::value_type *p = m_ids.data();
+
+ return (!std::binary_search(p, p + m_ids.size(), id));
+ }
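To restate the rule outside InnoDB's classes, here is a plain-C paraphrase of the same check (the names and the little main are mine; the real thing is the `changes_visible` member above):
```C
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef unsigned long long trx_id_t;

struct read_view {
    trx_id_t up_limit_id;    /* smallest id still active when the view was made */
    trx_id_t low_limit_id;   /* next id to be assigned at that moment */
    trx_id_t creator_trx_id; /* the transaction owning this view */
    const trx_id_t *ids;     /* sorted ids of transactions active back then */
    size_t n_ids;
};

static bool changes_visible(const struct read_view *v, trx_id_t id) {
    if (id < v->up_limit_id || id == v->creator_trx_id) return true;
    if (id >= v->low_limit_id) return false;
    /* between the limits: visible only if not in the active-id list */
    size_t lo = 0, hi = v->n_ids;
    while (lo < hi) { /* binary search, like std::binary_search above */
        size_t mid = lo + (hi - lo) / 2;
        if (v->ids[mid] < id) lo = mid + 1; else hi = mid;
    }
    return !(lo < v->n_ids && v->ids[lo] == id);
}

int main(void) {
    trx_id_t active[] = {90, 95};
    struct read_view v = {90, 100, 0, active, 2};
    printf("%d %d %d %d\n",
           changes_visible(&v, 80),   /* committed before the view: 1 */
           changes_visible(&v, 95),   /* still active at view time: 0 */
           changes_visible(&v, 93),   /* committed, not in ids:     1 */
           changes_visible(&v, 120)); /* started after the view:    0 */
    return 0;
}
```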
-
/* Precise data types for system columns and the length of those columns;
-NOTE: the values must run from 0 up in the order given! All codes must
-be less than 256 */
-#define DATA_ROW_ID 0     /* row id: a 48-bit integer */
-#define DATA_ROW_ID_LEN 6 /* stored length for row id */
-
-/** Transaction id: 6 bytes */
-constexpr size_t DATA_TRX_ID =1;
-
-/** Transaction ID type size in bytes. */
-constexpr size_t DATA_TRX_ID_LEN =6;
-
-/** Rollback data pointer: 7 bytes */
-constexpr size_t DATA_ROLL_PTR =2;
-
-/** Rollback data pointer type size in bytes. */
-constexpr size_t DATA_ROLL_PTR_LEN =7;
]]>
- Mac
- PHP
- Homebrew
- PHP
- icu4c
+ 生活
+ 旅游
- Mac
- PHP
- Homebrew
- icu4c
- zsh
+ 生活
+ 杭州
+ 旅游
+ 厦门
+ 中山路
+ 局口街
+ 鼓浪屿
+ 曾厝垵
+ 植物园
+ 马戏团
+ 沙茶面
+ 海蛎煎
+
+
+
+ 聊聊我刚学会的应用诊断方法
+ /2020/05/22/%E8%81%8A%E8%81%8A%E6%88%91%E5%88%9A%E5%AD%A6%E4%BC%9A%E7%9A%84%E5%BA%94%E7%94%A8%E8%AF%8A%E6%96%AD%E6%96%B9%E6%B3%95/
+ Because of my legendary background, I used to write PHP. Before swoole, the basic debugging tools were the crude var_dump and exit, simple and effective for PHP's single-process model. After switching stacks to Java it isn't so easy: for one thing there is compilation, and for another most projects are Spring-based, so when a problem is only vaguely located, framework-level issues are hard to crack with System.out.println or logging. (PS: I suspect what I write suits fellow beginners moving from a weakly typed language like PHP to Java.) On one hand Java has the excellent IDEA IDE with all kinds of fancy debugging, conditional breakpoints especially; on the other hand, with Spring plus Java, single-stepping can wear your hands out, which puzzled me for a long time.
+ Later, as I slowly progressed (read: muddled along), I learned that for an OOM, for example, you can add -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=xx/xx to the startup parameters to dump the heap on overflow, then inspect the file with Memory Analyzer (MAT) [https://www.eclipse.org/mat/] ("The Eclipse Memory Analyzer is a fast and feature-rich Java heap analyzer that helps you find memory leaks and reduce memory consumption."). I first used it for a simple case, a dead loop stuffing data into a linked list; another time ops had migrated the application with the default uniform heap size, the memory genuinely wasn't enough, and it overflowed.
+ What I really want to talk about today is the thread dump, a method I only recently started using in earnest (very much a beginner thing, perhaps). Anyone who has stepped through code in an IDE knows the layered thing where function A calls B, B calls C, and so on down (and being Java, there is a lot more 🤦‍♂️). That is the call model of most languages, built on the stack data structure, and from it we can read the code's call chain. For a Spring application, with a huge framework codebase plus a possibly huge application codebase, locating a problem quickly by single-stepping takes enormous patience and careful observation; not impossible, but hard. An example: when an application takes a very long time to start and has a great many beans, it is hard to say whether bean loading is genuinely slow or something abnormal is going on. That is when a thread dump helps. Concretely: when running or debugging in IDEA you can click the camera-like button; the left side then lists all threads and the right side shows the selected thread's stack.
+
"main@1" prio=5 tid=0x1 nid=NA runnable
+ java.lang.Thread.State: RUNNABLE
+ at TreeDistance.treeDist(TreeDistance.java:64)
+ at TreeDistance.treeDist(TreeDistance.java:65)
+ at TreeDistance.treeDist(TreeDistance.java:65)
+ at TreeDistance.treeDist(TreeDistance.java:65)
+ at TreeDistance.main(TreeDistance.java:45)
"main@1" prio=5 tid=0x1 nid=NA runnable
- java.lang.Thread.State: RUNNABLE
- at TreeDistance.treeDist(TreeDistance.java:64)
- at TreeDistance.treeDist(TreeDistance.java:65)
- at TreeDistance.treeDist(TreeDistance.java:65)
- at TreeDistance.treeDist(TreeDistance.java:65)
- at TreeDistance.main(TreeDistance.java:45)
Another point is that the plot is one that general audiences accept readily. Many shows now chase novel angles, for instance selling "bromance"; think of a certain "Ling", two shows both named something-Ling. That is one point; extending it, similar to what I said before: "xx patriarch", a character who looks twenty or thirty being called the xx patriarch (fans, go easy), and then a pile of names, the same character called one name one moment and another the next, plus a pile of deadpan expressions.
\ No newline at end of file
diff --git a/tags/java/page/2/index.html b/tags/java/page/2/index.html
index 29b24d9437..a9152353f3 100644
--- a/tags/java/page/2/index.html
+++ b/tags/java/page/2/index.html
@@ -1 +1 @@
-标签: Java | Nicksxs's Blog
\ No newline at end of file
diff --git a/tags/java/page/5/index.html b/tags/java/page/5/index.html
index 71b5745d55..50b19f4598 100644
--- a/tags/java/page/5/index.html
+++ b/tags/java/page/5/index.html
@@ -1 +1 @@
-标签: java | Nicksxs's Blog
script src=/js/third-party/comments/disqus.js>
\ No newline at end of file
diff --git a/tags/java/page/7/index.html b/tags/java/page/7/index.html
index 93146808bc..4ca492e694 100644
--- a/tags/java/page/7/index.html
+++ b/tags/java/page/7/index.html
@@ -1 +1 @@
-标签: java | Nicksxs's Blog
ipt>
\ No newline at end of file
diff --git a/tags/mysql/page/2/index.html b/tags/mysql/page/2/index.html
index 96a8cec610..15db9ed599 100644
--- a/tags/mysql/page/2/index.html
+++ b/tags/mysql/page/2/index.html
@@ -1 +1 @@
-标签: Mysql | Nicksxs's Blog