Compare commits
680 Commits
feature-HA...trunk
Author | SHA1 | Date |
---|---|---|
YuCheng Hu | 6ed3394bf6 | |
hfutatzhanghb | 35158db711 | |
Steve Loughran | 7a45ef4164 | |
zhangshuyan | 9c989515ba | |
Steve Loughran | 7bb09f1010 | |
hfutatzhanghb | 0e6bd09ae3 | |
hfutatzhanghb | ddae78b0ec | |
huhaiyang | 0c209961f8 | |
Viraj Jasani | 1dbaba8e70 | |
slfan1989 | 9de13f879a | |
slfan1989 | e6937d7076 | |
slfan1989 | fd3c3ae068 | |
zhtttylz | d9980ab40f | |
caozhiqiang | 5d6ca13c5c | |
slfan1989 | 241398de3b | |
mudit-97 | e69a077af8 | |
hfutatzhanghb | 2243cfd225 | |
Ayush Saxena | 1d0c9ab433 | |
Xianming Lei | ee94f6cdcb | |
Szilard Nemeth | e0a339223a | |
Viraj Jasani | 03a499821c | |
Steve Loughran | 160b9fc3c9 | |
smarthan | 9f1e23cc67 | |
NishthaShah | f8b7ddf69c | |
Simbarashe Dzinamarira | d92a5815f4 | |
Marcono1234 | 9acf462d26 | |
hchaverri | 124313d215 | |
slfan1989 | 86c250a54a | |
Xianming Lei | 97afb33c73 | |
slfan1989 | b977065cc4 | |
Steve Loughran | e6b54f7f68 | |
hfutatzhanghb | e9740cb17a | |
Tamas Domok | aeb3f6f1a8 | |
Ashutosh Gupta | a98d15804a | |
Viraj Jasani | 3b65b5d68f | |
Gautham B A | afe850ca2c | |
hfutatzhanghb | 5b22dc6ace | |
huhaiyang | af933f3a4f | |
Xianming Lei | 0110e24ed8 | |
NishthaShah | 9a524ede87 | |
zhtttylz | 408dbf318e | |
NishthaShah | 5272ed8670 | |
Keyao Li | 0914b3e792 | |
Keyao Li | 339bc7b3a6 | |
Xianming Lei | 441fb23293 | |
Patrick GRANDJEAN | 4627242c44 | |
LiuGuH | f6770dee47 | |
slfan1989 | bba663038d | |
Peter Szucs | ff8eac517a | |
jianghuazhu | 78cc528739 | |
Tsz-Wo Nicholas Sze | 350dcaf616 | |
wangzhaohui | 03163f9de2 | |
slfan1989 | 5ddaf2e133 | |
Viraj Jasani | 8e17385141 | |
liang3zy22 | 482897a0f6 | |
Steve Loughran | a90c722143 | |
Viraj Jasani | bef40e9427 | |
Chun Chen | 11af08d67a | |
smarthan | 251439d769 | |
slfan1989 | e0938b4c2a | |
slfan1989 | 2f87f716fa | |
Viraj Jasani | 5d0cc455f5 | |
zhtttylz | 0c77629849 | |
Steve Loughran | ad1e3a0f5b | |
susheel-gupta | 0f3406ac34 | |
WangYuanben | 905bfa84a8 | |
Steve Loughran | e76c09ac3b | |
Viraj Jasani | fe61d8f073 | |
zhtttylz | 5084e881ef | |
slfan1989 | 690db3c34b | |
cxzl25 | be50d221f5 | |
slfan1989 | d95b5c679d | |
slfan1989 | a2dda0ce03 | |
zhangshuyan | 03bf8f982a | |
slfan1989 | bdeca45294 | |
Gautham B A | a80e3dba3b | |
ZanderXu | 4ee92efb73 | |
slfan1989 | cda9863d54 | |
slfan1989 | eab4c33d09 | |
Gautham B A | c974710d8e | |
Chris | 372631c566 | |
Szilard Nemeth | c7699d3dcd | |
Viraj Jasani | ceb8878d4f | |
Dongjoon Hyun | 27776ac45e | |
Peter Szucs | bd607951c0 | |
Gautham B A | 0d06fd77de | |
slfan1989 | 476f60a806 | |
slfan1989 | c1d10f3872 | |
Tak Lon (Stephen) Wu | 0e46388474 | |
slfan1989 | 668c0a0930 | |
zhangshuyan | fddc9769a5 | |
Viraj Jasani | bfcf5dd03b | |
Szilard Nemeth | 73ca64a3ba | |
slfan1989 | 87e17b2713 | |
Gautham B A | 5147106b59 | |
SevenAddSix | 1079890ae3 | |
Pralabh Kumar | d75c6d9d57 | |
fanluoo | 1a2cd965a7 | |
Hexiaoqiao | 70c0aa342e | |
slfan1989 | 5ed7e912dc | |
wangzhaohui | 0e63152218 | |
Tsz-Wo Nicholas Sze | d9576bb9ee | |
Riya Khandelwal | 60a7e8acaa | |
yl09099 | 245fde17d7 | |
Steve Loughran | eb749ddd4d | |
slfan1989 | 55eebcf277 | |
slfan1989 | a716459cdf | |
cxzl25 | 5af0845076 | |
Steve Loughran | b6b9bd67bb | |
Sebastian Baunsgaard | 6aac6cb212 | |
cxzl25 | 2f66f0b83a | |
zhtttylz | c9e0af9961 | |
Tsz-Wo Nicholas Sze | dc78849f27 | |
Tamas Domok | 05e6dc19ea | |
zhangshuyan | 6a23c376c9 | |
Doroszlai, Attila | 5b23224970 | |
wangzhaohui | 51dcbd1d61 | |
PJ Fanning | b683769fc9 | |
dependabot[bot] | 3b7783c549 | |
PJ Fanning | ad49ddda0e | |
LiuGuH | 742e07d9c3 | |
Ashutosh Gupta | 964c1902c8 | |
Christos Bisias | 9e24ed2196 | |
PJ Fanning | 0918c87fa2 | |
Nikita Eshkeev | d07356e60e | |
Neil | 1ff7a65b9f | |
Ayush Saxena | 9e3d5c754b | |
PJ Fanning | b6c0ec796e | |
rdingankar | 5119d0c72f | |
slfan1989 | a258f1f235 | |
slfan1989 | 635521db4c | |
Viraj Jasani | 0e3aafe6c0 | |
Steve Loughran | 405ed1dde6 | |
Steve Loughran | 6ea10cf41b | |
yl09099 | 2c4d6bf33d | |
Dongjoon Hyun | 0d1b4a3556 | |
slfan1989 | 0bcdea7912 | |
zhangshuyan | 0185afafea | |
dependabot[bot] | f1936d29f1 | |
Melissa You | 2b60d0c1f4 | |
slfan1989 | 06f9bdffa6 | |
slfan1989 | dd6d0ac510 | |
Steve Loughran | 7c3d94a032 | |
slfan1989 | bffa49a64f | |
Sadanand Shenoy | 74ddf69f80 | |
rdingankar | 3e2ae1da00 | |
mjwiq | e45451f9c7 | |
zhtttylz | 523ff81624 | |
Simbarashe Dzinamarira | 47c22e388e | |
Viraj Jasani | 422bf3b24c | |
slfan1989 | 69b90b5698 | |
HarshitGupta11 | dfb2ca0a64 | |
Viraj Jasani | 937caf7de9 | |
Chris Nauroth | 14c5810d5e | |
zhtttylz | 811441d5bc | |
slfan1989 | eb1d3ebe2f | |
sreeb-msft | 389b3ea6e3 | |
Galsza | 016362a28b | |
Viraj Jasani | b4bcbb9515 | |
slfan1989 | 5bc8f25327 | |
slfan1989 | aa602381c5 | |
zhangshuyan | 700147b4ac | |
Jinhu Wu | b5e8269d9b | |
slfan1989 | 926993cb73 | |
Anmol Asrani | 762d3ddb43 | |
Andras Katona | ee01c64c6c | |
Ayush Saxena | b82bcbd8ad | |
Andras Katona | 72b0122706 | |
Tamas Domok | 69748aae32 | |
Kidd5368 | 5cf62d1498 | |
zhaixiaojuan@loongson.cn | 028cde0006 | |
Ayush Saxena | e3cb9573e1 | |
Yubi Lee | 67e02a92e0 | |
Viraj Jasani | 0dbe1d3284 | |
Viraj Jasani | 9a8287c36f | |
Viraj Jasani | f8d0949f7d | |
Viraj Jasani | b6a9d7b442 | |
slfan1989 | fa723ae839 | |
Pranav Saxena | 759ddebb13 | |
Masatake Iwasaki | 7c42d0f7da | |
Viraj Jasani | 15935fa865 | |
Viraj Jasani | cf4a678ce9 | |
Viraj Jasani | 405bfa2800 | |
Stephen O'Donnell | eee2ea075d | |
Viraj Jasani | aff840c59c | |
Steve Loughran | 09469bf47d | |
PJ Fanning | 476340c699 | |
Stephen O'Donnell | ca6f5afb6d | |
nao | 734f7abfb8 | |
slfan1989 | b406060c6b | |
rohit-kb | 487368c4b9 | |
Pranav Saxena | 358bf80c94 | |
slfan1989 | 927401886a | |
zhangshuyan | 2cb0c35fc1 | |
Varun Saxena | 2a0dc2ab2f | |
ZanderXu | 6bd2444815 | |
Viraj Jasani | e1ca466bdb | |
Tom | 162288bc0a | |
Viraj Jasani | 2ab7eb4caa | |
Szilard Nemeth | 8f6be3678d | |
Viraj Jasani | 28d2753d2f | |
slfan1989 | bcc51ce2c5 | |
Steve Loughran | dcd9dc6983 | |
rdingankar | 0ca5686034 | |
Simbarashe Dzinamarira | 61f369c43e | |
slfan1989 | 8798b94ee1 | |
Viraj Jasani | a90238c0b8 | |
slfan1989 | 25ebd0b8b1 | |
slfan1989 | 27a54955f9 | |
Steve Loughran | 4067facae6 | |
Steve Loughran | e2d7919dc1 | |
Owen O'Malley | 8025a60ae7 | |
Simbarashe Dzinamarira | 4cc33e5e37 | |
slfan1989 | 2e997d818d | |
Ayush Saxena | e8a6b2c2c4 | |
hchaverr | fb31393b65 | |
Steve Loughran | 11a220c6e7 | |
susheel-gupta | 49b8ac19f2 | |
Ayush Saxena | fe5bb49ad9 | |
slfan1989 | 4e6e2f318c | |
nao | acf82d4d55 | |
Viraj Jasani | 88914cada0 | |
zhtttylz | a3b500d046 | |
slfan1989 | 7e486038ea | |
Arnout Engelen | 02fd87a4d8 | |
Steve Loughran | 10e7ca481c | |
Bryan Beaudreault | 7e19bc31b6 | |
Mehakmeet Singh | 7a0903b743 | |
hfutatzhanghb | 723535b788 | |
slfan1989 | a5f48eacca | |
slfan1989 | c3706597a3 | |
Ankit Saurabh | f4f2793f3b | |
Zita Dombi | 4cbe19f3a2 | |
Steve Loughran | d56977e909 | |
SimhadriGovindappa | e2ab35084a | |
Viraj Jasani | 021fcc6c5e | |
GuoPhilipse | fe0541b58d | |
Ayush Saxena | 1def35d802 | |
Viraj Jasani | 90de1ff151 | |
Ayush Saxena | 30f560554d | |
Tamas Domok | e4b5314991 | |
Steve Vaughan | f42c89dffb | |
hfutatzhanghb | f3c4277576 | |
Owen O'Malley | 26fba8701c | |
Tamas Domok | 151b71d7af | |
Viraj Jasani | 17c8cdf63c | |
Mehakmeet Singh | 9e4f50d8a0 | |
huhaiyang | 113a9e40cb | |
huhaiyang | d5c046518e | |
Viraj Jasani | 4fcceff535 | |
slfan1989 | af20841fb1 | |
Steve Vaughan | 08f58ecf07 | |
He Xiaoqiao | 3ba058a894 | |
He Xiaoqiao | 7e919212c4 | |
hfutatzhanghb | eb04ecd29d | |
hchaverri | d310642626 | |
gardenia | 8714403dc7 | |
Viraj Jasani | f02c452cf1 | |
slfan1989 | a6a9fe17e0 | |
Steve Vaughan | 5f5157ac53 | |
Steve Vaughan | aed6fcee5b | |
hfutatzhanghb | be564f5c20 | |
sunhao | 0ae075a2af | |
jokercurry | dad73b76c0 | |
Viraj Jasani | bce388fd3f | |
Ankit Saurabh | 22f6d55b71 | |
Viraj Jasani | ad0cff2f97 | |
Masatake Iwasaki | 6d325d9d09 | |
Masatake Iwasaki | a70f84098f | |
huhaiyang | 88c8ac750d | |
Wei-Chiu Chuang | 9d47108b50 | |
Ayush Saxena | 952d707240 | |
Szilard Nemeth | b677d40ab5 | |
Steve Loughran | 970ebaeded | |
slfan1989 | 468135a4d9 | |
Szilard Nemeth | cf1b3711cb | |
Szilard Nemeth | 815cde9810 | |
Szilard Nemeth | 29f2230cb6 | |
Szilard Nemeth | 8eda456d37 | |
kevin wan | 3b7b79b37a | |
slfan1989 | 3f767a61b1 | |
zhtttylz | 72b760130a | |
Viraj Jasani | 04f3573f6a | |
slfan1989 | 442a5fb285 | |
Nikita Eshkeev | 4de31123ce | |
PJ Fanning | d81d98388c | |
Ashutosh Gupta | 38453f8589 | |
Ashutosh Gupta | 082266516a | |
slfan1989 | 168fa07801 | |
slfan1989 | 4520448327 | |
Viraj Jasani | 1263e024b9 | |
skysiders | 36bf54aba0 | |
huangxiaoping | a90e424d9f | |
slfan1989 | 3d21cff263 | |
Simbarashe Dzinamarira | f26d8bc9bd | |
Szilard Nemeth | 7f6cc196f8 | |
huhaiyang | e3b09b7512 | |
Chengbing Liu | 4cf304de45 | |
huhaiyang | f3cff032e6 | |
Dongjoon Hyun | 6f99558c2e | |
Simbarashe Dzinamarira | b56d483258 | |
ahmarsuhail | 9c6eeb699e | |
Riya Khandelwal | dd49077aed | |
Surendra Singh Lilhore | a65d24488a | |
Simbarashe Dzinamarira | cd19da1309 | |
PJ Fanning | b9eb760ed2 | |
Tsz-Wo Nicholas Sze | 5022003e0f | |
Yubi Lee | 4511c360b9 | |
huhaiyang | 35ce60eadd | |
slfan1989 | 0926fa5a2c | |
susheel-gupta | c44c9f984b | |
Ayush Saxena | b93b1c69cc | |
Chris Nauroth | 6b67373d10 | |
curie71 | 9668a85d40 | |
Neil | d25c1be517 | |
Akira Ajisaka | 049d1762bd | |
Bence Kosztolnik | bf8ab83cd0 | |
ZanderXu | df093ef9af | |
slfan1989 | 17035da46e | |
susheel-gupta | e6056d128a | |
ZanderXu | 15b52fb6a4 | |
David Dillon | b63b777c84 | |
ZanderXu | 8d221255f2 | |
Daniel-009497 | 7ff326129d | |
陈爽-Jack Chen | f6605f1b3a | |
Steve Loughran | 52c72fafe4 | |
PJ Fanning | 6a07b5dc10 | |
Steve Loughran | 33785fc5ad | |
Chengbing Liu | ca3526da92 | |
Xing Lin | f7bdf6c667 | |
Happy-shi | c5b42d59d2 | |
Steve Loughran | cf1244492d | |
Steve Loughran | 5f08e51b72 | |
Steve Loughran | f7b1bb4dcc | |
Mehakmeet Singh | 32414cfe46 | |
slfan1989 | 6172c3192d | |
Steve Loughran | aaf92fe183 | |
slfan1989 | 63b9a6a2b6 | |
Doroszlai, Attila | 4de8791deb | |
Steve Loughran | 1cecf8ab70 | |
Ashutosh Gupta | 85ec7969a7 | |
curie71 | fdcbc8b072 | |
slfan1989 | a71aaef9a9 | |
Jack Richard Buggins | a46b20d25f | |
Steve Loughran | 0a7dfcc332 | |
Anurag P | e76616f690 | |
dingshun3016 | 2fa540dca1 | |
K0K0V0K | ee7d1787cd | |
Oleksandr Shevchenko | 0a4528cd7f | |
Pranav Saxena | c67c2b7569 | |
Murali Krishna | 2e88096266 | |
slfan1989 | f71fd885be | |
Akshat Bordia | 86ac1ad9e5 | |
Gautham B A | dadd3d9138 | |
Steve Loughran | b666075a41 | |
Steve Loughran | 84b33b897c | |
ZanderXu | 8a9bdb1edc | |
dingshun3016 | 02afb9ebe1 | |
slfan1989 | 60e0fe8709 | |
slfan1989 | 4af4997e11 | |
Szilard Nemeth | 5440c75c4a | |
litao | 2067fcb646 | |
Anmol Asrani | 7786600744 | |
Kidd5368 | 72749a4ff8 | |
Owen O'Malley | 03471a736c | |
HarshitGupta11 | 0ef572abed | |
caozhiqiang | 35c65005d0 | |
Simbarashe Dzinamarira | 909aeca86c | |
Simbarashe Dzinamarira | ec2856d79c | |
slfan1989 | f93167e678 | |
sreeb-msft | 1a7acc403b | |
PJ Fanning | e09e81abe4 | |
slfan1989 | 1ddc9091f6 | |
ZanderXu | 87429f443a | |
ZanderXu | e0974298ce | |
huhaiyang | ef84d21867 | |
ZanderXu | bcc3d2a20e | |
huhaiyang | dfa9edacce | |
Ashutosh Gupta | 2c1158e858 | |
huhaiyang | ac958777af | |
slfan1989 | 7cb22eb72d | |
Szilard Nemeth | 3c37a01654 | |
litao | 8f971b0e54 | |
zhengchenyu | dc2fba45fe | |
GuoPhilipse | 069bd973d8 | |
Ashutosh Gupta | 696d042054 | |
Ashutosh Gupta | 2e993fdf4e | |
Ashutosh Gupta | dcde414570 | |
Owen O'Malley | c71a68ca80 | |
Owen O'Malley | 1ea5db52dd | |
Hu Xinqiu | 7d39abd799 | |
slfan1989 | eccd2d0492 | |
Szilard Nemeth | 142df247ed | |
Lei Yang | cd929457c9 | |
Mehakmeet Singh | 69e50c7b44 | |
Ashutosh Gupta | a48e8c9beb | |
slfan1989 | 04b31d7ecf | |
ZanderXu | d3c1c453f0 | |
Szilard Nemeth | 22c9f28f4d | |
PJ Fanning | d340c4a7a1 | |
Szilard Nemeth | 5bb11cecea | |
Simbarashe Dzinamarira | 552ee44eba | |
slfan1989 | b398a7b003 | |
zhengchenyu | f68f1a4578 | |
ted12138 | 7002e214b8 | |
Steve Loughran | 7f9ca101e2 | |
slfan1989 | 845cf8bc28 | |
Simbarashe Dzinamarira | 44b8bb7224 | |
Takanobu Asanuma | 660530205e | |
slfan1989 | 5d6ab15860 | |
Steve Vaughan | 2ba982a061 | |
huhaiyang | e9319e696c | |
slfan1989 | b90dfdff3f | |
Ashutosh Gupta | e62ba16a02 | |
Ashutosh Gupta | 83acb55981 | |
Ashutosh Gupta | 69225ae5b9 | |
wangteng13 | 388f2f182f | |
PJ Fanning | 7ba304d1c6 | |
Daniel Carl Jones | 0b577992ef | |
Steve Loughran | 3b10cb5a3b | |
Gautham B A | b1f418f802 | |
sabertiger | af7dd660e0 | |
Ashutosh Gupta | cbe02c2e77 | |
Samrat | e04c9e810b | |
Ashutosh Gupta | c096803387 | |
PJ Fanning | d6a65a4180 | |
Ashutosh Gupta | 2aae7ffe08 | |
slfan1989 | 070a2d4880 | |
slfan1989 | b1cd88c598 | |
Chris Nauroth | bfb84cd7f6 | |
jianghuazhu | 88f7f5bc01 | |
M1eyu2018 | 8396caa484 | |
huhaiyang | d26c35b228 | |
Takanobu Asanuma | 545a556883 | |
slfan1989 | ba77530ff4 | |
Bence Kosztolnik | 562b693374 | |
Mehakmeet Singh | fba46aa5bb | |
Wang Yu | 37bff63c0f | |
FuzzingTeam | f140506d67 | |
Ashutosh Gupta | 0a26d84df1 | |
Ashutosh Gupta | e6edbf1b4b | |
Ashutosh Gupta | 21b7790866 | |
slfan1989 | 454157a384 | |
Gautham B A | 833750f72a | |
PJ Fanning | aac87ffe76 | |
slfan1989 | d93e6f0cbb | |
Willi Raschkowski | c4aa41aa80 | |
FuzzingTeam | 7f69e09290 | |
Sneha Vijayarajan | a996d889ec | |
slfan1989 | 9adf0ca089 | |
jianghuazhu | c5c00f3d2c | |
slfan1989 | 48b6f9f335 | |
Viraj Jasani | 8aa04b0b24 | |
Daniel Carl Jones | 6207ac47e0 | |
Steve Loughran | d80db6c9e5 | |
slfan1989 | ee886cacd7 | |
Hexiaoqiao | babb050fa3 | |
Ankit Saurabh | 2d91daab5e | |
Ashutosh Gupta | 9a8aff69ff | |
ZanderXu | 136291d2d5 | |
PJ Fanning | 4ff6c9b8de | |
Szilard Nemeth | b0d5182c31 | |
ahmarsuhail | 77e551a478 | |
slfan1989 | 5b52123c9d | |
slfan1989 | 1962851356 | |
slfan1989 | 647457e6ab | |
PJ Fanning | bfce21ee08 | |
monthonk | 9439d8e4e4 | |
slfan1989 | 3ff8f58f8c | |
slfan1989 | 1ff7e84caf | |
Ashutosh Gupta | d6b1e1eeb6 | |
Gautham B A | 5694d7e25f | |
slfan1989 | d78b0b39a6 | |
slfan1989 | 9e16f1f883 | |
slfan1989 | 82a88a8ae6 | |
Gautham B A | 2122733c30 | |
belugabehr | 03d600fa82 | |
huhaiyang | d14b88c698 | |
PJ Fanning | 4fe079f85f | |
Szilard Nemeth | 0c515b0ef0 | |
ZanderXu | 62ff4e36cf | |
ZanderXu | b0b2cb4a16 | |
Mukund Thakur | be70bbb4be | |
Steve Loughran | 540a660429 | |
Ashutosh Gupta | 9a7d0e7ed0 | |
ZanderXu | b0bfd09c41 | |
PJ Fanning | 5eddec8c46 | |
Ashutosh Gupta | 062c50db6b | |
PJ Fanning | 8336b91329 | |
slfan1989 | b31b3ea0f6 | |
Daniel Carl Jones | 7ec762a5fd | |
Alessandro Passaro | 1675a28e5a | |
Steve Loughran | 38b2ed2151 | |
slfan1989 | 1a9faf123d | |
slfan1989 | a708ff96f1 | |
slfan1989 | 874a004347 | |
slfan1989 | 22bd5e3b53 | |
Riya Khandelwal | 07581f1ab2 | |
Navink | 4891bf5049 | |
Mukund Thakur | e22f5e75ae | |
slfan1989 | 42d883937d | |
slfan1989 | bfd6415827 | |
Mukund Thakur | 735e35d648 | |
Ashutosh Gupta | d9f435f6ac | |
slfan1989 | 5d20988f9f | |
slfan1989 | 0e65f4cc04 | |
slfan1989 | aeba204fa2 | |
Viraj Jasani | 648071e197 | |
Ashutosh Gupta | 7923cac86b | |
Ashutosh Gupta | 603e9bd745 | |
Ashutosh Gupta | fd0415c44a | |
Xing Lin | 747fb92107 | |
Steve Loughran | 0676495950 | |
Kidd5368 | 9a29075f91 | |
slfan1989 | e526f48fa4 | |
PJ Fanning | e6d2c336cb | |
Viraj Jasani | 084b68e380 | |
Samrat | 740e1ef357 | |
Ashutosh Gupta | 917aef75fc | |
slfan1989 | 4c5a7cc6fc | |
slfan1989 | 4d9bb81b16 | |
GuoPhilipse | 40ab9c8ba7 | |
Ashutosh Gupta | 2950c5405b | |
slfan1989 | fd687bb4c4 | |
Ashutosh Gupta | a134628d1b | |
Viraj Jasani | 5b1657278c | |
ZanderXu | e68006cd70 | |
slfan1989 | f52b900a5f | |
slfan1989 | 342c4856b8 | |
GuoPhilipse | 620dd37712 | |
Ashutosh Gupta | 30c36ef25a | |
ZanderXu | a73c4804d8 | |
Ashutosh Gupta | 0f03299eba | |
ZanderXu | 43c1ebae16 | |
Ashutosh Gupta | 59d3c20118 | |
Simbarashe Dzinamarira | 6422eaf301 | |
GuoPhilipse | ce54b7e55d | |
Colm O hEigeartaigh | 272844ee57 | |
slfan1989 | 86b84ed74e | |
Renukaprasad C | 8ce71c8882 | |
slfan1989 | 88545a875d | |
Ashutosh Gupta | 55d8a91b2c | |
slfan1989 | 3ce353395b | |
Ashutosh Gupta | 65a027b112 | |
slfan1989 | cde1f3af21 | |
caozhiqiang | 1923096adb | |
Ashutosh Gupta | 21bae31d58 | |
slfan1989 | cdcb448b78 | |
slfan1989 | b2760520c3 | |
Simbarashe Dzinamarira | e77d54d1ee | |
slfan1989 | e76ffbf102 | |
Mukund Thakur | 8732625f50 | |
9uapaw | 5b85af87f0 | |
Viraj Jasani | 56387cce57 | |
ZanderXu | 4a01fadb94 | |
slfan1989 | 29c4d8d8f7 | |
slfan1989 | 0db3ee5b4b | |
Mehakmeet Singh | 03961b10c2 | |
Ashutosh Gupta | 832d0e0d76 | |
PJ Fanning | 42c8f61fec | |
Erik Krogen | c664f953c9 | |
Ayush Saxena | cc41ad63f9 | |
ZanderXu | c947c326e8 | |
Sumangala Patki | 7bcf853ff4 | |
ZanderXu | be4c638e4c | |
ZanderXu | ac42519ade | |
slfan1989 | 7bf95d7949 | |
slfan1989 | 1965708d49 | |
slfan1989 | b266f852d7 | |
slfan1989 | 3a96de7756 | |
ZanderXu | 7b239a80fe | |
sreeb-msft | c48ed3e96c | |
slfan1989 | 37e213c3fc | |
monthonk | 20560401ec | |
Steve Vaughan | 2dd8b1342e | |
slfan1989 | 33edbed54c | |
Mukund Thakur | 19830c98bc | |
9uapaw | 84081a8cae | |
Steve Loughran | c69e16b297 | |
Gautham B A | c334ba89ad | |
slfan1989 | 8a47ed6f84 | |
Masatake Iwasaki | 22835be63d | |
Ashutosh Gupta | 90dba8b614 | |
Samrat | 2c05015716 | |
zhangshuyan0 | 71778a6cc5 | |
slfan1989 | c60a900583 | |
slfan1989 | 0075ef15c2 | |
slfan1989 | 4031b0774e | |
ZanderXu | 5567154f71 | |
slfan1989 | f8b9dd911c | |
Gautham B A | 5736b34b2a | |
zhengchenyu | 231a4468cd | |
ahmarsuhail | 7fb9c306e2 | |
ZanderXu | 1691cccc89 | |
Ayush Saxena | 880686d1e3 | |
ZanderXu | 8d4f51c432 | |
slfan1989 | 75aff247ae | |
slfan1989 | 052d7f286e | |
Steve Loughran | de37fd37d6 | |
Steve Vaughan | 1ff121041c | |
Simba Dzinamarira | 4890ba5052 | |
ZanderXu | c37f01d95b | |
Simba Dzinamarira | a3b1bafa34 | |
Steve Vaughan | 6fbc38db95 | |
ZanderXu | 183f09b1da | |
Viraj Jasani | c249db80c2 | |
slfan1989 | eda4bb5dcd | |
Steve Vaughan | 17daad34d4 | |
Mukund Thakur | 231e095802 | |
Steve Vaughan | a9e5fb3313 | |
Ashutosh Gupta | c294a414b9 | |
jianghuazhu | 7f176d080c | |
Clara Fang | c870171182 | |
Ashutosh Gupta | b253b3be9f | |
slfan1989 | f75c58a1ca | |
Viraj Jasani | 7f030250b4 | |
Steve Vaughan | b7d4dc61bf | |
Ashutosh Gupta | d09dd4a0b9 | |
Steve Loughran | 682931a6ac | |
slfan1989 | cd72f7e042 | |
Steve Vaughan | e40b3a3089 | |
Ashutosh Gupta | 5cc8c574d1 | |
Ashutosh Gupta | 86abeb401e | |
Ashutosh Gupta | f02ff1afe2 | |
Viraj Jasani | d55d76e1e2 | |
xuzq | 622ca0d51f | |
slfan1989 | eff3b8c59a | |
xuzq | b1d4af2492 | |
Hui Fei | 86cc96c493 | |
Steve Loughran | 906ae5138e | |
Steve Loughran | eee59a8372 | |
Steve Loughran | ad83e95046 | |
slfan1989 | ab88e4b65d | |
Paul King | d0fdb1d6e0 | |
slfan1989 | d383cc4525 | |
zhengchenyu | 9f6bbc90a8 | |
kevins-29 | b737869e01 | |
xuzq | e0c8c6eed4 | |
xuzq | 521e65acfe | |
Viraj Jasani | 8c9533a0f8 | |
xuzq | 59619ad247 | |
Steve Vaughan | 2005582a28 | |
slfan1989 | 6ca2d3f848 | |
xuzq | 09cabaad68 | |
Mukund Thakur | b28e4c6904 | |
huaxiangsun | e9509ac467 | |
Yubi Lee | c0bbdca97e | |
slfan1989 | 133e8aabf0 | |
slfan1989 | ffa9ed93a4 | |
Ashutosh Gupta | 92abd99450 | |
slfan1989 | 977f4b6165 | |
xuzq | 895f7c51fd | |
Viraj Jasani | 06f0f7db79 | |
slfan1989 | d8d3325d2f | |
Ashutosh Gupta | 1cda2dcb6e | |
slfan1989 | 52c2d99889 | |
xuzq | 25ccdc77af | |
Ashutosh Gupta | bd0f9a46e1 | |
ahmarsuhail | b5642c5638 | |
Steve Loughran | 62dbefd8f2 | |
Ayush Saxena | 080e67039d | |
slfan1989 | 6f7c4c74ea | |
Ashutosh Gupta | 0aa08ef543 | |
zhangshuyan0 | dbf73e16b1 | |
xuzq | 8eebf40b1a | |
Mukund Thakur | 66dec9d322 | |
slfan1989 | c5ec727435 | |
slfan1989 | 6463f86f83 | |
slfan1989 | c5eba323bc | |
Steve Vaughan | 0fc7dd8228 | |
xuzq | 0f36539d60 | |
Ashutosh Gupta | 69f6fdb757 | |
slfan1989 | 1f0a71a92b | |
slfan1989 | 57da4bb0a1 | |
ahmarsuhail | 123d1aa884 | |
slfan1989 | 13fbfd5dea | |
slfan1989 | 2680f17eb4 | |
slfan1989 | e994635a95 | |
Mukund Thakur | a5b12c8010 | |
9uapaw | bf570bd4ac |

.asf.yaml:

@@ -14,6 +14,8 @@
# limitations under the License.

github:
  ghp_path: /
  ghp_branch: gh-pages
  enabled_merge_buttons:
    squash: true
    merge: false

@@ -22,4 +24,4 @@ notifications:
  commits: common-commits@hadoop.apache.org
  issues: common-issues@hadoop.apache.org
  pullrequests: common-issues@hadoop.apache.org
  jira_options: link label worklog
  jira_options: comment link label

.github/workflows/website.yml (new file):

@@ -0,0 +1,59 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

name: website

# Controls when the action will run.
on:
  push:
    branches: [ trunk ]

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout Hadoop trunk
        uses: actions/checkout@v3
        with:
          repository: apache/hadoop
      - name: Set up JDK 8
        uses: actions/setup-java@v3
        with:
          java-version: '8'
          distribution: 'temurin'
      - name: Cache local Maven repository
        uses: actions/cache@v3
        with:
          path: ~/.m2/repository
          key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }}
          restore-keys: |
            ${{ runner.os }}-maven-
      - name: Build Hadoop maven plugins
        run: cd hadoop-maven-plugins && mvn --batch-mode install
      - name: Build Hadoop
        run: mvn clean install -DskipTests -DskipShade
      - name: Build document
        run: mvn clean site
      - name: Stage document
        run: mvn site:stage -DstagingDirectory=${GITHUB_WORKSPACE}/staging/
      - name: Deploy to GitHub Pages
        uses: peaceiris/actions-gh-pages@v3
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          publish_dir: ./staging/hadoop-project
          user_name: 'github-actions[bot]'
          user_email: 'github-actions[bot]@users.noreply.github.com'
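
As a rough local equivalent of the build and stage steps above (a sketch: the staging
directory /tmp/hadoop-site-staging is an arbitrary choice, and hadoop-maven-plugins must be
installed first, just as in the workflow):

$ cd hadoop-maven-plugins && mvn --batch-mode install && cd ..
$ mvn clean install -DskipTests -DskipShade
$ mvn clean site
$ mvn site:stage -DstagingDirectory=/tmp/hadoop-site-staging

The staged site then sits under /tmp/hadoop-site-staging/hadoop-project, which is the
directory the workflow publishes to gh-pages.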

@@ -0,0 +1,17 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

dev-support/docker/Dockerfile_windows_10

BUILDING.txt:

@@ -492,39 +492,66 @@ Building on CentOS 8

----------------------------------------------------------------------------------

Building on Windows
Building on Windows 10

----------------------------------------------------------------------------------
Requirements:

* Windows System
* Windows 10
* JDK 1.8
* Maven 3.0 or later
* Boost 1.72
* Protocol Buffers 3.7.1
* CMake 3.19 or newer
* Visual Studio 2010 Professional or Higher
* Windows SDK 8.1 (if building CPU rate control for the container executor)
* zlib headers (if building native code bindings for zlib)
* Maven 3.0 or later (maven.apache.org)
* Boost 1.72 (boost.org)
* Protocol Buffers 3.7.1 (https://github.com/protocolbuffers/protobuf/releases)
* CMake 3.19 or newer (cmake.org)
* Visual Studio 2019 (visualstudio.com)
* Windows SDK 8.1 (optional, if building CPU rate control for the container executor. Get this from
  http://msdn.microsoft.com/en-us/windows/bg162891.aspx)
* Zlib (zlib.net, if building native code bindings for zlib)
* Git (preferably, get this from https://git-scm.com/download/win since the package also contains
  Unix command-line tools that are needed during packaging).
* Python (python.org, for generation of docs using 'mvn site')
* Internet connection for first build (to fetch all Maven and Hadoop dependencies)
* Unix command-line tools from GnuWin32: sh, mkdir, rm, cp, tar, gzip. These
  tools must be present on your PATH.
* Python (for generation of docs using 'mvn site')

Unix command-line tools are also included with the Windows Git package, which
can be downloaded from http://git-scm.com/downloads

If using Visual Studio, it must be Professional level or higher.
Do not use Visual Studio Express. It does not support compiling for 64-bit,
which is problematic if running a 64-bit system.

The Windows SDK 8.1 is available to download at:

http://msdn.microsoft.com/en-us/windows/bg162891.aspx

Cygwin is not required.

----------------------------------------------------------------------------------

Building guidelines:

The Hadoop repository provides a Dockerfile for building Hadoop on Windows 10, located at
dev-support/docker/Dockerfile_windows_10. It is highly recommended to use it to create the
Docker image for building Hadoop on Windows 10: apart from Docker itself, nothing else needs
to be installed, and no additional steps are required to align the environment with the
necessary paths etc.

However, if you prefer not to use Docker, Dockerfile_windows_10 is still immensely useful as
a raw guide to all the steps involved in creating the environment needed to build Hadoop on
Windows 10.

Building using Docker:
First, build the Docker image for building Hadoop on Windows 10. Run this command from
the root of the Hadoop repository.
> docker build -t hadoop-windows-10-builder -f .\dev-support\docker\Dockerfile_windows_10 .\dev-support\docker\

Start a container from the image that was just built.
> docker run --rm -it hadoop-windows-10-builder

You can now clone the Hadoop repo inside this container and proceed with the build.
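
For example (a sketch: the GitHub mirror URL and the target directory C:\hadoop are arbitrary
choices):
> git clone https://github.com/apache/hadoop.git C:\hadoop
> cd C:\hadoop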

NOTE:
While it may be tempting to mount the locally cloned (on the host filesystem) Hadoop
repository into the container using the -v option, we have seen such builds fail because
Maven could not locate some files. Thus, we suggest cloning the Hadoop repository to a
non-mounted folder inside the container and proceeding with the build there. When the build
completes, you may use the "docker cp" command to copy the built Hadoop tar.gz file from the
docker container to the host filesystem. If you would still like to mount the Hadoop codebase,
a workaround is to copy the mounted Hadoop codebase into another folder (one that doesn't
point to a mount) in the container's filesystem and use that folder for building.
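
For example, assuming the container was started with --name hadoop-builder, the repository was
cloned to C:\hadoop inside it, and the build produced hadoop-3.4.0-SNAPSHOT.tar.gz (the name,
path, and version here are all hypothetical), the tarball can be copied out by running this on
the host:
> docker cp hadoop-builder:C:\hadoop\hadoop-dist\target\hadoop-3.4.0-SNAPSHOT.tar.gz D:\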

However, we noticed no build issues when the Maven repository from the host filesystem was
mounted into the container; this can be used to greatly reduce the build time. Assuming that
the Maven repository is located at D:\Maven\Repository on the host filesystem, the following
command mounts it onto the default Maven repository location while launching the container.
> docker run --rm -v D:\Maven\Repository:C:\Users\ContainerAdministrator\.m2\repository -it hadoop-windows-10-builder

Building:

Keep the source code tree in a short path to avoid running into problems related

@@ -540,6 +567,24 @@ configure the bit-ness of the build, and set several optional components.
Several tests require that the user have the Create Symbolic Links
privilege.
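
A quick way to check whether the privilege is in effect is to create a symbolic link from a
plain command prompt (the paths below are arbitrary); mklink fails with a privilege error when
the privilege is missing:
> mklink %TEMP%\linktest.txt %WINDIR%\win.ini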

To simplify the installation of the Boost, Protocol Buffers, OpenSSL and Zlib dependencies, we
can use vcpkg (https://github.com/Microsoft/vcpkg.git). Upon cloning the vcpkg repo, check out
the commit 7ffa425e1db8b0c3edf9c50f2f3a0f25a324541d to get the required versions of the
dependencies mentioned above.
> git clone https://github.com/Microsoft/vcpkg.git
> cd vcpkg
> git checkout 7ffa425e1db8b0c3edf9c50f2f3a0f25a324541d
> .\bootstrap-vcpkg.bat
> .\vcpkg.exe install boost:x64-windows
> .\vcpkg.exe install protobuf:x64-windows
> .\vcpkg.exe install openssl:x64-windows
> .\vcpkg.exe install zlib:x64-windows

Set the following environment variables (assuming that vcpkg was checked out at C:\vcpkg):
> set PROTOBUF_HOME=C:\vcpkg\installed\x64-windows
> set MAVEN_OPTS=-Xmx2048M -Xss128M

All Maven goals are the same as described above, with the exception that
native code is built by enabling the 'native-win' Maven profile. -Pnative-win
is enabled by default when building on Windows since the native components

@@ -557,6 +602,24 @@ the zlib 1.2.7 source tree.

http://www.zlib.net/


Build command:
The following command builds all the modules in the Hadoop project and generates the tar.gz
file in hadoop-dist/target upon a successful build. Run these commands from an
"x64 Native Tools Command Prompt for VS 2019", which can be found under "Visual Studio 2019"
in the Windows start menu. If you're using the Docker image from Dockerfile_windows_10, you'll
be logged into an "x64 Native Tools Command Prompt for VS 2019" automatically when you start
the container.

> set classpath=
> set PROTOBUF_HOME=C:\vcpkg\installed\x64-windows
> mvn clean package -Dhttps.protocols=TLSv1.2 -DskipTests -DskipDocs -Pnative-win,dist^
 -Drequire.openssl -Drequire.test.libhadoop -Pyarn-ui -Dshell-executable=C:\Git\bin\bash.exe^
 -Dtar -Dopenssl.prefix=C:\vcpkg\installed\x64-windows^
 -Dcmake.prefix.path=C:\vcpkg\installed\x64-windows^
 -Dwindows.cmake.toolchain.file=C:\vcpkg\scripts\buildsystems\vcpkg.cmake -Dwindows.cmake.build.type=RelWithDebInfo^
 -Dwindows.build.hdfspp.dll=off -Dwindows.no.sasl=on -Duse.platformToolsetVersion=v142
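
Upon a successful build, the distribution tarball can be located with the following command
(the exact file name depends on the Hadoop version being built):
> dir hadoop-dist\target\hadoop-*.tar.gz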

----------------------------------------------------------------------------------
Building distributions:


LICENSE-binary:

@@ -210,22 +210,22 @@ hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/nvd3-1.8.5.* (css and js
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/AbstractFuture.java
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/TimeoutFuture.java

com.aliyun:aliyun-java-sdk-core:3.4.0
com.aliyun:aliyun-java-sdk-ecs:4.2.0
com.aliyun:aliyun-java-sdk-ram:3.0.0
com.aliyun:aliyun-java-sdk-core:4.5.10
com.aliyun:aliyun-java-sdk-kms:2.11.0
com.aliyun:aliyun-java-sdk-ram:3.1.0
com.aliyun:aliyun-java-sdk-sts:3.0.0
com.aliyun.oss:aliyun-sdk-oss:3.13.2
com.amazonaws:aws-java-sdk-bundle:1.12.262
com.amazonaws:aws-java-sdk-bundle:1.12.316
com.cedarsoftware:java-util:1.9.0
com.cedarsoftware:json-io:2.5.1
com.fasterxml.jackson.core:jackson-annotations:2.12.7
com.fasterxml.jackson.core:jackson-core:2.12.7
com.fasterxml.jackson.core:jackson-databind:2.12.7
com.fasterxml.jackson.core:jackson-databind:2.12.7.1
com.fasterxml.jackson.jaxrs:jackson-jaxrs-base:2.12.7
com.fasterxml.jackson.jaxrs:jackson-jaxrs-json-provider:2.12.7
com.fasterxml.jackson.module:jackson-module-jaxb-annotations:2.12.7
com.fasterxml.uuid:java-uuid-generator:3.1.4
com.fasterxml.woodstox:woodstox-core:5.3.0
com.fasterxml.woodstox:woodstox-core:5.4.0
com.github.davidmoten:rxjava-extras:0.8.0.17
com.github.stephenc.jcip:jcip-annotations:1.0-1
com.google:guice:4.0

@@ -240,18 +240,17 @@ com.google.guava:guava:20.0
com.google.guava:guava:27.0-jre
com.google.guava:listenablefuture:9999.0-empty-to-avoid-conflict-with-guava
com.microsoft.azure:azure-storage:7.0.0
com.nimbusds:nimbus-jose-jwt:9.8.1
com.squareup.okhttp3:okhttp:4.9.3
com.squareup.okio:okio:1.6.0
com.nimbusds:nimbus-jose-jwt:9.31
com.squareup.okhttp3:okhttp:4.10.0
com.squareup.okio:okio:3.2.0
com.zaxxer:HikariCP:4.0.3
commons-beanutils:commons-beanutils:1.9.3
commons-cli:commons-cli:1.2
commons-beanutils:commons-beanutils:1.9.4
commons-cli:commons-cli:1.5.0
commons-codec:commons-codec:1.11
commons-collections:commons-collections:3.2.2
commons-daemon:commons-daemon:1.0.13
commons-io:commons-io:2.8.0
commons-logging:commons-logging:1.1.3
commons-net:commons-net:3.6
commons-net:commons-net:3.9.0
de.ruedigermoeller:fst:2.50
io.grpc:grpc-api:1.26.0
io.grpc:grpc-context:1.26.0

@@ -260,7 +259,6 @@ io.grpc:grpc-netty:1.26.0
io.grpc:grpc-protobuf:1.26.0
io.grpc:grpc-protobuf-lite:1.26.0
io.grpc:grpc-stub:1.26.0
io.netty:netty:3.10.6.Final
io.netty:netty-all:4.1.77.Final
io.netty:netty-buffer:4.1.77.Final
io.netty:netty-codec:4.1.77.Final

@@ -301,16 +299,15 @@ javax.inject:javax.inject:1
log4j:log4j:1.2.17
net.java.dev.jna:jna:5.2.0
net.minidev:accessors-smart:1.2
net.minidev:json-smart:2.4.7
org.apache.avro:avro:1.9.2
org.apache.commons:commons-collections4:4.2
org.apache.commons:commons-compress:1.21
org.apache.commons:commons-configuration2:2.1.1
org.apache.commons:commons-csv:1.0
org.apache.commons:commons-configuration2:2.8.0
org.apache.commons:commons-csv:1.9.0
org.apache.commons:commons-digester:1.8.1
org.apache.commons:commons-lang3:3.12.0
org.apache.commons:commons-math3:3.1.1
org.apache.commons:commons-text:1.4
org.apache.commons:commons-math3:3.6.1
org.apache.commons:commons-text:1.10.0
org.apache.commons:commons-validator:1.6
org.apache.curator:curator-client:5.2.0
org.apache.curator:curator-framework:5.2.0

@@ -324,46 +321,49 @@ org.apache.htrace:htrace-core:3.1.0-incubating
org.apache.htrace:htrace-core4:4.1.0-incubating
org.apache.httpcomponents:httpclient:4.5.6
org.apache.httpcomponents:httpcore:4.4.10
org.apache.kafka:kafka-clients:2.8.1
org.apache.kerby:kerb-admin:1.0.1
org.apache.kerby:kerb-client:1.0.1
org.apache.kerby:kerb-common:1.0.1
org.apache.kerby:kerb-core:1.0.1
org.apache.kerby:kerb-crypto:1.0.1
org.apache.kerby:kerb-identity:1.0.1
org.apache.kerby:kerb-server:1.0.1
org.apache.kerby:kerb-simplekdc:1.0.1
org.apache.kerby:kerb-util:1.0.1
org.apache.kerby:kerby-asn1:1.0.1
org.apache.kerby:kerby-config:1.0.1
org.apache.kerby:kerby-pkix:1.0.1
org.apache.kerby:kerby-util:1.0.1
org.apache.kerby:kerby-xdr:1.0.1
org.apache.kerby:token-provider:1.0.1
org.apache.kafka:kafka-clients:2.8.2
org.apache.kerby:kerb-admin:2.0.3
org.apache.kerby:kerb-client:2.0.3
org.apache.kerby:kerb-common:2.0.3
org.apache.kerby:kerb-core:2.0.3
org.apache.kerby:kerb-crypto:2.0.3
org.apache.kerby:kerb-identity:2.0.3
org.apache.kerby:kerb-server:2.0.3
org.apache.kerby:kerb-simplekdc:2.0.3
org.apache.kerby:kerb-util:2.0.3
org.apache.kerby:kerby-asn1:2.0.3
org.apache.kerby:kerby-config:2.0.3
org.apache.kerby:kerby-pkix:2.0.3
org.apache.kerby:kerby-util:2.0.3
org.apache.kerby:kerby-xdr:2.0.3
org.apache.kerby:token-provider:2.0.3
org.apache.solr:solr-solrj:8.8.2
org.apache.yetus:audience-annotations:0.5.0
org.apache.zookeeper:zookeeper:3.6.3
org.codehaus.jettison:jettison:1.1
org.eclipse.jetty:jetty-annotations:9.4.48.v20220622
org.eclipse.jetty:jetty-http:9.4.48.v20220622
org.eclipse.jetty:jetty-io:9.4.48.v20220622
org.eclipse.jetty:jetty-jndi:9.4.48.v20220622
org.eclipse.jetty:jetty-plus:9.4.48.v20220622
org.eclipse.jetty:jetty-security:9.4.48.v20220622
org.eclipse.jetty:jetty-server:9.4.48.v20220622
org.eclipse.jetty:jetty-servlet:9.4.48.v20220622
org.eclipse.jetty:jetty-util:9.4.48.v20220622
org.eclipse.jetty:jetty-util-ajax:9.4.48.v20220622
org.eclipse.jetty:jetty-webapp:9.4.48.v20220622
org.eclipse.jetty:jetty-xml:9.4.48.v20220622
org.eclipse.jetty.websocket:javax-websocket-client-impl:9.4.48.v20220622
org.eclipse.jetty.websocket:javax-websocket-server-impl:9.4.48.v20220622
org.codehaus.jettison:jettison:1.5.4
org.eclipse.jetty:jetty-annotations:9.4.51.v20230217
org.eclipse.jetty:jetty-http:9.4.51.v20230217
org.eclipse.jetty:jetty-io:9.4.51.v20230217
org.eclipse.jetty:jetty-jndi:9.4.51.v20230217
org.eclipse.jetty:jetty-plus:9.4.51.v20230217
org.eclipse.jetty:jetty-security:9.4.51.v20230217
org.eclipse.jetty:jetty-server:9.4.51.v20230217
org.eclipse.jetty:jetty-servlet:9.4.51.v20230217
org.eclipse.jetty:jetty-util:9.4.51.v20230217
org.eclipse.jetty:jetty-util-ajax:9.4.51.v20230217
org.eclipse.jetty:jetty-webapp:9.4.51.v20230217
org.eclipse.jetty:jetty-xml:9.4.51.v20230217
org.eclipse.jetty.websocket:javax-websocket-client-impl:9.4.51.v20230217
org.eclipse.jetty.websocket:javax-websocket-server-impl:9.4.51.v20230217
org.ehcache:ehcache:3.3.1
org.ini4j:ini4j:0.5.4
org.jetbrains.kotlin:kotlin-stdlib:1.4.10
org.jetbrains.kotlin:kotlin-stdlib-common:1.4.10
org.lz4:lz4-java:1.7.1
org.objenesis:objenesis:2.6
org.xerial.snappy:snappy-java:1.0.5
org.yaml:snakeyaml:1.16
org.wildfly.openssl:wildfly-openssl:1.0.7.Final
org.yaml:snakeyaml:2.0
org.wildfly.openssl:wildfly-openssl:1.1.3.Final


--------------------------------------------------------------------------------

@@ -427,7 +427,7 @@ hadoop-tools/hadoop-sls/src/main/html/js/thirdparty/bootstrap.min.js
hadoop-tools/hadoop-sls/src/main/html/js/thirdparty/jquery.js
hadoop-tools/hadoop-sls/src/main/html/css/bootstrap.min.css
hadoop-tools/hadoop-sls/src/main/html/css/bootstrap-responsive.min.css
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/*
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.11.5/*
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jt/jquery.jstree.js
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/TERMINAL

@@ -435,7 +435,7 @@ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanage
bootstrap v3.3.6
broccoli-asset-rev v2.4.2
broccoli-funnel v1.0.1
datatables v1.10.19
datatables v1.11.5
em-helpers v0.5.13
em-table v0.1.6
ember v2.2.0

@@ -518,12 +518,14 @@ Eclipse Public License 1.0
--------------------------

junit:junit:4.13.2
org.jacoco:org.jacoco.agent:0.8.5


HSQL License
------------

org.hsqldb:hsqldb:2.3.4
org.hsqldb:hsqldb:2.7.1


JDOM License

@@ -252,7 +252,7 @@ hadoop-tools/hadoop-sls/src/main/html/js/thirdparty/bootstrap.min.js
hadoop-tools/hadoop-sls/src/main/html/js/thirdparty/jquery.js
hadoop-tools/hadoop-sls/src/main/html/css/bootstrap.min.css
hadoop-tools/hadoop-sls/src/main/html/css/bootstrap-responsive.min.css
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/*
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.11.5/*
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jt/jquery.jstree.js
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/TERMINAL

@@ -20,6 +20,20 @@
# Override these to match Apache Hadoop's requirements
personality_plugins "all,-ant,-gradle,-scalac,-scaladoc"

# These flags are needed to run Yetus against Hadoop on Windows.
WINDOWS_FLAGS="-Pnative-win
  -Dhttps.protocols=TLSv1.2
  -Drequire.openssl
  -Drequire.test.libhadoop
  -Dshell-executable=${BASH_EXECUTABLE}
  -Dopenssl.prefix=${VCPKG_INSTALLED_PACKAGES}
  -Dcmake.prefix.path=${VCPKG_INSTALLED_PACKAGES}
  -Dwindows.cmake.toolchain.file=${CMAKE_TOOLCHAIN_FILE}
  -Dwindows.cmake.build.type=RelWithDebInfo
  -Dwindows.build.hdfspp.dll=off
  -Dwindows.no.sasl=on
  -Duse.platformToolsetVersion=v142"

## @description Globals specific to this personality
## @audience private
## @stability evolving

@@ -87,17 +101,30 @@ function hadoop_order
  echo "${hadoopm}"
}

## @description Determine if it is safe to run parallel tests
## @description Retrieves the Hadoop project version defined in the root pom.xml
## @audience private
## @stability evolving
## @param ordering
function hadoop_test_parallel
## @returns 0 on success, 1 on failure
function load_hadoop_version
{
  if [[ -f "${BASEDIR}/pom.xml" ]]; then
    HADOOP_VERSION=$(grep '<version>' "${BASEDIR}/pom.xml" \
        | head -1 \
        | "${SED}" -e 's|^ *<version>||' -e 's|</version>.*$||' \
        | cut -f1 -d- )
    return 0
  else
    return 1
  fi
}

## @description Determine if it is safe to run parallel tests
## @audience private
## @stability evolving
## @param ordering
function hadoop_test_parallel
{
  if load_hadoop_version; then
    export HADOOP_VERSION
  else
    return 1

@@ -262,7 +289,10 @@ function hadoop_native_flags
    Windows_NT|CYGWIN*|MINGW*|MSYS*)
      echo \
        "${args[@]}" \
        -Drequire.snappy -Drequire.openssl -Pnative-win
        -Drequire.snappy \
        -Pdist \
        -Dtar \
        "${WINDOWS_FLAGS}"
      ;;
    *)
      echo \

@@ -405,7 +435,10 @@ function personality_modules
    extra="${extra} ${flags}"
  fi

  extra="-Ptest-patch ${extra}"
  if [[ "$IS_WINDOWS" && "$IS_WINDOWS" == 1 ]]; then
    extra="-Ptest-patch -Pdist -Dtar ${WINDOWS_FLAGS} ${extra}"
  fi

  for module in $(hadoop_order ${ordering}); do
    # shellcheck disable=SC2086
    personality_enqueue_module ${module} ${extra}

@@ -548,17 +581,28 @@ function shadedclient_rebuild

  big_console_header "Checking client artifacts on ${repostatus} with shaded clients"

  extra="-Dtest=NoUnitTests -Dmaven.javadoc.skip=true -Dcheckstyle.skip=true -Dspotbugs.skip=true"

  if [[ "$IS_WINDOWS" && "$IS_WINDOWS" == 1 ]]; then
    if load_hadoop_version; then
      export HADOOP_HOME="${SOURCEDIR}/hadoop-dist/target/hadoop-${HADOOP_VERSION}-SNAPSHOT"
    else
      yetus_error "[WARNING] Unable to extract the Hadoop version and thus HADOOP_HOME is not set. Some tests may fail."
    fi

    extra="${WINDOWS_FLAGS} ${extra}"
  fi

  echo_and_redirect "${logfile}" \
    "${MAVEN}" "${MAVEN_ARGS[@]}" verify -fae --batch-mode -am \
    "${modules[@]}" \
    -Dtest=NoUnitTests -Dmaven.javadoc.skip=true -Dcheckstyle.skip=true -Dspotbugs.skip=true
    "${MAVEN}" "${MAVEN_ARGS[@]}" verify -fae --batch-mode -am "${modules[@]}" "${extra}"

  big_console_header "Checking client artifacts on ${repostatus} with non-shaded clients"

  echo_and_redirect "${logfile}" \
    "${MAVEN}" "${MAVEN_ARGS[@]}" verify -fae --batch-mode -am \
    "${modules[@]}" \
    -DskipShade -Dtest=NoUnitTests -Dmaven.javadoc.skip=true -Dcheckstyle.skip=true \
    -Dspotbugs.skip=true "${extra}"

  count=$("${GREP}" -c '\[ERROR\]' "${logfile}")
  if [[ ${count} -gt 0 ]]; then

@@ -171,7 +171,17 @@ if [[ -n "${GPGBIN}" && ! "${HADOOP_SKIP_YETUS_VERIFICATION}" = true ]]; then
  fi
fi

if ! (gunzip -c "${TARBALL}.gz" | tar xpf -); then
if [[ "$IS_WINDOWS" && "$IS_WINDOWS" == 1 ]]; then
  gunzip -c "${TARBALL}.gz" | tar xpf -

  # One of the entries in the Yetus tarball unzips to a symlink, qbt.sh.
  # Symlink creation fails on Windows unless this CI is run as Admin or Developer mode
  # is enabled.
  # Thus, we create the qbt.sh symlink ourselves and move it to the target.
  YETUS_PRECOMMIT_DIR="${YETUS_PREFIX}-${HADOOP_YETUS_VERSION}/lib/precommit"
  ln -s "${YETUS_PRECOMMIT_DIR}/test-patch.sh" qbt.sh
  mv qbt.sh "${YETUS_PRECOMMIT_DIR}"
elif ! (gunzip -c "${TARBALL}.gz" | tar xpf -); then
  yetus_error "ERROR: ${TARBALL}.gz is corrupt. Investigate and then remove ${HADOOP_PATCHPROCESS} to try again."
  exit 1
fi

@@ -74,7 +74,7 @@ ENV PATH "${PATH}:/opt/protobuf/bin"
###
# Avoid out of memory errors in builds
###
ENV MAVEN_OPTS -Xms256m -Xmx1536m
ENV MAVEN_OPTS -Xms256m -Xmx3072m

# Skip gpg verification when downloading Yetus via yetus-wrapper
ENV HADOOP_SKIP_YETUS_VERIFICATION true

dev-support/docker/Dockerfile_windows_10 (new file):

@@ -0,0 +1,124 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Dockerfile for installing the necessary dependencies for building Hadoop.
# See BUILDING.txt.

FROM mcr.microsoft.com/windows:ltsc2019

# Need to disable the progress bar for speeding up the downloads.
# hadolint ignore=SC2086
RUN powershell $Global:ProgressPreference = 'SilentlyContinue'

# Restore the default Windows shell for correct batch processing.
SHELL ["cmd", "/S", "/C"]

# Install Visual Studio 2019 Build Tools.
RUN curl -SL --output vs_buildtools.exe https://aka.ms/vs/16/release/vs_buildtools.exe \
    && (start /w vs_buildtools.exe --quiet --wait --norestart --nocache \
    --installPath "%ProgramFiles(x86)%\Microsoft Visual Studio\2019\BuildTools" \
    --add Microsoft.VisualStudio.Workload.VCTools \
    --add Microsoft.VisualStudio.Component.VC.ASAN \
    --add Microsoft.VisualStudio.Component.VC.Tools.x86.x64 \
    --add Microsoft.VisualStudio.Component.Windows10SDK.19041 \
    || IF "%ERRORLEVEL%"=="3010" EXIT 0) \
    && del /q vs_buildtools.exe

# Install Chocolatey.
RUN powershell -NoProfile -ExecutionPolicy Bypass -Command "iex ((New-Object System.Net.WebClient).DownloadString('https://chocolatey.org/install.ps1'))"
RUN setx PATH "%PATH%;%ALLUSERSPROFILE%\chocolatey\bin"

# Install git.
RUN choco install git.install -y
RUN powershell Copy-Item -Recurse -Path 'C:\Program Files\Git' -Destination C:\Git

# Install vcpkg.
# hadolint ignore=DL3003
RUN powershell git clone https://github.com/microsoft/vcpkg.git \
    && cd vcpkg \
    && git checkout 7ffa425e1db8b0c3edf9c50f2f3a0f25a324541d \
    && .\bootstrap-vcpkg.bat
RUN powershell .\vcpkg\vcpkg.exe install boost:x64-windows
RUN powershell .\vcpkg\vcpkg.exe install protobuf:x64-windows
RUN powershell .\vcpkg\vcpkg.exe install openssl:x64-windows
RUN powershell .\vcpkg\vcpkg.exe install zlib:x64-windows
ENV PROTOBUF_HOME "C:\vcpkg\installed\x64-windows"

# Install Azul Java 8 JDK.
RUN powershell Invoke-WebRequest -URI https://cdn.azul.com/zulu/bin/zulu8.62.0.19-ca-jdk8.0.332-win_x64.zip -OutFile $Env:TEMP\zulu8.62.0.19-ca-jdk8.0.332-win_x64.zip
RUN powershell Expand-Archive -Path $Env:TEMP\zulu8.62.0.19-ca-jdk8.0.332-win_x64.zip -DestinationPath "C:\Java"
ENV JAVA_HOME "C:\Java\zulu8.62.0.19-ca-jdk8.0.332-win_x64"
RUN setx PATH "%PATH%;%JAVA_HOME%\bin"

# Install Apache Maven.
RUN powershell Invoke-WebRequest -URI https://archive.apache.org/dist/maven/maven-3/3.8.6/binaries/apache-maven-3.8.6-bin.zip -OutFile $Env:TEMP\apache-maven-3.8.6-bin.zip
RUN powershell Expand-Archive -Path $Env:TEMP\apache-maven-3.8.6-bin.zip -DestinationPath "C:\Maven"
RUN setx PATH "%PATH%;C:\Maven\apache-maven-3.8.6\bin"
ENV MAVEN_OPTS '-Xmx2048M -Xss128M'

# Install CMake 3.19.0.
RUN powershell Invoke-WebRequest -URI https://cmake.org/files/v3.19/cmake-3.19.0-win64-x64.zip -OutFile $Env:TEMP\cmake-3.19.0-win64-x64.zip
RUN powershell Expand-Archive -Path $Env:TEMP\cmake-3.19.0-win64-x64.zip -DestinationPath "C:\CMake"
RUN setx PATH "%PATH%;C:\CMake\cmake-3.19.0-win64-x64\bin"

# Install zstd 1.5.4.
RUN powershell Invoke-WebRequest -Uri https://github.com/facebook/zstd/releases/download/v1.5.4/zstd-v1.5.4-win64.zip -OutFile $Env:TEMP\zstd-v1.5.4-win64.zip
RUN powershell Expand-Archive -Path $Env:TEMP\zstd-v1.5.4-win64.zip -DestinationPath "C:\ZStd"
RUN setx PATH "%PATH%;C:\ZStd"

# Install libopenssl 3.1.0 needed for rsync 3.2.7.
RUN powershell Invoke-WebRequest -Uri https://repo.msys2.org/msys/x86_64/libopenssl-3.1.0-1-x86_64.pkg.tar.zst -OutFile $Env:TEMP\libopenssl-3.1.0-1-x86_64.pkg.tar.zst
RUN powershell zstd -d $Env:TEMP\libopenssl-3.1.0-1-x86_64.pkg.tar.zst -o $Env:TEMP\libopenssl-3.1.0-1-x86_64.pkg.tar
RUN powershell mkdir "C:\LibOpenSSL"
RUN powershell tar -xvf $Env:TEMP\libopenssl-3.1.0-1-x86_64.pkg.tar -C "C:\LibOpenSSL"

# Install libxxhash 0.8.1 needed for rsync 3.2.7.
RUN powershell Invoke-WebRequest -Uri https://repo.msys2.org/msys/x86_64/libxxhash-0.8.1-1-x86_64.pkg.tar.zst -OutFile $Env:TEMP\libxxhash-0.8.1-1-x86_64.pkg.tar.zst
RUN powershell zstd -d $Env:TEMP\libxxhash-0.8.1-1-x86_64.pkg.tar.zst -o $Env:TEMP\libxxhash-0.8.1-1-x86_64.pkg.tar
RUN powershell mkdir "C:\LibXXHash"
RUN powershell tar -xvf $Env:TEMP\libxxhash-0.8.1-1-x86_64.pkg.tar -C "C:\LibXXHash"

# Install libzstd 1.5.4 needed for rsync 3.2.7.
RUN powershell Invoke-WebRequest -Uri https://repo.msys2.org/msys/x86_64/libzstd-1.5.4-1-x86_64.pkg.tar.zst -OutFile $Env:TEMP\libzstd-1.5.4-1-x86_64.pkg.tar.zst
RUN powershell zstd -d $Env:TEMP\libzstd-1.5.4-1-x86_64.pkg.tar.zst -o $Env:TEMP\libzstd-1.5.4-1-x86_64.pkg.tar
RUN powershell mkdir "C:\LibZStd"
RUN powershell tar -xvf $Env:TEMP\libzstd-1.5.4-1-x86_64.pkg.tar -C "C:\LibZStd"

# Install rsync 3.2.7.
RUN powershell Invoke-WebRequest -Uri https://repo.msys2.org/msys/x86_64/rsync-3.2.7-2-x86_64.pkg.tar.zst -OutFile $Env:TEMP\rsync-3.2.7-2-x86_64.pkg.tar.zst
RUN powershell zstd -d $Env:TEMP\rsync-3.2.7-2-x86_64.pkg.tar.zst -o $Env:TEMP\rsync-3.2.7-2-x86_64.pkg.tar
RUN powershell mkdir "C:\RSync"
RUN powershell tar -xvf $Env:TEMP\rsync-3.2.7-2-x86_64.pkg.tar -C "C:\RSync"
# Copy the dependencies of rsync 3.2.7.
RUN powershell Copy-Item -Path "C:\LibOpenSSL\usr\bin\*.dll" -Destination "C:\Program` Files\Git\usr\bin"
RUN powershell Copy-Item -Path "C:\LibXXHash\usr\bin\*.dll" -Destination "C:\Program` Files\Git\usr\bin"
RUN powershell Copy-Item -Path "C:\LibZStd\usr\bin\*.dll" -Destination "C:\Program` Files\Git\usr\bin"
RUN powershell Copy-Item -Path "C:\RSync\usr\bin\*" -Destination "C:\Program` Files\Git\usr\bin"

# Install Python 3.10.11.
RUN powershell Invoke-WebRequest -Uri https://www.python.org/ftp/python/3.10.11/python-3.10.11-embed-amd64.zip -OutFile $Env:TEMP\python-3.10.11-embed-amd64.zip
RUN powershell Expand-Archive -Path $Env:TEMP\python-3.10.11-embed-amd64.zip -DestinationPath "C:\Python3"
RUN powershell New-Item -ItemType HardLink -Value "C:\Python3\python.exe" -Path "C:\Python3\python3.exe"
RUN setx path "%PATH%;C:\Python3"

# We get strange Javadoc errors without this.
RUN setx classpath ""

RUN git config --global core.longpaths true
RUN setx PATH "%PATH%;C:\Program Files\Git\usr\bin"

# Define the entry point for the docker container.
ENTRYPOINT ["C:\\Program Files (x86)\\Microsoft Visual Studio\\2019\\BuildTools\\VC\\Auxiliary\\Build\\vcvars64.bat", "&&", "cmd.exe"]

@@ -48,7 +48,7 @@ is_platform_change() {
  declare in_path
  in_path="${SOURCEDIR}"/"${1}"

  for path in "${SOURCEDIR}"/dev-support/docker/Dockerfile* "${SOURCEDIR}"/dev-support/docker/pkg-resolver/*.json; do
  for path in "${DOCKERFILE}" "${SOURCEDIR}"/dev-support/docker/pkg-resolver/*.json; do
    if [ "${in_path}" == "${path}" ]; then
      echo "Found C/C++ platform related changes in ${in_path}"
      return 0

@@ -114,6 +114,26 @@ function check_ci_run() {
function run_ci() {
  TESTPATCHBIN="${WORKSPACE}/${YETUS}/precommit/src/main/shell/test-patch.sh"

  if [[ "$IS_WINDOWS" && "$IS_WINDOWS" == 1 ]]; then
    echo "Building in a Windows environment, skipping some Yetus related settings"
  else
    # run in docker mode and specifically point to our
    # Dockerfile since we don't want to use the auto-pulled version.
    YETUS_ARGS+=("--docker")
    YETUS_ARGS+=("--dockerfile=${DOCKERFILE}")
    YETUS_ARGS+=("--mvn-custom-repos")
    YETUS_ARGS+=("--dockermemlimit=22g")

    # test with Java 8 and 11
    YETUS_ARGS+=("--java-home=/usr/lib/jvm/java-8-openjdk-amd64")
    YETUS_ARGS+=("--multijdkdirs=/usr/lib/jvm/java-11-openjdk-amd64")
    YETUS_ARGS+=("--multijdktests=compile")
  fi

  if [[ "$IS_NIGHTLY_BUILD" && "$IS_NIGHTLY_BUILD" == 1 ]]; then
    YETUS_ARGS+=("--empty-patch")
    YETUS_ARGS+=("--branch=${BRANCH_NAME}")
  else
    # this must be clean for every run
    if [[ -d "${PATCHDIR}" ]]; then
      rm -rf "${PATCHDIR:?}"

@@ -132,6 +152,11 @@ function run_ci() {
      exit 0
    fi

    # write Yetus report as GitHub comment (YETUS-1102)
    YETUS_ARGS+=("--github-write-comment")
    YETUS_ARGS+=("--github-use-emoji-vote")
  fi

  YETUS_ARGS+=("--patch-dir=${PATCHDIR}")

  # where the source is located
@ -156,7 +181,6 @@ function run_ci() {
|
|||
# changing these to higher values may cause problems
|
||||
# with other jobs on systemd-enabled machines
|
||||
YETUS_ARGS+=("--proclimit=5500")
|
||||
YETUS_ARGS+=("--dockermemlimit=22g")
|
||||
|
||||
# -1 spotbugs issues that show up prior to the patch being applied
|
||||
YETUS_ARGS+=("--spotbugs-strict-precheck")
|
||||
|
@ -175,30 +199,15 @@ function run_ci() {
|
|||
# much attention to them
|
||||
YETUS_ARGS+=("--tests-filter=checkstyle")
|
||||
|
||||
# run in docker mode and specifically point to our
|
||||
# Dockerfile since we don't want to use the auto-pulled version.
|
||||
YETUS_ARGS+=("--docker")
|
||||
YETUS_ARGS+=("--dockerfile=${DOCKERFILE}")
|
||||
YETUS_ARGS+=("--mvn-custom-repos")
|
||||
|
||||
# effectively treat dev-suport as a custom maven module
|
||||
YETUS_ARGS+=("--skip-dirs=dev-support")
|
||||
|
||||
# help keep the ASF boxes clean
|
||||
YETUS_ARGS+=("--sentinel")
|
||||
|
||||
# test with Java 8 and 11
|
||||
YETUS_ARGS+=("--java-home=/usr/lib/jvm/java-8-openjdk-amd64")
|
||||
YETUS_ARGS+=("--multijdkdirs=/usr/lib/jvm/java-11-openjdk-amd64")
|
||||
YETUS_ARGS+=("--multijdktests=compile")
|
||||
|
||||
# custom javadoc goals
|
||||
YETUS_ARGS+=("--mvn-javadoc-goals=process-sources,javadoc:javadoc-no-fork")
|
||||
|
||||
# write Yetus report as GitHub comment (YETUS-1102)
|
||||
YETUS_ARGS+=("--github-write-comment")
|
||||
YETUS_ARGS+=("--github-use-emoji-vote")
|
||||
|
||||
"${TESTPATCHBIN}" "${YETUS_ARGS[@]}"
|
||||
}
|
||||
|
||||
|
|
|
@@ -98,13 +98,6 @@
<createSourcesJar>true</createSourcesJar>
<shadeSourcesContent>true</shadeSourcesContent>
</configuration>
<dependencies>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-maven-plugins</artifactId>
<version>${project.version}</version>
</dependency>
</dependencies>
<executions>
<execution>
<phase>package</phase>

@@ -254,8 +247,7 @@
</relocation>
</relocations>
<transformers>
<!-- Needed until MSHADE-182 -->
<transformer implementation="org.apache.hadoop.maven.plugin.shade.resource.ServicesResourceTransformer"/>
<transformer implementation="org.apache.maven.plugins.shade.resource.ServicesResourceTransformer"/>
<transformer implementation="org.apache.maven.plugins.shade.resource.ApacheLicenseResourceTransformer"/>
<transformer implementation="org.apache.maven.plugins.shade.resource.DontIncludeResourceTransformer">
<resource>NOTICE.txt</resource>
@@ -671,13 +671,6 @@
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-shade-plugin</artifactId>
<dependencies>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-maven-plugins</artifactId>
<version>${project.version}</version>
</dependency>
</dependencies>
<executions>
<execution>
<phase>package</phase>

@@ -1052,8 +1045,7 @@
</relocation>
</relocations>
<transformers>
<!-- Needed until MSHADE-182 -->
<transformer implementation="org.apache.hadoop.maven.plugin.shade.resource.ServicesResourceTransformer"/>
<transformer implementation="org.apache.maven.plugins.shade.resource.ServicesResourceTransformer"/>
<transformer implementation="org.apache.maven.plugins.shade.resource.ApacheLicenseResourceTransformer"/>
<transformer implementation="org.apache.maven.plugins.shade.resource.DontIncludeResourceTransformer">
<resources>
@@ -128,13 +128,6 @@
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-shade-plugin</artifactId>
<dependencies>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-maven-plugins</artifactId>
<version>${project.version}</version>
</dependency>
</dependencies>
<executions>
<execution>
<phase>package</phase>

@@ -155,6 +148,7 @@
<!-- Leave javax APIs that are stable -->
<!-- the jdk ships part of the javax.annotation namespace, so if we want to relocate this we'll have to carve it out by class :( -->
<exclude>com.google.code.findbugs:jsr305</exclude>
<exclude>io.netty:*</exclude>
<exclude>io.dropwizard.metrics:metrics-core</exclude>
<exclude>org.eclipse.jetty:jetty-servlet</exclude>
<exclude>org.eclipse.jetty:jetty-security</exclude>

@@ -163,6 +157,8 @@
<exclude>org.bouncycastle:*</exclude>
<!-- Leave snappy that includes native methods which cannot be relocated. -->
<exclude>org.xerial.snappy:*</exclude>
<!-- leave out kotlin classes -->
<exclude>org.jetbrains.kotlin:*</exclude>
</excludes>
</artifactSet>
<filters>

@@ -397,8 +393,7 @@
-->
</relocations>
<transformers>
<!-- Needed until MSHADE-182 -->
<transformer implementation="org.apache.hadoop.maven.plugin.shade.resource.ServicesResourceTransformer"/>
<transformer implementation="org.apache.maven.plugins.shade.resource.ServicesResourceTransformer"/>
<transformer implementation="org.apache.maven.plugins.shade.resource.ApacheLicenseResourceTransformer"/>
<transformer implementation="org.apache.maven.plugins.shade.resource.DontIncludeResourceTransformer">
<resources>
@@ -69,6 +69,10 @@
<groupId>com.github.pjfanning</groupId>
<artifactId>jersey-json</artifactId>
</exclusion>
<exclusion>
<groupId>org.codehaus.jettison</groupId>
<artifactId>jettison</artifactId>
</exclusion>
<exclusion>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-server</artifactId>

@@ -182,6 +186,10 @@
<groupId>com.github.pjfanning</groupId>
<artifactId>jersey-json</artifactId>
</exclusion>
<exclusion>
<groupId>org.codehaus.jettison</groupId>
<artifactId>jettison</artifactId>
</exclusion>
<exclusion>
<groupId>io.netty</groupId>
<artifactId>netty</artifactId>

@@ -233,6 +241,10 @@
<groupId>com.github.pjfanning</groupId>
<artifactId>jersey-json</artifactId>
</exclusion>
<exclusion>
<groupId>org.codehaus.jettison</groupId>
<artifactId>jettison</artifactId>
</exclusion>
<exclusion>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-servlet</artifactId>

@@ -290,6 +302,10 @@
<groupId>com.github.pjfanning</groupId>
<artifactId>jersey-json</artifactId>
</exclusion>
<exclusion>
<groupId>org.codehaus.jettison</groupId>
<artifactId>jettison</artifactId>
</exclusion>
<exclusion>
<groupId>io.netty</groupId>
<artifactId>netty</artifactId>
@@ -127,11 +127,6 @@
<artifactId>hadoop-azure-datalake</artifactId>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-openstack</artifactId>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-cos</artifactId>
@@ -110,20 +110,8 @@
<groupId>org.bouncycastle</groupId>
<artifactId>bcprov-jdk15on</artifactId>
</exclusion>
<!-- HACK. Transitive dependency for nimbus-jose-jwt. Needed for
packaging. Please re-check this version when updating
nimbus-jose-jwt. Please read HADOOP-14903 for more details.
-->
<exclusion>
<groupId>net.minidev</groupId>
<artifactId>json-smart</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>net.minidev</groupId>
<artifactId>json-smart</artifactId>
</dependency>
<dependency>
<groupId>org.apache.zookeeper</groupId>
<artifactId>zookeeper</artifactId>
@@ -18,6 +18,10 @@

package org.apache.hadoop.util;

import java.security.AccessController;
import java.security.PrivilegedAction;
import java.util.Arrays;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;

@@ -33,10 +37,10 @@ public class PlatformName {
* per the java-vm.
*/
public static final String PLATFORM_NAME =
(System.getProperty("os.name").startsWith("Windows")
? System.getenv("os") : System.getProperty("os.name"))
+ "-" + System.getProperty("os.arch")
+ "-" + System.getProperty("sun.arch.data.model");
(System.getProperty("os.name").startsWith("Windows") ?
System.getenv("os") : System.getProperty("os.name"))
+ "-" + System.getProperty("os.arch") + "-"
+ System.getProperty("sun.arch.data.model");

/**
* The java vendor name used in this platform.

@@ -44,10 +48,60 @@ public class PlatformName {
public static final String JAVA_VENDOR_NAME = System.getProperty("java.vendor");

/**
* A public static variable to indicate the current java vendor is
* IBM java or not.
* Define a system class accessor that is open to changes in underlying implementations
* of the system class loader modules.
*/
public static final boolean IBM_JAVA = JAVA_VENDOR_NAME.contains("IBM");
private static final class SystemClassAccessor extends ClassLoader {
public Class<?> getSystemClass(String className) throws ClassNotFoundException {
return findSystemClass(className);
}
}

/**
* A public static variable to indicate the current java vendor is
* IBM and the type is Java Technology Edition which provides its
* own implementations of many security packages and Cipher suites.
* Note that these are not provided in Semeru runtimes:
* See https://developer.ibm.com/languages/java/semeru-runtimes for details.
*/
public static final boolean IBM_JAVA = JAVA_VENDOR_NAME.contains("IBM") &&
hasIbmTechnologyEditionModules();

private static boolean hasIbmTechnologyEditionModules() {
return Arrays.asList(
"com.ibm.security.auth.module.JAASLoginModule",
"com.ibm.security.auth.module.Win64LoginModule",
"com.ibm.security.auth.module.NTLoginModule",
"com.ibm.security.auth.module.AIX64LoginModule",
"com.ibm.security.auth.module.LinuxLoginModule",
"com.ibm.security.auth.module.Krb5LoginModule"
).stream().anyMatch((module) -> isSystemClassAvailable(module));
}

/**
* In rare cases where different behaviour is performed based on the JVM vendor
* this method should be used to test for a unique JVM class provided by the
* vendor rather than using the vendor method. For example if one JVM provides a
* different Kerberos login module, testing for that login module being loadable
* before configuring to use it is preferable to using the vendor data.
*
* @param className the name of a class in the JVM to test for
* @return true if the class is available, false otherwise.
*/
private static boolean isSystemClassAvailable(String className) {
return AccessController.doPrivileged((PrivilegedAction<Boolean>) () -> {
try {
// Using ClassLoader.findSystemClass() instead of
// Class.forName(className, false, null) because Class.forName with a null
// ClassLoader only looks at the boot ClassLoader with Java 9 and above
// which doesn't look at all the modules available to the findSystemClass.
new SystemClassAccessor().getSystemClass(className);
return true;
} catch (Exception ignored) {
return false;
}
});
}

public static void main(String[] args) {
System.out.println(PLATFORM_NAME);
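The change above stops trusting `java.vendor` alone and instead probes for vendor-specific classes. As a minimal self-contained sketch of that probe technique (the class name below is just one of the IBM JTE modules listed in the patch; everything else is illustrative, not Hadoop code):

```java
// Sketch of the vendor-class probe: try to resolve a vendor-specific class
// via the system class loader and fall back cleanly when it is absent.
public final class VendorProbe {
  private static final class SystemClassAccessor extends ClassLoader {
    Class<?> getSystemClass(String name) throws ClassNotFoundException {
      return findSystemClass(name);
    }
  }

  // Returns true only when the named class is resolvable by this JVM.
  static boolean isLoadable(String className) {
    try {
      new SystemClassAccessor().getSystemClass(className);
      return true;
    } catch (Exception | LinkageError e) {
      return false;
    }
  }

  public static void main(String[] args) {
    // On a non-IBM JVM this prints false, so IBM_JAVA stays false even if
    // java.vendor happened to contain "IBM".
    System.out.println(isLoadable("com.ibm.security.auth.module.Krb5LoginModule"));
  }
}
```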
@@ -24,7 +24,7 @@ This filter must be configured in front of all the web application resources that

The Hadoop Auth and dependent JAR files must be in the web application classpath (commonly the `WEB-INF/lib` directory).

Hadoop Auth uses SLF4J-API for logging. The Auth Maven POM dependencies define the SLF4J API dependency but do not define the dependency on a concrete logging implementation; this must be added explicitly to the web application. For example, if the web application uses Log4j, the SLF4J-LOG4J12 and LOG4J jar files must be part part of the web application classpath as well as the Log4j configuration file.
Hadoop Auth uses SLF4J-API for logging. The Auth Maven POM dependencies define the SLF4J API dependency but do not define the dependency on a concrete logging implementation; this must be added explicitly to the web application. For example, if the web application uses Log4j, the SLF4J-LOG4J12 and LOG4J jar files must be part of the web application classpath as well as the Log4j configuration file.

### Common Configuration parameters
@@ -379,21 +379,6 @@
<Bug code="JLM" />
</Match>

<!--
OpenStack Swift FS module -closes streams in a different method
from where they are opened.
-->
<Match>
<Class name="org.apache.hadoop.fs.swift.snative.SwiftNativeOutputStream"/>
<Method name="uploadFileAttempt"/>
<Bug pattern="OBL_UNSATISFIED_OBLIGATION"/>
</Match>
<Match>
<Class name="org.apache.hadoop.fs.swift.snative.SwiftNativeOutputStream"/>
<Method name="uploadFilePartAttempt"/>
<Bug pattern="OBL_UNSATISFIED_OBLIGATION"/>
</Match>

<!-- code from maven source, null value is checked at callee side. -->
<Match>
<Class name="org.apache.hadoop.util.ComparableVersion$ListItem" />
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -176,13 +176,16 @@
</exclusions>
</dependency>
<dependency>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-server</artifactId>
<scope>compile</scope>
<!--
adding jettison as direct dependency (as jersey-json's jettison dependency is vulnerable with version 1.1),
so those who depend on hadoop-common externally will get the non-vulnerable jettison
-->
<groupId>org.codehaus.jettison</groupId>
<artifactId>jettison</artifactId>
</dependency>
<dependency>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-server</artifactId>
<scope>compile</scope>
</dependency>
<dependency>

@@ -200,11 +203,6 @@
<artifactId>assertj-core</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.glassfish.grizzly</groupId>
<artifactId>grizzly-http-servlet</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>commons-beanutils</groupId>
<artifactId>commons-beanutils</artifactId>
@@ -342,6 +340,14 @@
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>io.netty</groupId>
<artifactId>netty-handler</artifactId>
</dependency>
<dependency>
<groupId>io.netty</groupId>
<artifactId>netty-transport-native-epoll</artifactId>
</dependency>
<dependency>
<groupId>io.dropwizard.metrics</groupId>
<artifactId>metrics-core</artifactId>

@@ -383,6 +389,11 @@
<artifactId>mockwebserver</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.squareup.okio</groupId>
<artifactId>okio-jvm</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>dnsjava</groupId>
<artifactId>dnsjava</artifactId>
@@ -649,9 +660,10 @@
<goal>exec</goal>
</goals>
<configuration>
<executable>${basedir}/../../dev-support/bin/shelldocs</executable>
<executable>${shell-executable}</executable>
<workingDirectory>src/site/markdown</workingDirectory>
<arguments>
<argument>${basedir}/../../dev-support/bin/shelldocs</argument>
<argument>--skipprnorep</argument>
<argument>--output</argument>
<argument>${basedir}/src/site/markdown/UnixShellAPI.md</argument>
@@ -841,6 +853,36 @@
</execution>
</executions>
</plugin>
<plugin>
<!--Sets the skip.platformToolsetDetection to true if use.platformToolsetVersion is specified.
This implies that the automatic detection of which platform toolset to use will be skipped
and the one specified with use.platformToolsetVersion will be used.-->
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-antrun-plugin</artifactId>
<version>1.8</version>
<executions>
<execution>
<phase>validate</phase>
<goals>
<goal>run</goal>
</goals>
<configuration>
<exportAntProperties>true</exportAntProperties>
<target>
<condition property="skip.platformToolsetDetection" value="true" else="false">
<isset property="use.platformToolsetVersion"/>
</condition>
<!--Unfortunately, Maven doesn't have a way to negate a flag, thus we declare a
property which holds the negated value of skip.platformToolsetDetection.-->
<condition property="skip.platformToolsetDetection.negated" value="false" else="true">
<isset property="use.platformToolsetVersion"/>
</condition>
<echo>Skip platform toolset version detection = ${skip.platformToolsetDetection}</echo>
</target>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>exec-maven-plugin</artifactId>
@@ -852,6 +894,7 @@
<goal>exec</goal>
</goals>
<configuration>
<skip>${skip.platformToolsetDetection}</skip>
<executable>${basedir}\..\..\dev-support\bin\win-vs-upgrade.cmd</executable>
<arguments>
<argument>${basedir}\src\main\winutils</argument>

@@ -866,6 +909,7 @@
<goal>exec</goal>
</goals>
<configuration>
<skip>${skip.platformToolsetDetection}</skip>
<executable>msbuild</executable>
<arguments>
<argument>${basedir}/src/main/winutils/winutils.sln</argument>
@@ -878,6 +922,27 @@
</arguments>
</configuration>
</execution>
<execution>
<id>compile-ms-winutils-using-build-tools</id>
<phase>compile</phase>
<goals>
<goal>exec</goal>
</goals>
<configuration>
<skip>${skip.platformToolsetDetection.negated}</skip>
<executable>msbuild</executable>
<arguments>
<argument>${basedir}/src/main/winutils/winutils.sln</argument>
<argument>/nologo</argument>
<argument>/p:Configuration=Release</argument>
<argument>/p:OutDir=${project.build.directory}/bin/</argument>
<argument>/p:IntermediateOutputPath=${project.build.directory}/winutils/</argument>
<argument>/p:WsceConfigDir=${wsce.config.dir}</argument>
<argument>/p:WsceConfigFile=${wsce.config.file}</argument>
<argument>/p:PlatformToolset=${use.platformToolsetVersion}</argument>
</arguments>
</configuration>
</execution>
<execution>
<id>convert-ms-native-dll</id>
<phase>generate-sources</phase>
@@ -885,6 +950,7 @@
<goal>exec</goal>
</goals>
<configuration>
<skip>${skip.platformToolsetDetection}</skip>
<executable>${basedir}\..\..\dev-support\bin\win-vs-upgrade.cmd</executable>
<arguments>
<argument>${basedir}\src\main\native</argument>

@@ -899,6 +965,7 @@
<goal>exec</goal>
</goals>
<configuration>
<skip>${skip.platformToolsetDetection}</skip>
<executable>msbuild</executable>
<arguments>
<argument>${basedir}/src/main/native/native.sln</argument>
@@ -919,6 +986,35 @@
</arguments>
</configuration>
</execution>
<execution>
<id>compile-ms-native-dll-using-build-tools</id>
<phase>compile</phase>
<goals>
<goal>exec</goal>
</goals>
<configuration>
<skip>${skip.platformToolsetDetection.negated}</skip>
<executable>msbuild</executable>
<arguments>
<argument>${basedir}/src/main/native/native.sln</argument>
<argument>/nologo</argument>
<argument>/p:Configuration=Release</argument>
<argument>/p:OutDir=${project.build.directory}/bin/</argument>
<argument>/p:CustomZstdPrefix=${zstd.prefix}</argument>
<argument>/p:CustomZstdLib=${zstd.lib}</argument>
<argument>/p:CustomZstdInclude=${zstd.include}</argument>
<argument>/p:RequireZstd=${require.zstd}</argument>
<argument>/p:CustomOpensslPrefix=${openssl.prefix}</argument>
<argument>/p:CustomOpensslLib=${openssl.lib}</argument>
<argument>/p:CustomOpensslInclude=${openssl.include}</argument>
<argument>/p:RequireOpenssl=${require.openssl}</argument>
<argument>/p:RequireIsal=${require.isal}</argument>
<argument>/p:CustomIsalPrefix=${isal.prefix}</argument>
<argument>/p:CustomIsalLib=${isal.lib}</argument>
<argument>/p:PlatformToolset=${use.platformToolsetVersion}</argument>
</arguments>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
@@ -1151,7 +1247,7 @@
<id>src-test-compile-protoc-legacy</id>
<phase>generate-test-sources</phase>
<goals>
<goal>compile</goal>
<goal>test-compile</goal>
</goals>
<configuration>
<skip>false</skip>

@@ -1160,7 +1256,7 @@
com.google.protobuf:protoc:${protobuf.version}:exe:${os.detected.classifier}
</protocArtifact>
<includeDependenciesInDescriptorSet>false</includeDependenciesInDescriptorSet>
<protoSourceRoot>${basedir}/src/test/proto</protoSourceRoot>
<protoTestSourceRoot>${basedir}/src/test/proto</protoTestSourceRoot>
<outputDirectory>${project.build.directory}/generated-test-sources/java</outputDirectory>
<clearOutputDirectory>false</clearOutputDirectory>
<includes>
@@ -26,9 +26,9 @@ MYNAME="${BASH_SOURCE-$0}"
function hadoop_usage
{
hadoop_add_option "buildpaths" "attempt to add class files from build tree"
hadoop_add_option "hostnames list[,of,host,names]" "hosts to use in slave mode"
hadoop_add_option "hostnames list[,of,host,names]" "hosts to use in worker mode"
hadoop_add_option "loglevel level" "set the log4j level for this command"
hadoop_add_option "hosts filename" "list of hosts to use in slave mode"
hadoop_add_option "hosts filename" "list of hosts to use in worker mode"
hadoop_add_option "workers" "turn on worker mode"

hadoop_add_subcommand "checknative" client "check native Hadoop and compression libraries availability"
@@ -16,7 +16,7 @@
# limitations under the License.


# Run a Hadoop command on all slave hosts.
# Run a Hadoop command on all worker hosts.

function hadoop_usage
{
@@ -53,6 +53,10 @@
# variable is REQUIRED on ALL platforms except OS X!
# export JAVA_HOME=

# The language environment in which Hadoop runs. Use the English
# environment to ensure that logs are printed as expected.
export LANG=en_US.UTF-8

# Location of Hadoop. By default, Hadoop will attempt to determine
# this location based upon its execution path.
# export HADOOP_HOME=
@@ -75,14 +75,6 @@ log4j.appender.console.target=System.err
log4j.appender.console.layout=org.apache.log4j.PatternLayout
log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n

#
# TaskLog Appender
#
log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender

log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n

#
# HDFS block state change log from block manager
#
@@ -98,7 +98,7 @@ public class ConfServlet extends HttpServlet {
if (FORMAT_JSON.equals(format)) {
Configuration.dumpConfiguration(conf, propertyName, out);
} else if (FORMAT_XML.equals(format)) {
conf.writeXml(propertyName, out);
conf.writeXml(propertyName, out, conf);
} else {
throw new BadFormatException("Bad format: " + format);
}
@@ -37,6 +37,7 @@ import org.apache.hadoop.util.StringUtils;
public class ConfigRedactor {

private static final String REDACTED_TEXT = "<redacted>";
private static final String REDACTED_XML = "******";

private List<Pattern> compiledPatterns;

@@ -84,4 +85,19 @@ public class ConfigRedactor {
}
return false;
}

/**
* Given a key / value pair, decides whether or not to redact and returns
* either the original value or text indicating it has been redacted.
*
* @param key param key.
* @param value param value, will return if conditions permit.
* @return Original value, or text indicating it has been redacted
*/
public String redactXml(String key, String value) {
if (configIsSensitive(key)) {
return REDACTED_XML;
}
return value;
}
}
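As a quick illustration of the new API, a minimal sketch of `redactXml()` in use; it assumes the default `hadoop.security.sensitive-config-keys` patterns (which include `password$`) are in effect:

```java
import org.apache.hadoop.conf.ConfigRedactor;
import org.apache.hadoop.conf.Configuration;

public class RedactXmlDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    ConfigRedactor redactor = new ConfigRedactor(conf);

    // Keys matching a sensitive pattern come back as "******".
    System.out.println(redactor.redactXml("ssl.server.keystore.password", "hunter2"));
    // Non-sensitive keys are returned unchanged.
    System.out.println(redactor.redactXml("fs.defaultFS", "hdfs://nn:8020"));
  }
}
```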
@@ -24,7 +24,6 @@ import com.ctc.wstx.io.SystemId;
import com.ctc.wstx.stax.WstxInputFactory;
import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonGenerator;
import org.apache.hadoop.classification.VisibleForTesting;

import java.io.BufferedInputStream;
import java.io.DataInput;

@@ -87,6 +86,7 @@ import org.apache.hadoop.thirdparty.com.google.common.base.Charsets;
import org.apache.commons.collections.map.UnmodifiableMap;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.classification.VisibleForTesting;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

@@ -98,18 +98,19 @@ import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.alias.CredentialProvider;
import org.apache.hadoop.security.alias.CredentialProvider.CredentialEntry;
import org.apache.hadoop.security.alias.CredentialProviderFactory;
import org.apache.hadoop.thirdparty.com.google.common.base.Strings;
import org.apache.hadoop.util.Preconditions;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringInterner;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.XMLUtils;

import org.codehaus.stax2.XMLStreamReader2;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.w3c.dom.Document;
import org.w3c.dom.Element;

import org.apache.hadoop.util.Preconditions;
import org.apache.hadoop.thirdparty.com.google.common.base.Strings;

import static org.apache.commons.lang3.StringUtils.isBlank;
import static org.apache.commons.lang3.StringUtils.isNotBlank;

@@ -3593,16 +3594,18 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
* </ul>
* @param propertyName xml property name.
* @param out the writer to write to.
* @param config configuration.
* @throws IOException raised on errors performing I/O.
*/
public void writeXml(@Nullable String propertyName, Writer out)
public void writeXml(@Nullable String propertyName, Writer out, Configuration config)
throws IOException, IllegalArgumentException {
Document doc = asXmlDocument(propertyName);
ConfigRedactor redactor = config != null ? new ConfigRedactor(this) : null;
Document doc = asXmlDocument(propertyName, redactor);

try {
DOMSource source = new DOMSource(doc);
StreamResult result = new StreamResult(out);
TransformerFactory transFactory = TransformerFactory.newInstance();
TransformerFactory transFactory = XMLUtils.newSecureTransformerFactory();
Transformer transformer = transFactory.newTransformer();

// Important to not hold Configuration log while writing result, since

@@ -3614,11 +3617,16 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
}
}

public void writeXml(@Nullable String propertyName, Writer out)
throws IOException, IllegalArgumentException {
writeXml(propertyName, out, null);
}

/**
* Return the XML DOM corresponding to this Configuration.
*/
private synchronized Document asXmlDocument(@Nullable String propertyName)
throws IOException, IllegalArgumentException {
private synchronized Document asXmlDocument(@Nullable String propertyName,
ConfigRedactor redactor) throws IOException, IllegalArgumentException {
Document doc;
try {
doc = DocumentBuilderFactory

@@ -3641,13 +3649,13 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
propertyName + " not found");
} else {
// given property is found, write single property
appendXMLProperty(doc, conf, propertyName);
appendXMLProperty(doc, conf, propertyName, redactor);
conf.appendChild(doc.createTextNode("\n"));
}
} else {
// append all elements
for (Enumeration<Object> e = properties.keys(); e.hasMoreElements();) {
appendXMLProperty(doc, conf, (String)e.nextElement());
appendXMLProperty(doc, conf, (String)e.nextElement(), redactor);
conf.appendChild(doc.createTextNode("\n"));
}
}

@@ -3663,7 +3671,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
* @param propertyName
*/
private synchronized void appendXMLProperty(Document doc, Element conf,
String propertyName) {
String propertyName, ConfigRedactor redactor) {
// skip writing if given property name is empty or null
if (!Strings.isNullOrEmpty(propertyName)) {
String value = properties.getProperty(propertyName);

@@ -3676,8 +3684,11 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
propNode.appendChild(nameNode);

Element valueNode = doc.createElement("value");
valueNode.appendChild(doc.createTextNode(
properties.getProperty(propertyName)));
String propertyValue = properties.getProperty(propertyName);
if (redactor != null) {
propertyValue = redactor.redactXml(propertyName, propertyValue);
}
valueNode.appendChild(doc.createTextNode(propertyValue));
propNode.appendChild(valueNode);

Element finalNode = doc.createElement("final");
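A hedged usage sketch of the new three-argument `writeXml()` overload introduced above; the two-argument form delegates with a null `Configuration` and keeps the old, unredacted output:

```java
import java.io.StringWriter;
import org.apache.hadoop.conf.Configuration;

public class WriteXmlDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration(false);
    conf.set("fs.defaultFS", "hdfs://nn:8020");
    conf.set("ssl.server.keystore.password", "hunter2");

    StringWriter out = new StringWriter();
    // A non-null Configuration as the third argument enables redaction,
    // so the sensitive value above is emitted as ****** in the XML.
    conf.writeXml(null, out, conf);
    System.out.println(out);
  }
}
```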
@@ -240,13 +240,16 @@ public class CryptoOutputStream extends FilterOutputStream implements
if (closed) {
return;
}
try {
try {
flush();
} finally {
if (closeOutputStream) {
super.close();
codec.close();
}
freeBuffers();
}
} finally {
closed = true;
}
@@ -639,13 +639,14 @@ public abstract class KeyProvider implements Closeable {
public abstract void flush() throws IOException;

/**
* Split the versionName in to a base name. Converts "/aaa/bbb/3" to
* Split the versionName in to a base name. Converts "/aaa/bbb@3" to
* "/aaa/bbb".
* @param versionName the version name to split
* @return the base name of the key
* @throws IOException raised on errors performing I/O.
*/
public static String getBaseName(String versionName) throws IOException {
Objects.requireNonNull(versionName, "VersionName cannot be null");
int div = versionName.lastIndexOf('@');
if (div == -1) {
throw new IOException("No version in key path " + versionName);
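A one-line illustration of the behaviour the corrected javadoc now documents, grounded in the method body above:

```java
import org.apache.hadoop.crypto.key.KeyProvider;

public class BaseNameDemo {
  public static void main(String[] args) throws Exception {
    // The '@' is the version separator, so this prints "/aaa/bbb".
    System.out.println(KeyProvider.getBaseName("/aaa/bbb@3"));
    // A name without '@' raises IOException("No version in key path ...").
  }
}
```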
@@ -60,7 +60,6 @@ public class AvroFSInput implements Closeable, SeekableInput {
FS_OPTION_OPENFILE_READ_POLICY_SEQUENTIAL)
.withFileStatus(status)
.build());
fc.open(p);
}

@Override
@@ -174,6 +174,7 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
private static final int HEADER_LENGTH = 8;

private int bytesPerSum = 1;
private long fileLen = -1L;

public ChecksumFSInputChecker(ChecksumFileSystem fs, Path file)
throws IOException {

@@ -320,6 +321,18 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
return HEADER_LENGTH + (dataOffset/bytesPerSum) * FSInputChecker.CHECKSUM_SIZE;
}

/**
* Calculate length of file if not already cached.
* @return file length.
* @throws IOException any IOE.
*/
private long getFileLength() throws IOException {
if (fileLen == -1L) {
fileLen = fs.getFileStatus(file).getLen();
}
return fileLen;
}

/**
* Find the checksum ranges that correspond to the given data ranges.
* @param dataRanges the input data ranges, which are assumed to be sorted

@@ -371,13 +384,28 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
IntBuffer sums = sumsBytes.asIntBuffer();
sums.position(offset / FSInputChecker.CHECKSUM_SIZE);
ByteBuffer current = data.duplicate();
int numChunks = data.remaining() / bytesPerSum;
int numFullChunks = data.remaining() / bytesPerSum;
boolean partialChunk = ((data.remaining() % bytesPerSum) != 0);
int totalChunks = numFullChunks;
if (partialChunk) {
totalChunks++;
}
CRC32 crc = new CRC32();
// check each chunk to ensure they match
for(int c = 0; c < numChunks; ++c) {
// set the buffer position and the limit
current.limit((c + 1) * bytesPerSum);
for(int c = 0; c < totalChunks; ++c) {
// set the buffer position to the start of every chunk.
current.position(c * bytesPerSum);

if (c == numFullChunks) {
// During last chunk, there may be less than chunk size
// data present, so setting the limit accordingly.
int lastIncompleteChunk = data.remaining() % bytesPerSum;
current.limit((c * bytesPerSum) + lastIncompleteChunk);
} else {
// set the buffer limit to end of every chunk.
current.limit((c + 1) * bytesPerSum);
}

// compute the crc
crc.reset();
crc.update(current);

@@ -396,11 +424,34 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
return data;
}

/**
* Validates range parameters.
* In case of CheckSum FS, we already have calculated
* fileLength so failing fast here.
* @param ranges requested ranges.
* @param fileLength length of file.
* @throws EOFException end of file exception.
*/
private void validateRangeRequest(List<? extends FileRange> ranges,
final long fileLength) throws EOFException {
for (FileRange range : ranges) {
VectoredReadUtils.validateRangeRequest(range);
if (range.getOffset() + range.getLength() > fileLength) {
final String errMsg = String.format("Requested range [%d, %d) is beyond EOF for path %s",
range.getOffset(), range.getLength(), file);
LOG.warn(errMsg);
throw new EOFException(errMsg);
}
}
}

@Override
public void readVectored(List<? extends FileRange> ranges,
IntFunction<ByteBuffer> allocate) throws IOException {
final long length = getFileLength();
validateRangeRequest(ranges, length);

// If the stream doesn't have checksums, just delegate.
VectoredReadUtils.validateVectoredReadRanges(ranges);
if (sums == null) {
datas.readVectored(ranges, allocate);
return;

@@ -410,15 +461,18 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
List<CombinedFileRange> dataRanges =
VectoredReadUtils.mergeSortedRanges(Arrays.asList(sortRanges(ranges)), bytesPerSum,
minSeek, maxReadSizeForVectorReads());
// While merging the ranges above, they are rounded up based on the value of bytesPerSum
// which leads to some ranges crossing the EOF thus they need to be fixed else it will
// cause EOFException during actual reads.
for (CombinedFileRange range : dataRanges) {
if (range.getOffset() + range.getLength() > length) {
range.setLength((int) (length - range.getOffset()));
}
}
List<CombinedFileRange> checksumRanges = findChecksumRanges(dataRanges,
bytesPerSum, minSeek, maxSize);
sums.readVectored(checksumRanges, allocate);
datas.readVectored(dataRanges, allocate);
// Data read is correct. I have verified content of dataRanges.
// There is some bug below here as test (testVectoredReadMultipleRanges)
// is failing, should be
// somewhere while slicing the merged data into smaller user ranges.
// Spend some time figuring out but it is a complex code.
for(CombinedFileRange checksumRange: checksumRanges) {
for(FileRange dataRange: checksumRange.getUnderlying()) {
// when we have both the ranges, validate the checksum
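For context, a minimal sketch of the vectored-read API that this EOF fix hardens; the file path is a hypothetical test file, and everything else follows the signatures shown in the patch:

```java
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileRange;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class VectoredReadDemo {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    Path path = new Path("target/demo.bin"); // hypothetical input file
    List<FileRange> ranges = Arrays.asList(
        FileRange.createFileRange(0, 4096),
        FileRange.createFileRange(1 << 20, 4096));
    try (FSDataInputStream in = fs.open(path)) {
      in.readVectored(ranges, ByteBuffer::allocate);
      for (FileRange r : ranges) {
        // Each range's future completes once its bytes are read and verified.
        ByteBuffer data = r.getData().get();
        System.out.println(r.getOffset() + " -> " + data.remaining() + " bytes");
      }
    }
  }
}
```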
@@ -417,6 +417,14 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
/** How often to retry a ZooKeeper operation in milliseconds. */
public static final String ZK_RETRY_INTERVAL_MS =
ZK_PREFIX + "retry-interval-ms";
/** Keystore location for ZooKeeper client connection over SSL. */
public static final String ZK_SSL_KEYSTORE_LOCATION = ZK_PREFIX + "ssl.keystore.location";
/** Keystore password for ZooKeeper client connection over SSL. */
public static final String ZK_SSL_KEYSTORE_PASSWORD = ZK_PREFIX + "ssl.keystore.password";
/** Truststore location for ZooKeeper client connection over SSL. */
public static final String ZK_SSL_TRUSTSTORE_LOCATION = ZK_PREFIX + "ssl.truststore.location";
/** Truststore password for ZooKeeper client connection over SSL. */
public static final String ZK_SSL_TRUSTSTORE_PASSWORD = ZK_PREFIX + "ssl.truststore.password";
public static final int ZK_RETRY_INTERVAL_MS_DEFAULT = 1000;
/** Default domain name resolver for hadoop to use. */
public static final String HADOOP_DOMAINNAME_RESOLVER_IMPL =

@@ -480,13 +488,16 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
* Thread-level IOStats Support.
* {@value}
*/
public static final String THREAD_LEVEL_IOSTATISTICS_ENABLED =
"fs.thread.level.iostatistics.enabled";
public static final String IOSTATISTICS_THREAD_LEVEL_ENABLED =
"fs.iostatistics.thread.level.enabled";

/**
* Default value for Thread-level IOStats Support is true.
*/
public static final boolean THREAD_LEVEL_IOSTATISTICS_ENABLED_DEFAULT =
public static final boolean IOSTATISTICS_THREAD_LEVEL_ENABLED_DEFAULT =
true;

public static final String HADOOP_SECURITY_RESOLVER_IMPL =
"hadoop.security.resolver.impl";

}
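A small sketch using the new ZooKeeper SSL keys; note the assumption that `ZK_PREFIX` resolves to `hadoop.zk.`, which is not shown in this hunk:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;

public class ZkSslConfigDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    // With ZK_PREFIX = "hadoop.zk." this sets
    // hadoop.zk.ssl.keystore.location and its truststore counterpart.
    conf.set(CommonConfigurationKeys.ZK_SSL_KEYSTORE_LOCATION, "/etc/security/zk.jks");
    conf.set(CommonConfigurationKeys.ZK_SSL_TRUSTSTORE_LOCATION, "/etc/security/zk-trust.jks");
    System.out.println(conf.get(CommonConfigurationKeys.ZK_SSL_KEYSTORE_LOCATION));
  }
}
```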
@@ -1000,6 +1000,7 @@ public class CommonConfigurationKeysPublic {
String.join(",",
"secret$",
"password$",
"username$",
"ssl.keystore.pass$",
"fs.s3.*[Ss]ecret.?[Kk]ey",
"fs.s3a.*.server-side-encryption.key",
@@ -163,5 +163,11 @@ public final class CommonPathCapabilities {
public static final String ETAGS_PRESERVED_IN_RENAME =
"fs.capability.etags.preserved.in.rename";

/**
* Does this Filesystem support lease recovery operations such as
* {@link LeaseRecoverable#recoverLease(Path)} and {@link LeaseRecoverable#isFileClosed(Path)}?
* Value: {@value}.
*/
public static final String LEASE_RECOVERABLE = "fs.capability.lease.recoverable";

}
@@ -256,9 +256,8 @@ public class DelegationTokenRenewer
try {
action.cancel();
} catch (InterruptedException ie) {
LOG.error("Interrupted while canceling token for " + fs.getUri()
+ "filesystem");
LOG.debug("Exception in removeRenewAction: {}", ie);
LOG.error("Interrupted while canceling token for {} filesystem.", fs.getUri());
LOG.debug("Exception in removeRenewAction.", ie);
}
}
}
@@ -28,6 +28,34 @@ import org.apache.hadoop.classification.InterfaceStability;
* The base interface which various FileSystem FileContext Builder
* interfaces can extend, and which underlying implementations
* will then implement.
* <p>
* HADOOP-16202 expanded the opt() and must() arguments with
* operator overloading, but HADOOP-18724 identified mapping problems:
* passing a long value in to {@code opt()} could end up invoking
* {@code opt(string, double)}, which could then trigger parse failures.
* <p>
* To fix this without forcing existing code to break/be recompiled:
* <ol>
* <li>A new method to explicitly set a long value is added:
* {@link #optLong(String, long)}
* </li>
* <li>A new method to explicitly set a double value is added:
* {@link #optDouble(String, double)}
* </li>
* <li>
* All of {@link #opt(String, long)}, {@link #opt(String, float)} and
* {@link #opt(String, double)} invoke {@link #optLong(String, long)}.
* </li>
* <li>
* The same changes have been applied to {@code must()} methods.
* </li>
* </ol>
* The forwarding of existing double/float setters to the long setters ensures
* that existing code will link, but they are guaranteed to always set a long value.
* If you need to write code which works correctly with all hadoop releases,
* convert the option to a string explicitly and then call {@link #opt(String, String)}
* or {@link #must(String, String)} as appropriate.
*
* @param <S> Return type on the {@link #build()} call.
* @param <B> type of builder itself.
*/

@@ -50,7 +78,9 @@ public interface FSBuilder<S, B extends FSBuilder<S, B>> {
* @return generic type B.
* @see #opt(String, String)
*/
B opt(@Nonnull String key, boolean value);
default B opt(@Nonnull String key, boolean value) {
return opt(key, Boolean.toString(value));
}

/**
* Set optional int parameter for the Builder.

@@ -60,17 +90,25 @@ public interface FSBuilder<S, B extends FSBuilder<S, B>> {
* @return generic type B.
* @see #opt(String, String)
*/
B opt(@Nonnull String key, int value);
default B opt(@Nonnull String key, int value) {
return optLong(key, value);
}

/**
* Set optional float parameter for the Builder.
* This parameter is converted to a long and passed
* to {@link #optLong(String, long)} -all
* decimal precision is lost.
*
* @param key key.
* @param value value.
* @return generic type B.
* @see #opt(String, String)
* @deprecated use {@link #optDouble(String, double)}
*/
B opt(@Nonnull String key, float value);
@Deprecated
default B opt(@Nonnull String key, float value) {
return optLong(key, (long) value);
}

/**
* Set optional long parameter for the Builder.

@@ -78,19 +116,27 @@ public interface FSBuilder<S, B extends FSBuilder<S, B>> {
* @param key key.
* @param value value.
* @return generic type B.
* @see #opt(String, String)
* @deprecated use {@link #optLong(String, long)} where possible.
*/
B opt(@Nonnull String key, long value);
default B opt(@Nonnull String key, long value) {
return optLong(key, value);
}

/**
* Set optional double parameter for the Builder.
*
* Pass an optional double parameter for the Builder.
* This parameter is converted to a long and passed
* to {@link #optLong(String, long)} -all
* decimal precision is lost.
* @param key key.
* @param value value.
* @return generic type B.
* @see #opt(String, String)
* @deprecated use {@link #optDouble(String, double)}
*/
B opt(@Nonnull String key, double value);
@Deprecated
default B opt(@Nonnull String key, double value) {
return optLong(key, (long) value);
}

/**
* Set an array of string values as optional parameter for the Builder.

@@ -102,6 +148,30 @@ public interface FSBuilder<S, B extends FSBuilder<S, B>> {
*/
B opt(@Nonnull String key, @Nonnull String... values);

/**
* Set optional long parameter for the Builder.
*
* @param key key.
* @param value value.
* @return generic type B.
* @see #opt(String, String)
*/
default B optLong(@Nonnull String key, long value) {
return opt(key, Long.toString(value));
}

/**
* Set optional double parameter for the Builder.
*
* @param key key.
* @param value value.
* @return generic type B.
* @see #opt(String, String)
*/
default B optDouble(@Nonnull String key, double value) {
return opt(key, Double.toString(value));
}

/**
* Set mandatory option to the Builder.
*

@@ -122,7 +192,9 @@ public interface FSBuilder<S, B extends FSBuilder<S, B>> {
* @return generic type B.
* @see #must(String, String)
*/
B must(@Nonnull String key, boolean value);
default B must(@Nonnull String key, boolean value) {
return must(key, Boolean.toString(value));
}

/**
* Set mandatory int option.

@@ -132,17 +204,24 @@ public interface FSBuilder<S, B extends FSBuilder<S, B>> {
* @return generic type B.
* @see #must(String, String)
*/
B must(@Nonnull String key, int value);
default B must(@Nonnull String key, int value) {
return mustLong(key, value);
}

/**
* Set mandatory float option.
* This parameter is converted to a long and passed
* to {@link #mustLong(String, long)} -all
* decimal precision is lost.
*
* @param key key.
* @param value value.
* @return generic type B.
* @see #must(String, String)
* @deprecated use {@link #mustDouble(String, double)} to set floating point.
*/
B must(@Nonnull String key, float value);
@Deprecated
default B must(@Nonnull String key, float value) {
return mustLong(key, (long) value);
}

/**
* Set mandatory long option.

@@ -152,17 +231,24 @@ public interface FSBuilder<S, B extends FSBuilder<S, B>> {
* @return generic type B.
* @see #must(String, String)
*/
B must(@Nonnull String key, long value);
@Deprecated
default B must(@Nonnull String key, long value) {
return mustLong(key, (long) value);
}

/**
* Set mandatory double option.
* Set mandatory long option, despite passing in a floating
* point value.
*
* @param key key.
* @param value value.
* @return generic type B.
* @see #must(String, String)
*/
B must(@Nonnull String key, double value);
@Deprecated
default B must(@Nonnull String key, double value) {
return mustLong(key, (long) value);
}

/**
* Set a string array as mandatory option.

@@ -174,6 +260,30 @@ public interface FSBuilder<S, B extends FSBuilder<S, B>> {
*/
B must(@Nonnull String key, @Nonnull String... values);

/**
* Set mandatory long parameter for the Builder.
*
* @param key key.
* @param value value.
* @return generic type B.
* @see #opt(String, String)
*/
default B mustLong(@Nonnull String key, long value) {
return must(key, Long.toString(value));
}

/**
* Set mandatory double parameter for the Builder.
*
* @param key key.
* @param value value.
* @return generic type B.
* @see #opt(String, String)
*/
default B mustDouble(@Nonnull String key, double value) {
return must(key, Double.toString(value));
}

/**
* Instantiate the object which was being built.
*
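A hedged sketch of the `optLong()` path on `openFile()`, matching the signatures added above; the string key is the documented value of `FS_OPTION_OPENFILE_LENGTH`, and on releases without `optLong()` the portable fallback is `opt(key, Long.toString(len))`:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import static org.apache.hadoop.util.functional.FutureIO.awaitFuture;

public class OpenFileDemo {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    Path path = new Path("target/demo.bin"); // hypothetical input file
    // optLong() sets the length hint without any risk of the long being
    // routed through the double overload and re-parsed.
    FSDataInputStream in = awaitFuture(
        fs.openFile(path)
            .optLong("fs.option.openfile.length", 4096)
            .build());
    in.close();
  }
}
```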
@@ -2231,7 +2231,7 @@ public class FileContext implements PathCapabilities {
InputStream in = awaitFuture(openFile(qSrc)
.opt(FS_OPTION_OPENFILE_READ_POLICY,
FS_OPTION_OPENFILE_READ_POLICY_WHOLE_FILE)
.opt(FS_OPTION_OPENFILE_LENGTH,
.optLong(FS_OPTION_OPENFILE_LENGTH,
fs.getLen()) // file length hint for object stores
.build());
try (OutputStream out = create(qDst, createFlag)) {
@@ -55,6 +55,15 @@ public interface FileRange {
*/
void setData(CompletableFuture<ByteBuffer> data);

/**
* Get any reference passed in to the file range constructor.
* This is not used by any implementation code; it is to help
* bind this API to libraries retrieving multiple stripes of
* data in parallel.
* @return a reference or null.
*/
Object getReference();

/**
* Factory method to create a FileRange object.
* @param offset starting offset of the range.

@@ -62,6 +71,17 @@ public interface FileRange {
* @return a new instance of FileRangeImpl.
*/
static FileRange createFileRange(long offset, int length) {
return new FileRangeImpl(offset, length);
return new FileRangeImpl(offset, length, null);
}

/**
* Factory method to create a FileRange object.
* @param offset starting offset of the range.
* @param length length of the range.
* @param reference nullable reference to store in the range.
* @return a new instance of FileRangeImpl.
*/
static FileRange createFileRange(long offset, int length, Object reference) {
return new FileRangeImpl(offset, length, reference);
}
}
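A small sketch of the new reference-carrying factory method; the `"stripe-0"` tag is a hypothetical caller-side label, since the filesystem never inspects the reference:

```java
import org.apache.hadoop.fs.FileRange;

public class FileRangeReferenceDemo {
  public static void main(String[] args) {
    // Attach per-stripe bookkeeping to the range at creation time.
    FileRange range = FileRange.createFileRange(0, 4096, "stripe-0");
    // The reference travels with the range and comes back unchanged,
    // letting callers map completed reads back to their stripes.
    String tag = (String) range.getReference();
    System.out.println("range belongs to " + tag);
  }
}
```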
@@ -402,7 +402,8 @@ public class FileStatus implements Writable, Comparable<Object>,
}

/**
* Compare this FileStatus to another FileStatus
* Compare this FileStatus to another FileStatus based on lexicographical
* order of path.
* @param o the FileStatus to be compared.
* @return a negative integer, zero, or a positive integer as this object
* is less than, equal to, or greater than the specified object.

@@ -412,7 +413,8 @@ public class FileStatus implements Writable, Comparable<Object>,
}

/**
* Compare this FileStatus to another FileStatus.
* Compare this FileStatus to another FileStatus based on lexicographical
* order of path.
* This method was added back by HADOOP-14683 to keep binary compatibility.
*
* @param o the FileStatus to be compared.
@ -21,7 +21,6 @@ import javax.annotation.Nonnull;
|
|||
import java.io.Closeable;
|
||||
import java.io.FileNotFoundException;
|
||||
import java.io.IOException;
|
||||
import java.io.InterruptedIOException;
|
||||
import java.lang.ref.WeakReference;
|
||||
import java.lang.ref.ReferenceQueue;
|
||||
import java.net.URI;
|
||||
|
@ -1544,6 +1543,39 @@ public abstract class FileSystem extends Configured
|
|||
public abstract FSDataOutputStream append(Path f, int bufferSize,
|
||||
Progressable progress) throws IOException;
|
||||
|
||||
/**
|
||||
* Append to an existing file (optional operation).
|
||||
* @param f the existing file to be appended.
|
||||
* @param appendToNewBlock whether to append data to a new block
|
||||
* instead of the end of the last partial block
|
||||
* @throws IOException IO failure
|
||||
* @throws UnsupportedOperationException if the operation is unsupported
|
||||
* (default).
|
||||
* @return output stream.
|
||||
*/
|
||||
public FSDataOutputStream append(Path f, boolean appendToNewBlock) throws IOException {
|
||||
return append(f, getConf().getInt(IO_FILE_BUFFER_SIZE_KEY,
|
||||
IO_FILE_BUFFER_SIZE_DEFAULT), null, appendToNewBlock);
|
||||
}
|
||||
|
||||
/**
|
||||
* Append to an existing file (optional operation).
|
||||
* This function is used for being overridden by some FileSystem like DistributedFileSystem
|
||||
* @param f the existing file to be appended.
|
||||
* @param bufferSize the size of the buffer to be used.
|
||||
* @param progress for reporting progress if it is not null.
|
||||
* @param appendToNewBlock whether to append data to a new block
|
||||
* instead of the end of the last partial block
|
||||
* @throws IOException IO failure
|
||||
* @throws UnsupportedOperationException if the operation is unsupported
|
||||
* (default).
|
||||
* @return output stream.
|
||||
*/
|
||||
public FSDataOutputStream append(Path f, int bufferSize,
|
||||
Progressable progress, boolean appendToNewBlock) throws IOException {
|
||||
return append(f, bufferSize, progress);
|
||||
}
|
||||
|
||||
/**
|
||||
* Concat existing files together.
|
||||
* @param trg the path to the target destination.
|
||||
|
@ -2381,8 +2413,14 @@ public abstract class FileSystem extends Configured
|
|||
if (stat.isFile()) { // file
|
||||
curFile = stat;
|
||||
} else if (recursive) { // directory
|
||||
try {
|
||||
RemoteIterator<LocatedFileStatus> newDirItor = listLocatedStatus(stat.getPath());
|
||||
itors.push(curItor);
|
||||
curItor = listLocatedStatus(stat.getPath());
|
||||
curItor = newDirItor;
|
||||
} catch (FileNotFoundException ignored) {
|
||||
LOGGER.debug("Directory {} deleted while attempting for recursive listing",
|
||||
stat.getPath());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -3564,9 +3602,9 @@ public abstract class FileSystem extends Configured
|
|||
} catch (IOException | RuntimeException e) {
|
||||
// exception raised during initialization.
|
||||
// log summary at warn and full stack at debug
|
||||
LOGGER.warn("Failed to initialize fileystem {}: {}",
|
||||
LOGGER.warn("Failed to initialize filesystem {}: {}",
|
||||
uri, e.toString());
|
||||
LOGGER.debug("Failed to initialize fileystem", e);
|
||||
LOGGER.debug("Failed to initialize filesystem", e);
|
||||
// then (robustly) close the FS, so as to invoke any
|
||||
// cleanup code.
|
||||
IOUtils.cleanupWithLogger(LOGGER, fs);
|
||||
|
@@ -3647,11 +3685,7 @@ public abstract class FileSystem extends Configured
     // to construct an instance.
     try (DurationInfo d = new DurationInfo(LOGGER, false,
         "Acquiring creator semaphore for %s", uri)) {
-      creatorPermits.acquire();
-    } catch (InterruptedException e) {
-      // acquisition was interrupted; convert to an IOE.
-      throw (IOException)new InterruptedIOException(e.toString())
-          .initCause(e);
+      creatorPermits.acquireUninterruptibly();
     }
     FileSystem fsToClose = null;
     try {
@@ -3908,6 +3942,7 @@ public abstract class FileSystem extends Configured
     private volatile long bytesReadDistanceOfThreeOrFour;
     private volatile long bytesReadDistanceOfFiveOrLarger;
     private volatile long bytesReadErasureCoded;
+    private volatile long remoteReadTimeMS;

     /**
      * Add another StatisticsData object to this one.

@@ -3925,6 +3960,7 @@ public abstract class FileSystem extends Configured
       this.bytesReadDistanceOfFiveOrLarger +=
           other.bytesReadDistanceOfFiveOrLarger;
       this.bytesReadErasureCoded += other.bytesReadErasureCoded;
+      this.remoteReadTimeMS += other.remoteReadTimeMS;
     }

     /**

@@ -3943,6 +3979,7 @@ public abstract class FileSystem extends Configured
       this.bytesReadDistanceOfFiveOrLarger =
           -this.bytesReadDistanceOfFiveOrLarger;
       this.bytesReadErasureCoded = -this.bytesReadErasureCoded;
+      this.remoteReadTimeMS = -this.remoteReadTimeMS;
     }

     @Override

@@ -3991,6 +4028,10 @@ public abstract class FileSystem extends Configured
     public long getBytesReadErasureCoded() {
       return bytesReadErasureCoded;
     }
+
+    public long getRemoteReadTimeMS() {
+      return remoteReadTimeMS;
+    }
   }

   private interface StatisticsAggregator<T> {

@@ -4218,6 +4259,14 @@ public abstract class FileSystem extends Configured
       }
     }

+    /**
+     * Increment the time taken to read bytes from remote in the statistics.
+     * @param durationMS time taken in ms to read bytes from remote
+     */
+    public void increaseRemoteReadTime(final long durationMS) {
+      getThreadStatistics().remoteReadTimeMS += durationMS;
+    }
+
     /**
      * Apply the given aggregator to all StatisticsData objects associated with
      * this Statistics object.

@@ -4365,6 +4414,25 @@ public abstract class FileSystem extends Configured
       return bytesRead;
     }

+    /**
+     * Get total time taken in ms for bytes read from remote.
+     * @return time taken in ms for remote bytes read.
+     */
+    public long getRemoteReadTime() {
+      return visitAll(new StatisticsAggregator<Long>() {
+        private long remoteReadTimeMS = 0;
+
+        @Override
+        public void accept(StatisticsData data) {
+          remoteReadTimeMS += data.remoteReadTimeMS;
+        }
+
+        public Long aggregate() {
+          return remoteReadTimeMS;
+        }
+      });
+    }
+
     /**
      * Get all statistics data.
      * MR or other frameworks can use the method to get all statistics at once.
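A sketch of how a client could exercise the new per-thread counter; the scheme passed to getStatistics() is illustrative:

    // Hedged sketch: record and read back remote read time. getStatistics()
    // returns the shared Statistics instance for the scheme; the getter
    // aggregates the thread-local StatisticsData objects via visitAll().
    FileSystem.Statistics stats =
        FileSystem.getStatistics("hdfs", fs.getClass());   // illustrative scheme
    stats.increaseRemoteReadTime(42);                      // 42 ms spent on a remote read
    long totalMs = stats.getRemoteReadTime();              // aggregated across threads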
@@ -47,7 +47,8 @@ public class FileSystemStorageStatistics extends StorageStatistics {
       "bytesReadDistanceOfOneOrTwo",
       "bytesReadDistanceOfThreeOrFour",
       "bytesReadDistanceOfFiveOrLarger",
-      "bytesReadErasureCoded"
+      "bytesReadErasureCoded",
+      "remoteReadTimeMS"
   };

   private static class LongStatisticIterator

@@ -107,6 +108,8 @@ public class FileSystemStorageStatistics extends StorageStatistics {
       return data.getBytesReadDistanceOfFiveOrLarger();
     case "bytesReadErasureCoded":
       return data.getBytesReadErasureCoded();
+    case "remoteReadTimeMS":
+      return data.getRemoteReadTimeMS();
     default:
       return null;
     }
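Once the key is registered, the statistic becomes visible by name through the generic StorageStatistics API; a minimal sketch:

    // Hedged sketch: look up the new counter by its string key.
    StorageStatistics ss = fs.getStorageStatistics();
    Long remoteReadMs = ss.getLong("remoteReadTimeMS");   // null if the key is unknown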
@@ -484,7 +484,7 @@ public class FileUtil {
       in = awaitFuture(srcFS.openFile(src)
           .opt(FS_OPTION_OPENFILE_READ_POLICY,
               FS_OPTION_OPENFILE_READ_POLICY_WHOLE_FILE)
-          .opt(FS_OPTION_OPENFILE_LENGTH,
+          .optLong(FS_OPTION_OPENFILE_LENGTH,
               srcStatus.getLen()) // file length hint for object stores
           .build());
       out = dstFS.create(dst, overwrite);
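The switch to optLong() keeps the long-valued length hint on the typed path rather than the overloaded opt(). A sketch of the same pattern in caller code, using the awaitFuture helper the surrounding code already imports:

    // Hedged sketch: open a file with a read policy and a long length hint.
    FSDataInputStream in = awaitFuture(
        fs.openFile(path)
            .opt(FS_OPTION_OPENFILE_READ_POLICY, FS_OPTION_OPENFILE_READ_POLICY_WHOLE_FILE)
            .optLong(FS_OPTION_OPENFILE_LENGTH, status.getLen())
            .build());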
@@ -0,0 +1,46 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.fs;

import java.io.IOException;

/**
 * Whether the given Path of the FileSystem has the capability to perform lease recovery.
 */
public interface LeaseRecoverable {

  /**
   * Start the lease recovery of a file.
   *
   * @param file path to a file.
   * @return true if the file is already closed, and it does not require lease recovery.
   * @throws IOException if an error occurs during lease recovery.
   * @throws UnsupportedOperationException if lease recovery is not supported by this filesystem.
   */
  boolean recoverLease(Path file) throws IOException;

  /**
   * Get the close status of a file.
   * @param file The string representation of the path to the file
   * @return true if file is closed
   * @throws IOException If an I/O error occurred
   * @throws UnsupportedOperationException if isFileClosed is not supported by this filesystem.
   */
  boolean isFileClosed(Path file) throws IOException;
}
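A short sketch of client-side recovery against the new interface; the path and polling interval are illustrative, and in trunk DistributedFileSystem is the main implementor:

    // Hedged sketch: force lease recovery on a possibly-open file, then
    // poll until the file is closed. (InterruptedException handling elided.)
    Path wal = new Path("/data/wal.log");
    if (fs instanceof LeaseRecoverable) {
      LeaseRecoverable lr = (LeaseRecoverable) fs;
      boolean closed = lr.recoverLease(wal);   // true: no recovery was needed
      while (!closed) {
        Thread.sleep(1000L);
        closed = lr.isFileClosed(wal);
      }
    }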
@@ -396,6 +396,10 @@ public class LocalDirAllocator {
       Context ctx = confChanged(conf);
       int numDirs = ctx.localDirs.length;
       int numDirsSearched = 0;
+      // Max capacity in any directory
+      long maxCapacity = 0;
+      String errorText = null;
+      IOException diskException = null;
       //remove the leading slash from the path (to make sure that the uri
       //resolution results in a valid path on the dir being checked)
       if (pathStr.startsWith("/")) {

@@ -410,7 +414,14 @@ public class LocalDirAllocator {

         //build the "roulette wheel"
         for(int i =0; i < ctx.dirDF.length; ++i) {
-          availableOnDisk[i] = ctx.dirDF[i].getAvailable();
+          final DF target = ctx.dirDF[i];
+          // attempt to recreate the dir so that getAvailable() is valid
+          // if it fails, getAvailable() will return 0, so the dir will
+          // be declared unavailable.
+          // return value is logged at debug to keep spotbugs quiet.
+          final boolean b = new File(target.getDirPath()).mkdirs();
+          LOG.debug("mkdirs of {}={}", target, b);
+          availableOnDisk[i] = target.getAvailable();
           totalAvailable += availableOnDisk[i];
         }

@@ -444,9 +455,18 @@ public class LocalDirAllocator {
         int dirNum = ctx.getAndIncrDirNumLastAccessed(randomInc);
         while (numDirsSearched < numDirs) {
           long capacity = ctx.dirDF[dirNum].getAvailable();
+          if (capacity > maxCapacity) {
+            maxCapacity = capacity;
+          }
           if (capacity > size) {
-            returnPath =
-                createPath(ctx.localDirs[dirNum], pathStr, checkWrite);
+            try {
+              returnPath = createPath(ctx.localDirs[dirNum], pathStr,
+                  checkWrite);
+            } catch (IOException e) {
+              errorText = e.getMessage();
+              diskException = e;
+              LOG.debug("DiskException caught for dir {}", ctx.localDirs[dirNum], e);
+            }
             if (returnPath != null) {
               ctx.getAndIncrDirNumLastAccessed(numDirsSearched);
               break;

@@ -462,8 +482,13 @@ public class LocalDirAllocator {
         }

         //no path found
-        throw new DiskErrorException("Could not find any valid local " +
-            "directory for " + pathStr);
+        String newErrorText = "Could not find any valid local directory for " +
+            pathStr + " with requested size " + size +
+            " as the max capacity in any directory is " + maxCapacity;
+        if (errorText != null) {
+          newErrorText = newErrorText + " due to " + errorText;
+        }
+        throw new DiskErrorException(newErrorText, diskException);
       }

       /** Creates a file on the local FS. Pass size as
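The allocator's public entry point is unchanged; a small sketch of the call these hunks harden (the configuration key is illustrative):

    // Hedged sketch: ask for a writable local path of a given size. With the
    // change, a full or failing disk is skipped, and the final error reports
    // the max capacity seen plus the underlying IOException as its cause.
    LocalDirAllocator alloc = new LocalDirAllocator("fs.client.buffer.dir");
    Path scratch = alloc.getLocalPathForWrite("spill-0001.tmp", 128L * 1024 * 1024, conf);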
@@ -465,7 +465,12 @@ public class Path
    * @return a new path with the suffix added
    */
   public Path suffix(String suffix) {
-    return new Path(getParent(), getName()+suffix);
+    Path parent = getParent();
+    if (parent == null) {
+      return new Path("/", getName() + suffix);
+    }
+
+    return new Path(parent, getName() + suffix);
   }

   @Override
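The null check covers paths whose getParent() is null, i.e. the root; a sketch of the now-safe call:

    // Hedged sketch: suffix() no longer NPEs when the path has no parent,
    // resolving against "/" instead of a null parent.
    Path root = new Path("/");
    Path suffixed = root.suffix("_COPYING_");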
@@ -114,6 +114,16 @@ public interface PositionedReadable {
    * As a result of the call, each range will have FileRange.setData(CompletableFuture)
    * called with a future that when complete will have a ByteBuffer with the
    * data from the file's range.
+   * <p>
+   * The position returned by getPos() after readVectored() is undefined.
+   * </p>
+   * <p>
+   * If a file is changed while the readVectored() operation is in progress, the output is
+   * undefined. Some ranges may have old data, some may have new and some may have both.
+   * </p>
+   * <p>
+   * While a readVectored() operation is in progress, normal read api calls may block.
+   * </p>
    * @param ranges the byte ranges to read
    * @param allocate the function to allocate ByteBuffer
    * @throws IOException any IOE.
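A usage sketch consistent with that contract; FileRange.createFileRange is the factory in this API, the offsets are illustrative, and in is assumed to be an FSDataInputStream on an existing file:

    // Hedged sketch: vectored read of two ranges; each range's future
    // completes independently, and getPos() is not meaningful afterwards.
    List<FileRange> ranges = new ArrayList<>();
    ranges.add(FileRange.createFileRange(0, 4096));
    ranges.add(FileRange.createFileRange(1024 * 1024, 8192));
    in.readVectored(ranges, ByteBuffer::allocate);
    for (FileRange r : ranges) {
      ByteBuffer data = r.getData().join();   // waits for that range only
      // ... consume data ...
    }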
@@ -1326,4 +1326,9 @@ public class RawLocalFileSystem extends FileSystem {
       return super.hasPathCapability(path, capability);
     }
   }
+
+  @VisibleForTesting
+  static void setUseDeprecatedFileStatus(boolean useDeprecatedFileStatus) {
+    RawLocalFileSystem.useDeprecatedFileStatus = useDeprecatedFileStatus;
+  }
 }
@@ -0,0 +1,50 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.fs;

import java.io.IOException;

/**
 * Whether the given filesystem is in any status of safe mode.
 */
public interface SafeMode {

  /**
   * Enter, leave, or get safe mode.
   *
   * @param action One of {@link SafeModeAction} LEAVE, ENTER, GET, FORCE_EXIT.
   * @throws IOException if set safe mode fails to proceed.
   * @return true if the action is successfully accepted, otherwise false means rejected.
   */
  default boolean setSafeMode(SafeModeAction action) throws IOException {
    return setSafeMode(action, false);
  }

  /**
   * Enter, leave, or get safe mode.
   *
   * @param action One of {@link SafeModeAction} LEAVE, ENTER, GET, FORCE_EXIT.
   * @param isChecked If true check only for Active metadata node / NameNode's status,
   *                  else check first metadata node / NameNode's status.
   * @throws IOException if set safe mode fails to proceed.
   * @return true if the action is successfully accepted, otherwise false means rejected.
   */
  boolean setSafeMode(SafeModeAction action, boolean isChecked) throws IOException;

}
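A sketch of the filesystem-neutral calling pattern the interface enables:

    // Hedged sketch: toggle safe mode without binding to HDFS types.
    if (fs instanceof SafeMode) {
      SafeMode sm = (SafeMode) fs;
      sm.setSafeMode(SafeModeAction.ENTER);
      boolean stillOn = sm.setSafeMode(SafeModeAction.GET);   // query only
      sm.setSafeMode(SafeModeAction.LEAVE);
    }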
@@ -0,0 +1,41 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs;

/**
 * An identical copy from org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction, that helps
 * the other file system implementation to define {@link SafeMode}.
 */
public enum SafeModeAction {
  /**
   * Starting entering into safe mode.
   */
  ENTER,
  /**
   * Gracefully exit from safe mode.
   */
  LEAVE,
  /**
   * Force Exit from safe mode.
   */
  FORCE_EXIT,
  /**
   * Get the status of the safe mode.
   */
  GET;
}
@@ -84,7 +84,7 @@ public interface StreamCapabilities {
    * Support for vectored IO api.
    * See {@code PositionedReadable#readVectored(List, IntFunction)}.
    */
-  String VECTOREDIO = "readvectored";
+  String VECTOREDIO = "in:readvectored";

   /**
    * Stream abort() capability implemented by {@link Abortable#abort()}.
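A sketch of probing for the renamed capability before issuing a vectored read; ranges is assumed to be a prepared List of FileRange:

    // Hedged sketch: the constant now resolves to "in:readvectored".
    if (in.hasCapability(StreamCapabilities.VECTOREDIO)) {
      in.readVectored(ranges, ByteBuffer::allocate);
    }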
@@ -23,8 +23,10 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.viewfs.ViewFileSystem;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+import static org.apache.hadoop.fs.viewfs.Constants.*;

 /**
  * Provides a trash facility which supports pluggable Trash policies.

@@ -67,7 +69,7 @@ public class Trash extends Configured {
    * Hence we get the file system of the fully-qualified resolved-path and
    * then move the path p to the trashbin in that volume,
    * @param fs - the filesystem of path p
-   * @param p - the path being deleted - to be moved to trasg
+   * @param p - the path being deleted - to be moved to trash
    * @param conf - configuration
    * @return false if the item is already in the trash or trash is disabled
    * @throws IOException on error

@@ -94,6 +96,27 @@ public class Trash extends Configured {
       LOG.warn("Failed to get server trash configuration", e);
       throw new IOException("Failed to get server trash configuration", e);
     }
+
+    /*
+     * In HADOOP-18144, we changed getTrashRoot() in ViewFileSystem to return a
+     * viewFS path, instead of a targetFS path. moveToTrash works for
+     * ViewFileSystem now. ViewFileSystem will do path resolution internally by
+     * itself.
+     *
+     * When localized trash flag is enabled:
+     *    1). if fs is a ViewFileSystem, we can initialize Trash() with a
+     *        ViewFileSystem object;
+     *    2). When fs is not a ViewFileSystem, the only place we would need to
+     *        resolve a path is for symbolic links. However, symlink is not
+     *        enabled in Hadoop due to the complexity to support it
+     *        (HADOOP-10019).
+     */
+    if (conf.getBoolean(CONFIG_VIEWFS_TRASH_FORCE_INSIDE_MOUNT_POINT,
+        CONFIG_VIEWFS_TRASH_FORCE_INSIDE_MOUNT_POINT_DEFAULT)) {
+      Trash trash = new Trash(fs, conf);
+      return trash.moveToTrash(p);
+    }
+
     Trash trash = new Trash(fullyResolvedFs, conf);
     return trash.moveToTrash(fullyResolvedPath);
   }
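A sketch of the caller-visible effect; the configuration key string is an assumption here (it is whatever CONFIG_VIEWFS_TRASH_FORCE_INSIDE_MOUNT_POINT resolves to in the viewfs Constants), and the path is hypothetical:

    // Hedged sketch: with the viewfs flag on, the path is trashed inside its
    // own mount point rather than in the resolved target filesystem.
    conf.setBoolean("fs.viewfs.trash.force-inside-mount-point", true);   // assumed key
    boolean moved = Trash.moveToAppropriateTrash(fs, new Path("/mnt/data/old.csv"), conf);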
@@ -30,6 +30,7 @@ import java.util.function.IntFunction;

 import org.apache.hadoop.fs.impl.CombinedFileRange;
 import org.apache.hadoop.util.Preconditions;
+import org.apache.hadoop.util.functional.Function4RaisingIOE;

 /**
  * Utility class which implements helper methods used

@@ -37,6 +38,8 @@ import org.apache.hadoop.util.Preconditions;
  */
 public final class VectoredReadUtils {

+  private static final int TMP_BUFFER_MAX_SIZE = 64 * 1024;
+
   /**
    * Validate a single range.
    * @param range file range.

@@ -114,7 +117,12 @@ public final class VectoredReadUtils {
       FileRange range,
       ByteBuffer buffer) throws IOException {
     if (buffer.isDirect()) {
-      buffer.put(readInDirectBuffer(stream, range));
+      readInDirectBuffer(range.getLength(),
+          buffer,
+          (position, buffer1, offset, length) -> {
+            stream.readFully(position, buffer1, offset, length);
+            return null;
+          });
       buffer.flip();
     } else {
       stream.readFully(range.getOffset(), buffer.array(),

@@ -122,13 +130,34 @@ public final class VectoredReadUtils {
     }
   }

-  private static byte[] readInDirectBuffer(PositionedReadable stream,
-      FileRange range) throws IOException {
-    // if we need to read data from a direct buffer and the stream doesn't
-    // support it, we allocate a byte array to use.
-    byte[] tmp = new byte[range.getLength()];
-    stream.readFully(range.getOffset(), tmp, 0, tmp.length);
-    return tmp;
+  /**
+   * Read bytes from stream into a byte buffer using an
+   * intermediate byte array.
+   * @param length number of bytes to read.
+   * @param buffer buffer to fill.
+   * @param operation operation to use for reading data.
+   * @throws IOException any IOE.
+   */
+  public static void readInDirectBuffer(int length,
+      ByteBuffer buffer,
+      Function4RaisingIOE<Integer, byte[], Integer,
+          Integer, Void> operation) throws IOException {
+    if (length == 0) {
+      return;
+    }
+    int readBytes = 0;
+    int position = 0;
+    int tmpBufferMaxSize = Math.min(TMP_BUFFER_MAX_SIZE, length);
+    byte[] tmp = new byte[tmpBufferMaxSize];
+    while (readBytes < length) {
+      int currentLength = (readBytes + tmpBufferMaxSize) < length ?
+          tmpBufferMaxSize
+          : (length - readBytes);
+      operation.apply(position, tmp, 0, currentLength);
+      buffer.put(tmp, 0, currentLength);
+      position = position + currentLength;
+      readBytes = readBytes + currentLength;
+    }
   }

   /**

@@ -210,6 +239,7 @@ public final class VectoredReadUtils {
       if (sortedRanges[i].getOffset() < prev.getOffset() + prev.getLength()) {
         throw new UnsupportedOperationException("Overlapping ranges are not supported");
       }
+      prev = sortedRanges[i];
     }
     return Arrays.asList(sortedRanges);
   }

@@ -277,9 +307,16 @@ public final class VectoredReadUtils {
       FileRange request) {
     int offsetChange = (int) (request.getOffset() - readOffset);
     int requestLength = request.getLength();
+    // Create a new buffer that is backed by the original contents
+    // The buffer will have position 0 and the same limit as the original one
+    readData = readData.slice();
+    // Change the offset and the limit of the buffer as the reader wants to see
+    // only relevant data
     readData.position(offsetChange);
     readData.limit(offsetChange + requestLength);
+    // Create a new buffer after the limit change so that only that portion of the data is
+    // returned to the reader.
+    readData = readData.slice();
+    return readData;
   }
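A sketch of the new chunked helper in isolation; here the data source is a plain byte array instead of a stream, and the meaning of the position argument is whatever the lambda you supply gives it (the helper just advances it from zero):

    // Hedged sketch: fill a direct buffer in 64 KB chunks from an in-memory
    // source; 'position' is the running offset the helper advances.
    byte[] source = new byte[1 << 20];
    ByteBuffer direct = ByteBuffer.allocateDirect(source.length);
    VectoredReadUtils.readInDirectBuffer(source.length, direct,
        (position, array, offset, length) -> {
          System.arraycopy(source, position, array, offset, length);
          return null;
        });
    direct.flip();   // same flip the caller in readRangeFrom() performs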
@@ -90,6 +90,11 @@ public final class AuditConstants {
    */
   public static final String PARAM_PROCESS = "ps";

+  /**
+   * Header: Range for GET request data: {@value}.
+   */
+  public static final String PARAM_RANGE = "rg";
+
   /**
    * Task Attempt ID query header: {@value}.
    */

@@ -110,4 +115,9 @@ public final class AuditConstants {
    */
   public static final String PARAM_TIMESTAMP = "ts";

+  /**
+   * Num of files to be deleted as part of the bulk delete request.
+   */
+  public static final String DELETE_KEYS_SIZE = "ks";
+
 }
@@ -44,11 +44,13 @@ import static org.apache.hadoop.util.Preconditions.checkNotNull;
  * with option support.
  *
  * <code>
- *   .opt("foofs:option.a", true)
- *   .opt("foofs:option.b", "value")
+ *   .opt("fs.s3a.open.option.caching", true)
+ *   .opt("fs.option.openfile.read.policy", "random, adaptive")
 *   .opt("fs.s3a.open.option.etag", "9fe4c37c25b")
 *   .must("foofs:cache", true)
 *   .must("barfs:cache-size", 256 * 1024 * 1024)
+ *   .optLong("fs.option.openfile.length", 1_500_000_000_000)
+ *   .must("fs.option.openfile.buffer.size", 256_000)
+ *   .mustLong("fs.option.openfile.split.start", 256_000_000)
+ *   .mustLong("fs.option.openfile.split.end", 512_000_000)
 *   .build();
 * </code>
 *

@@ -64,6 +66,7 @@ import static org.apache.hadoop.util.Preconditions.checkNotNull;
  */
 @InterfaceAudience.Public
 @InterfaceStability.Unstable
+@SuppressWarnings({"deprecation", "unused"})
 public abstract class
     AbstractFSBuilderImpl<S, B extends FSBuilder<S, B>>
     implements FSBuilder<S, B> {

@@ -178,10 +181,7 @@ public abstract class
    */
   @Override
   public B opt(@Nonnull final String key, boolean value) {
-    mandatoryKeys.remove(key);
-    optionalKeys.add(key);
-    options.setBoolean(key, value);
-    return getThisBuilder();
+    return opt(key, Boolean.toString(value));
   }

   /**

@@ -191,18 +191,17 @@ public abstract class
    */
   @Override
   public B opt(@Nonnull final String key, int value) {
-    mandatoryKeys.remove(key);
-    optionalKeys.add(key);
-    options.setInt(key, value);
-    return getThisBuilder();
+    return optLong(key, value);
   }

   @Override
   public B opt(@Nonnull final String key, final long value) {
-    mandatoryKeys.remove(key);
-    optionalKeys.add(key);
-    options.setLong(key, value);
-    return getThisBuilder();
+    return optLong(key, value);
   }

+  @Override
+  public B optLong(@Nonnull final String key, final long value) {
+    return opt(key, Long.toString(value));
+  }
+
   /**

@@ -212,10 +211,7 @@ public abstract class
    */
   @Override
   public B opt(@Nonnull final String key, float value) {
-    mandatoryKeys.remove(key);
-    optionalKeys.add(key);
-    options.setFloat(key, value);
-    return getThisBuilder();
+    return optLong(key, (long) value);
   }

   /**

@@ -225,10 +221,17 @@ public abstract class
    */
   @Override
   public B opt(@Nonnull final String key, double value) {
-    mandatoryKeys.remove(key);
-    optionalKeys.add(key);
-    options.setDouble(key, value);
-    return getThisBuilder();
+    return optLong(key, (long) value);
   }

+  /**
+   * Set optional double parameter for the Builder.
+   *
+   * @see #opt(String, String)
+   */
+  @Override
+  public B optDouble(@Nonnull final String key, double value) {
+    return opt(key, Double.toString(value));
+  }
+
   /**

@@ -264,10 +267,22 @@ public abstract class
    */
   @Override
   public B must(@Nonnull final String key, boolean value) {
-    mandatoryKeys.add(key);
-    optionalKeys.remove(key);
-    options.setBoolean(key, value);
-    return getThisBuilder();
+    return must(key, Boolean.toString(value));
   }

+  @Override
+  public B mustLong(@Nonnull final String key, final long value) {
+    return must(key, Long.toString(value));
+  }
+
+  /**
+   * Set optional double parameter for the Builder.
+   *
+   * @see #opt(String, String)
+   */
+  @Override
+  public B mustDouble(@Nonnull final String key, double value) {
+    return must(key, Double.toString(value));
+  }
+
   /**

@@ -277,44 +292,22 @@ public abstract class
    */
   @Override
   public B must(@Nonnull final String key, int value) {
-    mandatoryKeys.add(key);
-    optionalKeys.remove(key);
-    options.setInt(key, value);
-    return getThisBuilder();
+    return mustLong(key, value);
   }

   @Override
   public B must(@Nonnull final String key, final long value) {
-    mandatoryKeys.add(key);
-    optionalKeys.remove(key);
-    options.setLong(key, value);
-    return getThisBuilder();
+    return mustLong(key, value);
   }

   /**
    * Set mandatory float option.
    *
    * @see #must(String, String)
    */
   @Override
-  public B must(@Nonnull final String key, float value) {
-    mandatoryKeys.add(key);
-    optionalKeys.remove(key);
-    options.setFloat(key, value);
-    return getThisBuilder();
+  public B must(@Nonnull final String key, final float value) {
+    return mustLong(key, (long) value);
   }

   /**
    * Set mandatory double option.
    *
    * @see #must(String, String)
    */
   @Override
   public B must(@Nonnull final String key, double value) {
-    mandatoryKeys.add(key);
-    optionalKeys.remove(key);
-    options.setDouble(key, value);
-    return getThisBuilder();
+    return mustLong(key, (long) value);
   }

   /**
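After this change every numeric overload funnels into the string-based opt/must, so the three calls below end up equivalent; the key is hypothetical and the builder variable is assumed:

    // Hedged sketch: each call stores the text "1048576" and marks the key
    // optional; typed storage (setInt/setLong/...) is gone.
    builder.opt("fs.example.size", 1048576);          // int overload -> optLong
    builder.optLong("fs.example.size", 1048576L);     // canonical form
    builder.opt("fs.example.size", "1048576");        // what both reduce to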
@@ -29,10 +29,10 @@ import java.util.List;
  * together into a single read for efficiency.
  */
 public class CombinedFileRange extends FileRangeImpl {
-  private ArrayList<FileRange> underlying = new ArrayList<>();
+  private List<FileRange> underlying = new ArrayList<>();

   public CombinedFileRange(long offset, long end, FileRange original) {
-    super(offset, (int) (end - offset));
+    super(offset, (int) (end - offset), null);
     this.underlying.add(original);
   }

@@ -0,0 +1,95 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.fs.impl;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.store.LogExactlyOnce;

/**
 * Class to help with use of FSBuilder.
 */
public class FSBuilderSupport {

  private static final Logger LOG =
      LoggerFactory.getLogger(FSBuilderSupport.class);

  public static final LogExactlyOnce LOG_PARSE_ERROR = new LogExactlyOnce(LOG);

  /**
   * Options which are parsed.
   */
  private final Configuration options;

  /**
   * Constructor.
   * @param options the configuration options from the builder.
   */
  public FSBuilderSupport(final Configuration options) {
    this.options = options;
  }

  public Configuration getOptions() {
    return options;
  }

  /**
   * Get a long value with resilience to unparseable values.
   * Negative values are replaced with the default.
   * @param key key to log
   * @param defVal default value
   * @return long value
   */
  public long getPositiveLong(String key, long defVal) {
    long l = getLong(key, defVal);
    if (l < 0) {
      LOG.debug("The option {} has a negative value {}, replacing with the default {}",
          key, l, defVal);
      l = defVal;
    }
    return l;
  }

  /**
   * Get a long value with resilience to unparseable values.
   * @param key key to log
   * @param defVal default value
   * @return long value
   */
  public long getLong(String key, long defVal) {
    final String v = options.getTrimmed(key, "");
    if (v.isEmpty()) {
      return defVal;
    }
    try {
      return options.getLong(key, defVal);
    } catch (NumberFormatException e) {
      final String msg = String.format(
          "The option %s value \"%s\" is not a long integer; using the default value %s",
          key, v, defVal);
      // not a long,
      LOG_PARSE_ERROR.warn(msg);
      LOG.debug("{}", msg, e);
      return defVal;
    }
  }

}
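A sketch of the helper guarding a builder-supplied option; builderOptions is the Configuration captured by a builder, and the key is one of the standard openfile options:

    // Hedged sketch: tolerate junk in a numeric option. A value of "banana"
    // logs one warning (via LogExactlyOnce) and falls back to the default.
    FSBuilderSupport support = new FSBuilderSupport(builderOptions);
    long splitEnd = support.getPositiveLong("fs.option.openfile.split.end", 0L);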
@@ -34,9 +34,21 @@ public class FileRangeImpl implements FileRange {
   private int length;
   private CompletableFuture<ByteBuffer> reader;

-  public FileRangeImpl(long offset, int length) {
+  /**
+   * nullable reference to store in the range.
+   */
+  private final Object reference;
+
+  /**
+   * Create.
+   * @param offset offset in file
+   * @param length length of data to read.
+   * @param reference nullable reference to store in the range.
+   */
+  public FileRangeImpl(long offset, int length, Object reference) {
     this.offset = offset;
     this.length = length;
+    this.reference = reference;
   }

   @Override

@@ -71,4 +83,9 @@ public class FileRangeImpl implements FileRange {
   public CompletableFuture<ByteBuffer> getData() {
     return reader;
   }
+
+  @Override
+  public Object getReference() {
+    return reference;
+  }
 }
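The reference lets callers thread their own bookkeeping object through a vectored read; a sketch, assuming the three-argument createFileRange factory on FileRange and a hypothetical caller-side ChunkDescriptor type:

    // Hedged sketch: tag each range with the caller's own descriptor and
    // recover it when the read completes.
    ChunkDescriptor chunk = new ChunkDescriptor(0L, 4096);   // hypothetical type
    FileRange range = FileRange.createFileRange(chunk.getOffset(), chunk.getSize(), chunk);
    // ... after readVectored() completes this range:
    ChunkDescriptor back = (ChunkDescriptor) range.getReference();   // same object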
@@ -0,0 +1,97 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.fs.impl;

import java.lang.ref.WeakReference;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsSource;

import static java.util.Objects.requireNonNull;

/**
 * A weak referenced metrics source which avoids hanging on to large objects
 * if somehow they don't get fully closed/cleaned up.
 * The JVM may clean up all objects which are only weakly referenced whenever
 * it does a GC, <i>even if there is no memory pressure</i>.
 * To avoid these refs being removed, always keep a strong reference around
 * somewhere.
 */
@InterfaceAudience.Private
public class WeakRefMetricsSource implements MetricsSource {

  /**
   * Name to know when unregistering.
   */
  private final String name;

  /**
   * Underlying metrics source.
   */
  private final WeakReference<MetricsSource> sourceWeakReference;

  /**
   * Constructor.
   * @param name Name to know when unregistering.
   * @param source metrics source
   */
  public WeakRefMetricsSource(final String name, final MetricsSource source) {
    this.name = name;
    this.sourceWeakReference = new WeakReference<>(requireNonNull(source));
  }

  /**
   * If the weak reference is non null, update the metrics.
   * @param collector to contain the resulting metrics snapshot
   * @param all if true, return all metrics even if unchanged.
   */
  @Override
  public void getMetrics(final MetricsCollector collector, final boolean all) {
    MetricsSource metricsSource = sourceWeakReference.get();
    if (metricsSource != null) {
      metricsSource.getMetrics(collector, all);
    }
  }

  /**
   * Name to know when unregistering.
   * @return the name passed in during construction.
   */
  public String getName() {
    return name;
  }

  /**
   * Get the source, will be null if the reference has been GC'd
   * @return the source reference
   */
  public MetricsSource getSource() {
    return sourceWeakReference.get();
  }

  @Override
  public String toString() {
    return "WeakRefMetricsSource{" +
        "name='" + name + '\'' +
        ", sourceWeakReference is " +
        (sourceWeakReference.get() == null ? "unset" : "set") +
        '}';
  }
}
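The intended pattern, per the class javadoc, is that the owner keeps the strong reference and registers only the weak wrapper; a sketch with illustrative names:

    // Hedged sketch: register the weak wrapper, keep the strong reference in
    // the owning filesystem, and unregister by the recorded name on close().
    MetricsSystem ms = DefaultMetricsSystem.instance();
    WeakRefMetricsSource wrapped = new WeakRefMetricsSource("fs-instance-1", realSource);
    ms.register(wrapped.getName(), "FileSystem metrics", wrapped);
    // ... later, on close:
    ms.unregisterSource(wrapped.getName());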
@@ -25,6 +25,8 @@ import javax.annotation.Nullable;

 import org.apache.hadoop.util.WeakReferenceMap;

+import static java.util.Objects.requireNonNull;
+
 /**
  * A WeakReferenceMap for threads.
  * @param <V> value type of the map

@@ -36,30 +38,55 @@ public class WeakReferenceThreadMap<V> extends WeakReferenceMap<Long, V> {
     super(factory, referenceLost);
   }

+  /**
+   * Get the value for the current thread, creating if needed.
+   * @return an instance.
+   */
   public V getForCurrentThread() {
     return get(currentThreadId());
   }

+  /**
+   * Remove the reference for the current thread.
+   * @return any reference value which existed.
+   */
   public V removeForCurrentThread() {
     return remove(currentThreadId());
   }

+  /**
+   * Get the current thread ID.
+   * @return thread ID.
+   */
   public long currentThreadId() {
     return Thread.currentThread().getId();
   }

+  /**
+   * Set the new value for the current thread.
+   * @param newVal new reference to set for the active thread.
+   * @return the previously set value, possibly null
+   */
   public V setForCurrentThread(V newVal) {
+    requireNonNull(newVal);
     long id = currentThreadId();

-    // if the same object is already in the map, just return it.
-    WeakReference<V> ref = lookup(id);
-    // Reference value could be set to null. Thus, ref.get() could return
-    // null. Should be handled accordingly while using the returned value.
-    if (ref != null && ref.get() == newVal) {
-      return ref.get();
-    }
+    WeakReference<V> existingWeakRef = lookup(id);
+
+    // The looked up reference could be one of
+    // 1. null: nothing there
+    // 2. valid but get() == null : reference lost by GC.
+    // 3. different from the new value
+    // 4. the same as the old value
+    if (resolve(existingWeakRef) == newVal) {
+      // case 4: do nothing, return the new value
+      return newVal;
+    } else {
+      // cases 1, 2, 3: update the map and return the old value
+      return put(id, newVal);
+    }

   }

 }
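A sketch of one-value-per-thread usage; the element type and factory are illustrative:

    // Hedged sketch: values are created on demand per thread and dropped
    // automatically once they are only weakly reachable.
    WeakReferenceThreadMap<List<String>> auditStack =
        new WeakReferenceThreadMap<>(threadId -> new ArrayList<>(), null);
    List<String> entries = auditStack.getForCurrentThread();   // created via factory
    auditStack.setForCurrentThread(entries);                   // "case 4": same object, no-op
    auditStack.removeForCurrentThread();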
@@ -0,0 +1,76 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.apache.hadoop.fs.impl.prefetch;

import java.io.Closeable;
import java.io.IOException;
import java.nio.ByteBuffer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.LocalDirAllocator;

/**
 * Provides functionality necessary for caching blocks of data read from FileSystem.
 */
public interface BlockCache extends Closeable {

  /**
   * Indicates whether the given block is in this cache.
   *
   * @param blockNumber the id of the given block.
   * @return true if the given block is in this cache, false otherwise.
   */
  boolean containsBlock(int blockNumber);

  /**
   * Gets the blocks in this cache.
   *
   * @return the blocks in this cache.
   */
  Iterable<Integer> blocks();

  /**
   * Gets the number of blocks in this cache.
   *
   * @return the number of blocks in this cache.
   */
  int size();

  /**
   * Gets the block having the given {@code blockNumber}.
   *
   * @param blockNumber the id of the desired block.
   * @param buffer contents of the desired block are copied to this buffer.
   * @throws IOException if there is an error reading the given block.
   */
  void get(int blockNumber, ByteBuffer buffer) throws IOException;

  /**
   * Puts the given block in this cache.
   *
   * @param blockNumber the id of the given block.
   * @param buffer contents of the given block to be added to this cache.
   * @param conf the configuration.
   * @param localDirAllocator the local dir allocator instance.
   * @throws IOException if there is an error writing the given block.
   */
  void put(int blockNumber, ByteBuffer buffer, Configuration conf,
      LocalDirAllocator localDirAllocator) throws IOException;
}
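A sketch of the contract from a caller's point of view; cache, conf, localDirAllocator and blockData are assumed to exist (in trunk, SingleFilePerBlockCache is the disk-backed implementation):

    // Hedged sketch: cache a block once, then serve later reads from it.
    if (!cache.containsBlock(blockNumber)) {
      cache.put(blockNumber, buffer, conf, localDirAllocator);
    }
    ByteBuffer dest = ByteBuffer.allocate(blockData.getSize(blockNumber));
    cache.get(blockNumber, dest);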
@@ -0,0 +1,250 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.apache.hadoop.fs.impl.prefetch;

import static org.apache.hadoop.fs.impl.prefetch.Validate.checkNotNegative;
import static org.apache.hadoop.fs.impl.prefetch.Validate.checkPositiveInteger;
import static org.apache.hadoop.fs.impl.prefetch.Validate.checkWithinRange;

/**
 * Holds information about blocks of data in a file.
 */
public final class BlockData {

  // State of each block of data.
  enum State {

    /** Data is not yet ready to be read from this block (still being prefetched). */
    NOT_READY,

    /** A read of this block has been enqueued in the prefetch queue. */
    QUEUED,

    /** This block is ready to be read. That is, it has been fully read. */
    READY,

    /** This block has been cached in the local disk cache. */
    CACHED
  }

  /**
   * State of all blocks in a file.
   */
  private State[] state;

  /**
   * The size of a file.
   */
  private final long fileSize;

  /**
   * The file is divided into blocks of this size.
   */
  private final int blockSize;

  /**
   * The file has these many blocks.
   */
  private final int numBlocks;

  /**
   * Constructs an instance of {@link BlockData}.
   * @param fileSize the size of a file.
   * @param blockSize the file is divided into blocks of this size.
   * @throws IllegalArgumentException if fileSize is negative.
   * @throws IllegalArgumentException if blockSize is negative.
   * @throws IllegalArgumentException if blockSize is zero or negative.
   */
  public BlockData(long fileSize, int blockSize) {
    checkNotNegative(fileSize, "fileSize");
    if (fileSize == 0) {
      checkNotNegative(blockSize, "blockSize");
    } else {
      checkPositiveInteger(blockSize, "blockSize");
    }

    this.fileSize = fileSize;
    this.blockSize = blockSize;
    this.numBlocks =
        (fileSize == 0)
            ? 0
            : ((int) (fileSize / blockSize)) + (fileSize % blockSize > 0
                ? 1
                : 0);
    this.state = new State[this.numBlocks];
    for (int b = 0; b < this.numBlocks; b++) {
      setState(b, State.NOT_READY);
    }
  }

  /**
   * Gets the size of each block.
   * @return the size of each block.
   */
  public int getBlockSize() {
    return blockSize;
  }

  /**
   * Gets the size of the associated file.
   * @return the size of the associated file.
   */
  public long getFileSize() {
    return fileSize;
  }

  /**
   * Gets the number of blocks in the associated file.
   * @return the number of blocks in the associated file.
   */
  public int getNumBlocks() {
    return numBlocks;
  }

  /**
   * Indicates whether the given block is the last block in the associated file.
   * @param blockNumber the id of the desired block.
   * @return true if the given block is the last block in the associated file, false otherwise.
   * @throws IllegalArgumentException if blockNumber is invalid.
   */
  public boolean isLastBlock(int blockNumber) {
    if (fileSize == 0) {
      return false;
    }

    throwIfInvalidBlockNumber(blockNumber);

    return blockNumber == (numBlocks - 1);
  }

  /**
   * Gets the id of the block that contains the given absolute offset.
   * @param offset the absolute offset to check.
   * @return the id of the block that contains the given absolute offset.
   * @throws IllegalArgumentException if offset is invalid.
   */
  public int getBlockNumber(long offset) {
    throwIfInvalidOffset(offset);

    return (int) (offset / blockSize);
  }

  /**
   * Gets the size of the given block.
   * @param blockNumber the id of the desired block.
   * @return the size of the given block.
   */
  public int getSize(int blockNumber) {
    if (fileSize == 0) {
      return 0;
    }

    if (isLastBlock(blockNumber)) {
      return (int) (fileSize - (((long) blockSize) * (numBlocks - 1)));
    } else {
      return blockSize;
    }
  }

  /**
   * Indicates whether the given absolute offset is valid.
   * @param offset absolute offset in the file.
   * @return true if the given absolute offset is valid, false otherwise.
   */
  public boolean isValidOffset(long offset) {
    return (offset >= 0) && (offset < fileSize);
  }

  /**
   * Gets the start offset of the given block.
   * @param blockNumber the id of the given block.
   * @return the start offset of the given block.
   * @throws IllegalArgumentException if blockNumber is invalid.
   */
  public long getStartOffset(int blockNumber) {
    throwIfInvalidBlockNumber(blockNumber);

    return blockNumber * (long) blockSize;
  }

  /**
   * Gets the relative offset corresponding to the given block and the absolute offset.
   * @param blockNumber the id of the given block.
   * @param offset absolute offset in the file.
   * @return the relative offset corresponding to the given block and the absolute offset.
   * @throws IllegalArgumentException if either blockNumber or offset is invalid.
   */
  public int getRelativeOffset(int blockNumber, long offset) {
    throwIfInvalidOffset(offset);

    return (int) (offset - getStartOffset(blockNumber));
  }

  /**
   * Gets the state of the given block.
   * @param blockNumber the id of the given block.
   * @return the state of the given block.
   * @throws IllegalArgumentException if blockNumber is invalid.
   */
  public State getState(int blockNumber) {
    throwIfInvalidBlockNumber(blockNumber);

    return state[blockNumber];
  }

  /**
   * Sets the state of the given block to the given value.
   * @param blockNumber the id of the given block.
   * @param blockState the target state.
   * @throws IllegalArgumentException if blockNumber is invalid.
   */
  public void setState(int blockNumber, State blockState) {
    throwIfInvalidBlockNumber(blockNumber);

    state[blockNumber] = blockState;
  }

  // Debug helper.
  public String getStateString() {
    StringBuilder sb = new StringBuilder();
    int blockNumber = 0;
    while (blockNumber < numBlocks) {
      State tstate = getState(blockNumber);
      int endBlockNumber = blockNumber;
      while ((endBlockNumber < numBlocks) && (getState(endBlockNumber)
          == tstate)) {
        endBlockNumber++;
      }
      sb.append(
          String.format("[%03d ~ %03d] %s%n", blockNumber, endBlockNumber - 1,
              tstate));
      blockNumber = endBlockNumber;
    }
    return sb.toString();
  }

  private void throwIfInvalidBlockNumber(int blockNumber) {
    checkWithinRange(blockNumber, "blockNumber", 0, numBlocks - 1);
  }

  private void throwIfInvalidOffset(long offset) {
    checkWithinRange(offset, "offset", 0, fileSize - 1);
  }
}
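The block arithmetic is easiest to see with concrete numbers; a sketch for a 10 MB file split into 4 MB blocks:

    // Hedged sketch: two full blocks plus a 2 MB tail block.
    BlockData bd = new BlockData(10 * 1024 * 1024, 4 * 1024 * 1024);
    int n = bd.getNumBlocks();                  // 3
    int tail = bd.getSize(2);                   // 2 * 1024 * 1024 (the remainder)
    int block = bd.getBlockNumber(5_000_000);   // 1
    boolean last = bd.isLastBlock(2);           // true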
@@ -0,0 +1,145 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.apache.hadoop.fs.impl.prefetch;

import java.io.Closeable;
import java.io.IOException;
import java.nio.ByteBuffer;

import static org.apache.hadoop.fs.impl.prefetch.Validate.checkNotNegative;
import static org.apache.hadoop.fs.impl.prefetch.Validate.checkNotNull;

/**
 * Provides read access to the underlying file one block at a time.
 *
 * This class is the simplest form of a {@code BlockManager} that does
 * not perform prefetching or caching.
 */
public abstract class BlockManager implements Closeable {

  /**
   * Information about each block of the underlying file.
   */
  private final BlockData blockData;

  /**
   * Constructs an instance of {@code BlockManager}.
   *
   * @param blockData information about each block of the underlying file.
   *
   * @throws IllegalArgumentException if blockData is null.
   */
  public BlockManager(BlockData blockData) {
    checkNotNull(blockData, "blockData");

    this.blockData = blockData;
  }

  /**
   * Gets block data information.
   *
   * @return instance of {@code BlockData}.
   */
  public BlockData getBlockData() {
    return blockData;
  }

  /**
   * Gets the block having the given {@code blockNumber}.
   *
   * The entire block is read into memory and returned as a {@code BufferData}.
   * The blocks are treated as a limited resource and must be released when
   * one is done reading them.
   *
   * @param blockNumber the number of the block to be read and returned.
   * @return {@code BufferData} having data from the given block.
   *
   * @throws IOException if there is an error reading the given block.
   * @throws IllegalArgumentException if blockNumber is negative.
   */
  public BufferData get(int blockNumber) throws IOException {
    checkNotNegative(blockNumber, "blockNumber");

    int size = blockData.getSize(blockNumber);
    ByteBuffer buffer = ByteBuffer.allocate(size);
    long startOffset = blockData.getStartOffset(blockNumber);
    read(buffer, startOffset, size);
    buffer.flip();
    return new BufferData(blockNumber, buffer);
  }

  /**
   * Reads into the given {@code buffer} {@code size} bytes from the underlying file
   * starting at {@code startOffset}.
   *
   * @param buffer the buffer to read data in to.
   * @param startOffset the offset at which reading starts.
   * @param size the number bytes to read.
   * @return number of bytes read.
   * @throws IOException if there is an error reading the given block.
   */
  public abstract int read(ByteBuffer buffer, long startOffset, int size) throws IOException;

  /**
   * Releases resources allocated to the given block.
   *
   * @param data the {@code BufferData} to release.
   *
   * @throws IllegalArgumentException if data is null.
   */
  public void release(BufferData data) {
    checkNotNull(data, "data");

    // Do nothing because we allocate a new buffer each time.
  }

  /**
   * Requests optional prefetching of the given block.
   *
   * @param blockNumber the id of the block to prefetch.
   *
   * @throws IllegalArgumentException if blockNumber is negative.
   */
  public void requestPrefetch(int blockNumber) {
    checkNotNegative(blockNumber, "blockNumber");

    // Do nothing because we do not support prefetches.
  }

  /**
   * Requests cancellation of any previously issued prefetch requests.
   */
  public void cancelPrefetches() {
    // Do nothing because we do not support prefetches.
  }

  /**
   * Requests that the given block should be copied to the cache. Optional operation.
   *
   * @param data the {@code BufferData} instance to optionally cache.
   */
  public void requestCaching(BufferData data) {
    // Do nothing because we do not support caching.
  }

  @Override
  public void close() {
  }
}
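A minimal concrete subclass makes the contract clear; this sketch reads from an in-memory byte array, so only read() needs an implementation:

    // Hedged sketch: the smallest useful BlockManager. get(blockNumber)
    // allocates a buffer, calls read(), flips, and wraps it in BufferData.
    class ArrayBlockManager extends BlockManager {
      private final byte[] data;

      ArrayBlockManager(byte[] data, int blockSize) {
        super(new BlockData(data.length, blockSize));
        this.data = data;
      }

      @Override
      public int read(ByteBuffer buffer, long startOffset, int size) {
        // copy the requested span into the caller's buffer
        buffer.put(data, (int) startOffset, size);
        return size;
      }
    }

    BlockManager manager = new ArrayBlockManager(new byte[10 * 1024 * 1024], 1024 * 1024);
    BufferData block0 = manager.get(0);   // one block, read eagerly
    manager.release(block0);              // no-op here; subclasses may pool buffers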
@@ -0,0 +1,425 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.apache.hadoop.fs.impl.prefetch;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.DoubleSummaryStatistics;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import static org.apache.hadoop.fs.impl.prefetch.Validate.checkNotNegative;

/**
 * Block level operations performed on a file.
 * This class is meant to be used by {@code BlockManager}.
 * It is separated out in its own file due to its size.
 *
 * This class is used for debugging/logging. Calls to this class
 * can be safely removed without affecting the overall operation.
 */
public final class BlockOperations {
  private static final Logger LOG = LoggerFactory.getLogger(BlockOperations.class);

  /**
   * Operation kind.
   */
  public enum Kind {
    UNKNOWN("??", "unknown", false),
    CANCEL_PREFETCHES("CP", "cancelPrefetches", false),
    CLOSE("CX", "close", false),
    CACHE_PUT("C+", "putC", true),
    GET_CACHED("GC", "getCached", true),
    GET_PREFETCHED("GP", "getPrefetched", true),
    GET_READ("GR", "getRead", true),
    PREFETCH("PF", "prefetch", true),
    RELEASE("RL", "release", true),
    REQUEST_CACHING("RC", "requestCaching", true),
    REQUEST_PREFETCH("RP", "requestPrefetch", true);

    private String shortName;
    private String name;
    private boolean hasBlock;

    Kind(String shortName, String name, boolean hasBlock) {
      this.shortName = shortName;
      this.name = name;
      this.hasBlock = hasBlock;
    }

    private static Map<String, Kind> shortNameToKind = new HashMap<>();

    public static Kind fromShortName(String shortName) {
      if (shortNameToKind.isEmpty()) {
        for (Kind kind : Kind.values()) {
          shortNameToKind.put(kind.shortName, kind);
        }
      }
      return shortNameToKind.get(shortName);
    }
  }

  public static class Operation {
    private final Kind kind;
    private final int blockNumber;
    private final long timestamp;

    public Operation(Kind kind, int blockNumber) {
      this.kind = kind;
      this.blockNumber = blockNumber;
      this.timestamp = System.nanoTime();
    }

    public Kind getKind() {
      return kind;
    }

    public int getBlockNumber() {
      return blockNumber;
    }

    public long getTimestamp() {
      return timestamp;
    }

    public void getSummary(StringBuilder sb) {
      if (kind.hasBlock) {
        sb.append(String.format("%s(%d)", kind.shortName, blockNumber));
      } else {
        sb.append(String.format("%s", kind.shortName));
      }
    }

    public String getDebugInfo() {
      if (kind.hasBlock) {
        return String.format("--- %s(%d)", kind.name, blockNumber);
      } else {
        return String.format("... %s()", kind.name);
      }
    }
  }

  public static class End extends Operation {
    private Operation op;

    public End(Operation op) {
      super(op.kind, op.blockNumber);
      this.op = op;
    }

    @Override
    public void getSummary(StringBuilder sb) {
      sb.append("E");
      super.getSummary(sb);
    }

    @Override
    public String getDebugInfo() {
      return "***" + super.getDebugInfo().substring(3);
    }

    public double duration() {
      return (getTimestamp() - op.getTimestamp()) / 1e9;
    }
  }

  private ArrayList<Operation> ops;
  private boolean debugMode;

  public BlockOperations() {
    this.ops = new ArrayList<>();
  }

  public synchronized void setDebug(boolean state) {
    debugMode = state;
  }

  private synchronized Operation add(Operation op) {
    if (debugMode) {
      LOG.info(op.getDebugInfo());
    }
    ops.add(op);
    return op;
  }

  public Operation getPrefetched(int blockNumber) {
    checkNotNegative(blockNumber, "blockNumber");

    return add(new Operation(Kind.GET_PREFETCHED, blockNumber));
  }

  public Operation getCached(int blockNumber) {
    checkNotNegative(blockNumber, "blockNumber");

    return add(new Operation(Kind.GET_CACHED, blockNumber));
  }

  public Operation getRead(int blockNumber) {
    checkNotNegative(blockNumber, "blockNumber");

    return add(new Operation(Kind.GET_READ, blockNumber));
  }

  public Operation release(int blockNumber) {
    checkNotNegative(blockNumber, "blockNumber");

    return add(new Operation(Kind.RELEASE, blockNumber));
  }

  public Operation requestPrefetch(int blockNumber) {
    checkNotNegative(blockNumber, "blockNumber");

    return add(new Operation(Kind.REQUEST_PREFETCH, blockNumber));
  }

  public Operation prefetch(int blockNumber) {
    checkNotNegative(blockNumber, "blockNumber");

    return add(new Operation(Kind.PREFETCH, blockNumber));
  }

  public Operation cancelPrefetches() {
    return add(new Operation(Kind.CANCEL_PREFETCHES, -1));
  }

  public Operation close() {
    return add(new Operation(Kind.CLOSE, -1));
|
||||
}
|
||||
|
||||
public Operation requestCaching(int blockNumber) {
|
||||
checkNotNegative(blockNumber, "blockNumber");
|
||||
|
||||
return add(new Operation(Kind.REQUEST_CACHING, blockNumber));
|
||||
}
|
||||
|
||||
public Operation addToCache(int blockNumber) {
|
||||
checkNotNegative(blockNumber, "blockNumber");
|
||||
|
||||
return add(new Operation(Kind.CACHE_PUT, blockNumber));
|
||||
}
|
||||
|
||||
public Operation end(Operation op) {
|
||||
return add(new End(op));
|
||||
}
|
||||
|
||||
private static void append(StringBuilder sb, String format, Object... args) {
|
||||
sb.append(String.format(format, args));
|
||||
}
|
||||
|
||||
public synchronized String getSummary(boolean showDebugInfo) {
|
||||
StringBuilder sb = new StringBuilder();
|
||||
for (Operation op : ops) {
|
||||
if (op != null) {
|
||||
if (showDebugInfo) {
|
||||
sb.append(op.getDebugInfo());
|
||||
sb.append("\n");
|
||||
} else {
|
||||
op.getSummary(sb);
|
||||
sb.append(";");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
sb.append("\n");
|
||||
getDurationInfo(sb);
|
||||
|
||||
return sb.toString();
|
||||
}
|
||||
|
||||
public synchronized void getDurationInfo(StringBuilder sb) {
|
||||
Map<Kind, DoubleSummaryStatistics> durations = new HashMap<>();
|
||||
for (Operation op : ops) {
|
||||
if (op instanceof End) {
|
||||
End endOp = (End) op;
|
||||
DoubleSummaryStatistics stats = durations.get(endOp.getKind());
|
||||
if (stats == null) {
|
||||
stats = new DoubleSummaryStatistics();
|
||||
durations.put(endOp.getKind(), stats);
|
||||
}
|
||||
stats.accept(endOp.duration());
|
||||
}
|
||||
}
|
||||
|
||||
List<Kind> kinds = Arrays.asList(
|
||||
Kind.GET_CACHED,
|
||||
Kind.GET_PREFETCHED,
|
||||
Kind.GET_READ,
|
||||
Kind.CACHE_PUT,
|
||||
Kind.PREFETCH,
|
||||
Kind.REQUEST_CACHING,
|
||||
Kind.REQUEST_PREFETCH,
|
||||
Kind.CANCEL_PREFETCHES,
|
||||
Kind.RELEASE,
|
||||
Kind.CLOSE
|
||||
);
|
||||
|
||||
for (Kind kind : kinds) {
|
||||
append(sb, "%-18s : ", kind);
|
||||
DoubleSummaryStatistics stats = durations.get(kind);
|
||||
if (stats == null) {
|
||||
append(sb, "--\n");
|
||||
} else {
|
||||
append(
|
||||
sb,
|
||||
"#ops = %3d, total = %5.1f, min: %3.1f, avg: %3.1f, max: %3.1f\n",
|
||||
stats.getCount(),
|
||||
stats.getSum(),
|
||||
stats.getMin(),
|
||||
stats.getAverage(),
|
||||
stats.getMax());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public synchronized void analyze(StringBuilder sb) {
|
||||
Map<Integer, List<Operation>> blockOps = new HashMap<>();
|
||||
|
||||
// Group-by block number.
|
||||
for (Operation op : ops) {
|
||||
if (op.blockNumber < 0) {
|
||||
continue;
|
||||
}
|
||||
|
||||
List<Operation> perBlockOps;
|
||||
if (!blockOps.containsKey(op.blockNumber)) {
|
||||
perBlockOps = new ArrayList<>();
|
||||
blockOps.put(op.blockNumber, perBlockOps);
|
||||
}
|
||||
|
||||
perBlockOps = blockOps.get(op.blockNumber);
|
||||
perBlockOps.add(op);
|
||||
}
|
||||
|
||||
List<Integer> prefetchedNotUsed = new ArrayList<>();
|
||||
List<Integer> cachedNotUsed = new ArrayList<>();
|
||||
|
||||
for (Map.Entry<Integer, List<Operation>> entry : blockOps.entrySet()) {
|
||||
Integer blockNumber = entry.getKey();
|
||||
List<Operation> perBlockOps = entry.getValue();
|
||||
Map<Kind, Integer> kindCounts = new HashMap<>();
|
||||
Map<Kind, Integer> endKindCounts = new HashMap<>();
|
||||
|
||||
for (Operation op : perBlockOps) {
|
||||
if (op instanceof End) {
|
||||
int endCount = endKindCounts.getOrDefault(op.kind, 0) + 1;
|
||||
endKindCounts.put(op.kind, endCount);
|
||||
} else {
|
||||
int count = kindCounts.getOrDefault(op.kind, 0) + 1;
|
||||
kindCounts.put(op.kind, count);
|
||||
}
|
||||
}
|
||||
|
||||
for (Kind kind : kindCounts.keySet()) {
|
||||
int count = kindCounts.getOrDefault(kind, 0);
|
||||
int endCount = endKindCounts.getOrDefault(kind, 0);
|
||||
if (count != endCount) {
|
||||
append(sb, "[%d] %s : #ops(%d) != #end-ops(%d)\n", blockNumber, kind, count, endCount);
|
||||
}
|
||||
|
||||
if (count > 1) {
|
||||
append(sb, "[%d] %s = %d\n", blockNumber, kind, count);
|
||||
}
|
||||
}
|
||||
|
||||
int prefetchCount = kindCounts.getOrDefault(Kind.PREFETCH, 0);
|
||||
int getPrefetchedCount = kindCounts.getOrDefault(Kind.GET_PREFETCHED, 0);
|
||||
if ((prefetchCount > 0) && (getPrefetchedCount < prefetchCount)) {
|
||||
prefetchedNotUsed.add(blockNumber);
|
||||
}
|
||||
|
||||
int cacheCount = kindCounts.getOrDefault(Kind.CACHE_PUT, 0);
|
||||
int getCachedCount = kindCounts.getOrDefault(Kind.GET_CACHED, 0);
|
||||
if ((cacheCount > 0) && (getCachedCount < cacheCount)) {
|
||||
cachedNotUsed.add(blockNumber);
|
||||
}
|
||||
}
|
||||
|
||||
if (!prefetchedNotUsed.isEmpty()) {
|
||||
append(sb, "Prefetched but not used: %s\n", getIntList(prefetchedNotUsed));
|
||||
}
|
||||
|
||||
if (!cachedNotUsed.isEmpty()) {
|
||||
append(sb, "Cached but not used: %s\n", getIntList(cachedNotUsed));
|
||||
}
|
||||
}
|
||||
|
||||
private static String getIntList(Iterable<Integer> nums) {
|
||||
List<String> numList = new ArrayList<>();
|
||||
for (Integer n : nums) {
|
||||
numList.add(n.toString());
|
||||
}
|
||||
return String.join(", ", numList);
|
||||
}
|
||||
|
||||
public static BlockOperations fromSummary(String summary) {
|
||||
BlockOperations ops = new BlockOperations();
|
||||
ops.setDebug(true);
|
||||
Pattern blockOpPattern = Pattern.compile("([A-Z+]+)(\\(([0-9]+)?\\))?");
|
||||
String[] tokens = summary.split(";");
|
||||
for (String token : tokens) {
|
||||
Matcher matcher = blockOpPattern.matcher(token);
|
||||
if (!matcher.matches()) {
|
||||
String message = String.format("Unknown summary format: %s", token);
|
||||
throw new IllegalArgumentException(message);
|
||||
}
|
||||
|
||||
String shortName = matcher.group(1);
|
||||
String blockNumberStr = matcher.group(3);
|
||||
int blockNumber = (blockNumberStr == null) ? -1 : Integer.parseInt(blockNumberStr);
|
||||
Kind kind = Kind.fromShortName(shortName);
|
||||
Kind endKind = null;
|
||||
if (kind == null) {
|
||||
if (shortName.charAt(0) == 'E') {
|
||||
endKind = Kind.fromShortName(shortName.substring(1));
|
||||
}
|
||||
}
|
||||
|
||||
if (kind == null && endKind == null) {
|
||||
String message = String.format("Unknown short name: %s (token = %s)", shortName, token);
|
||||
throw new IllegalArgumentException(message);
|
||||
}
|
||||
|
||||
if (kind != null) {
|
||||
ops.add(new Operation(kind, blockNumber));
|
||||
} else {
|
||||
Operation op = null;
|
||||
for (int i = ops.ops.size() - 1; i >= 0; i--) {
|
||||
op = ops.ops.get(i);
|
||||
if ((op.blockNumber == blockNumber) && (op.kind == endKind) && !(op instanceof End)) {
|
||||
ops.add(new End(op));
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (op == null) {
|
||||
LOG.warn("Start op not found: {}({})", endKind, blockNumber);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return ops;
|
||||
}
|
||||
}
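For orientation, a minimal sketch of how this tracker is driven; the block number and call sequence below are illustrative, not taken from the patch:

// Illustrative only; assumes the class above is on the classpath.
static void demoBlockOperations() {
  BlockOperations ops = new BlockOperations();
  BlockOperations.Operation op = ops.prefetch(0);
  // ... the actual prefetch work happens here ...
  ops.end(op);

  // Prints a compact trace such as "PF(0);EPF(0);" plus per-kind duration stats.
  System.out.println(ops.getSummary(false));

  // A recorded trace can be parsed back for offline analysis.
  BlockOperations replayed = BlockOperations.fromSummary("PF(0);EPF(0);");
}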
@@ -0,0 +1,195 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.apache.hadoop.fs.impl.prefetch;

import java.util.Collections;
import java.util.IdentityHashMap;
import java.util.Set;
import java.util.concurrent.ArrayBlockingQueue;

import static org.apache.hadoop.fs.impl.prefetch.Validate.checkNotNull;

/**
 * Manages a fixed pool of resources.
 *
 * Avoids creating a new resource if a previously created instance is already available.
 */
public abstract class BoundedResourcePool<T> extends ResourcePool<T> {
  /**
   * The size of this pool. Fixed at creation time.
   */
  private final int size;

  /**
   * Items currently available in the pool.
   */
  private ArrayBlockingQueue<T> items;

  /**
   * Items that have been created so far (regardless of whether they are currently available).
   */
  private Set<T> createdItems;

  /**
   * Constructs a resource pool of the given size.
   *
   * @param size the size of this pool. Cannot be changed post creation.
   *
   * @throws IllegalArgumentException if size is zero or negative.
   */
  public BoundedResourcePool(int size) {
    Validate.checkPositiveInteger(size, "size");

    this.size = size;
    this.items = new ArrayBlockingQueue<>(size);

    // The created items are identified based on their object reference.
    this.createdItems = Collections.newSetFromMap(new IdentityHashMap<T, Boolean>());
  }

  /**
   * Acquires a resource, blocking if necessary until one becomes available.
   */
  @Override
  public T acquire() {
    return this.acquireHelper(true);
  }

  /**
   * Acquires a resource if one is immediately available; otherwise returns null.
   */
  @Override
  public T tryAcquire() {
    return this.acquireHelper(false);
  }

  /**
   * Releases a previously acquired resource.
   *
   * @throws IllegalArgumentException if item is null.
   */
  @Override
  public void release(T item) {
    checkNotNull(item, "item");

    synchronized (createdItems) {
      if (!createdItems.contains(item)) {
        throw new IllegalArgumentException("This item is not a part of this pool");
      }
    }

    // Return if this item was released earlier.
    // We cannot use items.contains() because that check is not based on reference equality.
    for (T entry : items) {
      if (entry == item) {
        return;
      }
    }

    try {
      items.put(item);
    } catch (InterruptedException e) {
      throw new IllegalStateException("release() should never block", e);
    }
  }

  @Override
  public synchronized void close() {
    for (T item : createdItems) {
      close(item);
    }

    items.clear();
    items = null;

    createdItems.clear();
    createdItems = null;
  }

  /**
   * Derived classes may implement a way to clean up each item.
   */
  @Override
  protected synchronized void close(T item) {
    // Do nothing in this class. Allow overriding classes to take any cleanup action.
  }

  /**
   * Number of items created so far. Mostly for testing purposes.
   * @return the count.
   */
  public int numCreated() {
    synchronized (createdItems) {
      return createdItems.size();
    }
  }

  /**
   * Number of items available to be acquired. Mostly for testing purposes.
   * @return the number available.
   */
  public synchronized int numAvailable() {
    return (size - numCreated()) + items.size();
  }

  // For debugging purposes.
  @Override
  public synchronized String toString() {
    return String.format(
        "size = %d, #created = %d, #in-queue = %d, #available = %d",
        size, numCreated(), items.size(), numAvailable());
  }

  /**
   * Derived classes must implement a way to create an instance of a resource.
   */
  protected abstract T createNew();

  private T acquireHelper(boolean canBlock) {

    // Prefer reusing an item if one is available.
    // That avoids unnecessarily creating new instances.
    T result = items.poll();
    if (result != null) {
      return result;
    }

    synchronized (createdItems) {
      // Create a new instance if allowed by the capacity of this pool.
      if (createdItems.size() < size) {
        T item = createNew();
        createdItems.add(item);
        return item;
      }
    }

    if (canBlock) {
      try {
        // Block for an instance to be available.
        return items.take();
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        return null;
      }
    } else {
      return null;
    }
  }
}
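As a concrete illustration, a subclass only has to provide createNew(); the StringBuilderPool below is hypothetical and not part of the patch:

// Hypothetical subclass for illustration: pools StringBuilder instances.
class StringBuilderPool extends BoundedResourcePool<StringBuilder> {
  StringBuilderPool(int size) {
    super(size);
  }

  @Override
  protected StringBuilder createNew() {
    return new StringBuilder(1024);
  }
}

// Usage: acquire() blocks when all 4 instances are out; tryAcquire() returns null instead.
StringBuilderPool pool = new StringBuilderPool(4);
StringBuilder sb = pool.acquire();
try {
  sb.append("work");
} finally {
  pool.release(sb);
}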
@@ -0,0 +1,319 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.apache.hadoop.fs.impl.prefetch;

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Future;
import java.util.zip.CRC32;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Holds the state of a ByteBuffer that is in use by {@code CachingBlockManager}.
 *
 * This class is not meant to be of general use. It exists in its own file due to its size.
 * We use the terms block and buffer interchangeably in this file because one buffer
 * holds exactly one block of data.
 *
 * Holding all of the state associated with a block allows us to validate and control
 * state transitions in a synchronized fashion.
 */
public final class BufferData {

  private static final Logger LOG = LoggerFactory.getLogger(BufferData.class);

  public enum State {
    /**
     * Unknown / invalid state.
     */
    UNKNOWN,

    /**
     * Buffer has been acquired but has no data.
     */
    BLANK,

    /**
     * This block is being prefetched.
     */
    PREFETCHING,

    /**
     * This block is being added to the local cache.
     */
    CACHING,

    /**
     * This block has data and is ready to be read.
     */
    READY,

    /**
     * This block is no longer in-use and should not be used once in this state.
     */
    DONE
  }

  /**
   * Number of the block associated with this buffer.
   */
  private final int blockNumber;

  /**
   * The buffer associated with this block.
   */
  private ByteBuffer buffer;

  /**
   * Current state of this block.
   */
  private volatile State state;

  /**
   * Future of the action being performed on this block (e.g., prefetching or caching).
   */
  private Future<Void> action;

  /**
   * Checksum of the buffer contents once in READY state.
   */
  private long checksum = 0;

  /**
   * Constructs an instance of this class.
   *
   * @param blockNumber Number of the block associated with this buffer.
   * @param buffer The buffer associated with this block.
   *
   * @throws IllegalArgumentException if blockNumber is negative.
   * @throws IllegalArgumentException if buffer is null.
   */
  public BufferData(int blockNumber, ByteBuffer buffer) {
    Validate.checkNotNegative(blockNumber, "blockNumber");
    Validate.checkNotNull(buffer, "buffer");

    this.blockNumber = blockNumber;
    this.buffer = buffer;
    this.state = State.BLANK;
  }

  /**
   * Gets the id of this block.
   *
   * @return the id of this block.
   */
  public int getBlockNumber() {
    return this.blockNumber;
  }

  /**
   * Gets the buffer associated with this block.
   *
   * @return the buffer associated with this block.
   */
  public ByteBuffer getBuffer() {
    return this.buffer;
  }

  /**
   * Gets the state of this block.
   *
   * @return the state of this block.
   */
  public State getState() {
    return this.state;
  }

  /**
   * Gets the checksum of data in this block.
   *
   * @return the checksum of data in this block.
   */
  public long getChecksum() {
    return this.checksum;
  }

  /**
   * Computes the CRC32 checksum of the given buffer's contents.
   *
   * @param buffer the buffer whose contents' checksum is to be computed.
   * @return the computed checksum.
   */
  public static long getChecksum(ByteBuffer buffer) {
    ByteBuffer tempBuffer = buffer.duplicate();
    tempBuffer.rewind();
    CRC32 crc32 = new CRC32();
    crc32.update(tempBuffer);
    return crc32.getValue();
  }

  public synchronized Future<Void> getActionFuture() {
    return this.action;
  }

  /**
   * Indicates that a prefetch operation is in progress.
   *
   * @param actionFuture the {@code Future} of a prefetch action.
   *
   * @throws IllegalArgumentException if actionFuture is null.
   */
  public synchronized void setPrefetch(Future<Void> actionFuture) {
    Validate.checkNotNull(actionFuture, "actionFuture");

    this.updateState(State.PREFETCHING, State.BLANK);
    this.action = actionFuture;
  }

  /**
   * Indicates that a caching operation is in progress.
   *
   * @param actionFuture the {@code Future} of a caching action.
   *
   * @throws IllegalArgumentException if actionFuture is null.
   */
  public synchronized void setCaching(Future<Void> actionFuture) {
    Validate.checkNotNull(actionFuture, "actionFuture");

    this.throwIfStateIncorrect(State.PREFETCHING, State.READY);
    this.state = State.CACHING;
    this.action = actionFuture;
  }

  /**
   * Marks the completion of reading data into the buffer.
   * The buffer cannot be modified once in this state.
   *
   * @param expectedCurrentState the collection of states from which transition to READY is allowed.
   */
  public synchronized void setReady(State... expectedCurrentState) {
    if (this.checksum != 0) {
      throw new IllegalStateException("Checksum cannot be changed once set");
    }

    this.buffer = this.buffer.asReadOnlyBuffer();
    this.checksum = getChecksum(this.buffer);
    this.buffer.rewind();
    this.updateState(State.READY, expectedCurrentState);
  }

  /**
   * Indicates that this block is no longer of use and can be reclaimed.
   */
  public synchronized void setDone() {
    if (this.checksum != 0) {
      if (getChecksum(this.buffer) != this.checksum) {
        throw new IllegalStateException("checksum changed after setReady()");
      }
    }
    this.state = State.DONE;
    this.action = null;
  }

  /**
   * Updates the current state to the specified value.
   * Asserts that the current state is as expected.
   * @param newState the state to transition to.
   * @param expectedCurrentState the collection of states from which
   *        transition to {@code newState} is allowed.
   *
   * @throws IllegalArgumentException if newState is null.
   * @throws IllegalArgumentException if expectedCurrentState is null.
   */
  public synchronized void updateState(State newState,
      State... expectedCurrentState) {
    Validate.checkNotNull(newState, "newState");
    Validate.checkNotNull(expectedCurrentState, "expectedCurrentState");

    this.throwIfStateIncorrect(expectedCurrentState);
    this.state = newState;
  }

  /**
   * Helper that asserts the current state is one of the expected values.
   *
   * @param states the collection of allowed states.
   *
   * @throws IllegalArgumentException if states is null.
   */
  public void throwIfStateIncorrect(State... states) {
    Validate.checkNotNull(states, "states");

    if (this.stateEqualsOneOf(states)) {
      return;
    }

    List<String> statesStr = new ArrayList<String>();
    for (State s : states) {
      statesStr.add(s.toString());
    }

    String message = String.format(
        "Expected buffer state to be '%s' but found: %s",
        String.join(" or ", statesStr), this);
    throw new IllegalStateException(message);
  }

  public boolean stateEqualsOneOf(State... states) {
    State currentState = this.state;

    for (State s : states) {
      if (currentState == s) {
        return true;
      }
    }

    return false;
  }

  public String toString() {

    return String.format(
        "[%03d] id: %03d, %s: buf: %s, checksum: %d, future: %s",
        this.blockNumber,
        System.identityHashCode(this),
        this.state,
        this.getBufferStr(this.buffer),
        this.checksum,
        this.getFutureStr(this.action));
  }

  private String getFutureStr(Future<Void> f) {
    if (f == null) {
      return "--";
    } else {
      return this.action.isDone() ? "done" : "not done";
    }
  }

  private String getBufferStr(ByteBuffer buf) {
    if (buf == null) {
      return "--";
    } else {
      return String.format(
          "(id = %d, pos = %d, lim = %d)",
          System.identityHashCode(buf),
          buf.position(), buf.limit());
    }
  }
}
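A minimal sketch of the intended state machine, assuming a single-threaded caller; the block number and buffer size are illustrative:

// Illustrative only: one buffer through BLANK -> PREFETCHING -> READY -> DONE.
static void demoBufferDataStates() {
  BufferData data = new BufferData(7, ByteBuffer.allocate(1024));  // starts BLANK

  CompletableFuture<Void> prefetch = new CompletableFuture<>();
  data.setPrefetch(prefetch);                    // BLANK -> PREFETCHING

  // ... the prefetch fills the buffer; setReady() then freezes it
  // (read-only view) and records a CRC32 checksum of its contents.
  data.setReady(BufferData.State.PREFETCHING);   // PREFETCHING -> READY

  data.setDone();                                // READY -> DONE; re-verifies the checksum
}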
@@ -0,0 +1,323 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.apache.hadoop.fs.impl.prefetch;

import java.io.Closeable;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collections;
import java.util.IdentityHashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Future;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import static java.util.Objects.requireNonNull;
import static org.apache.hadoop.fs.impl.prefetch.Validate.checkNotNegative;
import static org.apache.hadoop.fs.impl.prefetch.Validate.checkState;
import static org.apache.hadoop.util.Preconditions.checkArgument;
import static org.apache.hadoop.util.Preconditions.checkNotNull;

/**
 * Manages a fixed pool of {@code ByteBuffer} instances.
 * <p>
 * Avoids creating a new buffer if a previously created buffer is already available.
 */
public class BufferPool implements Closeable {

  private static final Logger LOG = LoggerFactory.getLogger(BufferPool.class);

  /**
   * Max number of buffers in this pool.
   */
  private final int size;

  /**
   * Size in bytes of each buffer.
   */
  private final int bufferSize;

  /*
   Invariants for internal state.
   -- a buffer is either in this.pool or in this.allocated
   -- transition between this.pool <==> this.allocated must be atomic
   -- only one buffer allocated for a given blockNumber
  */

  /**
   * Underlying bounded resource pool.
   */
  private BoundedResourcePool<ByteBuffer> pool;

  /**
   * Allows associating metadata with each buffer in the pool.
   */
  private Map<BufferData, ByteBuffer> allocated;

  /**
   * Prefetching stats.
   */
  private PrefetchingStatistics prefetchingStatistics;

  /**
   * Initializes a new instance of the {@code BufferPool} class.
   * @param size number of buffers in this pool.
   * @param bufferSize size in bytes of each buffer.
   * @param prefetchingStatistics statistics for this stream.
   * @throws IllegalArgumentException if size is zero or negative.
   * @throws IllegalArgumentException if bufferSize is zero or negative.
   */
  public BufferPool(int size,
      int bufferSize,
      PrefetchingStatistics prefetchingStatistics) {
    Validate.checkPositiveInteger(size, "size");
    Validate.checkPositiveInteger(bufferSize, "bufferSize");

    this.size = size;
    this.bufferSize = bufferSize;
    this.allocated = new IdentityHashMap<BufferData, ByteBuffer>();
    this.prefetchingStatistics = requireNonNull(prefetchingStatistics);
    this.pool = new BoundedResourcePool<ByteBuffer>(size) {
      @Override
      public ByteBuffer createNew() {
        ByteBuffer buffer = ByteBuffer.allocate(bufferSize);
        prefetchingStatistics.memoryAllocated(bufferSize);
        return buffer;
      }
    };
  }

  /**
   * Gets a list of all blocks in this pool.
   * @return a list of all blocks in this pool.
   */
  public List<BufferData> getAll() {
    synchronized (allocated) {
      return Collections.unmodifiableList(new ArrayList<>(allocated.keySet()));
    }
  }

  /**
   * Acquires a {@code ByteBuffer}, blocking if necessary until one becomes available.
   * @param blockNumber the id of the block to acquire.
   * @return the acquired block's {@code BufferData}.
   */
  public synchronized BufferData acquire(int blockNumber) {
    BufferData data;
    final int maxRetryDelayMs = 600 * 1000;
    final int statusUpdateDelayMs = 120 * 1000;
    Retryer retryer = new Retryer(10, maxRetryDelayMs, statusUpdateDelayMs);

    do {
      if (retryer.updateStatus()) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("waiting to acquire block: {}", blockNumber);
          LOG.debug("state = {}", this);
        }
        releaseReadyBlock(blockNumber);
      }
      data = tryAcquire(blockNumber);
    }
    while ((data == null) && retryer.continueRetry());

    if (data != null) {
      return data;
    } else {
      String message =
          String.format("Wait failed for acquire(%d)", blockNumber);
      throw new IllegalStateException(message);
    }
  }

  /**
   * Acquires a buffer if one is immediately available. Otherwise returns null.
   * @param blockNumber the id of the block to try acquire.
   * @return the acquired block's {@code BufferData} or null.
   */
  public synchronized BufferData tryAcquire(int blockNumber) {
    return acquireHelper(blockNumber, false);
  }

  private synchronized BufferData acquireHelper(int blockNumber,
      boolean canBlock) {
    checkNotNegative(blockNumber, "blockNumber");

    releaseDoneBlocks();

    BufferData data = find(blockNumber);
    if (data != null) {
      return data;
    }

    ByteBuffer buffer = canBlock ? pool.acquire() : pool.tryAcquire();
    if (buffer == null) {
      return null;
    }

    buffer.clear();
    data = new BufferData(blockNumber, buffer.duplicate());

    synchronized (allocated) {
      checkState(find(blockNumber) == null, "buffer data already exists");

      allocated.put(data, buffer);
    }

    return data;
  }

  /**
   * Releases resources for any blocks marked as 'done'.
   */
  private synchronized void releaseDoneBlocks() {
    for (BufferData data : getAll()) {
      if (data.stateEqualsOneOf(BufferData.State.DONE)) {
        release(data);
      }
    }
  }

  /**
   * If no blocks were released after calling releaseDoneBlocks() a few times,
   * we may end up waiting forever. To avoid that situation, we try releasing
   * a 'ready' block farthest away from the given block.
   */
  private synchronized void releaseReadyBlock(int blockNumber) {
    BufferData releaseTarget = null;
    for (BufferData data : getAll()) {
      if (data.stateEqualsOneOf(BufferData.State.READY)) {
        if (releaseTarget == null) {
          releaseTarget = data;
        } else {
          if (distance(data, blockNumber) > distance(releaseTarget,
              blockNumber)) {
            releaseTarget = data;
          }
        }
      }
    }

    if (releaseTarget != null) {
      LOG.warn("releasing 'ready' block: {}", releaseTarget);
      releaseTarget.setDone();
    }
  }

  private int distance(BufferData data, int blockNumber) {
    return Math.abs(data.getBlockNumber() - blockNumber);
  }

  /**
   * Releases a previously acquired resource.
   * @param data the {@code BufferData} instance to release.
   * @throws IllegalArgumentException if data is null.
   * @throws IllegalArgumentException if data cannot be released due to its state.
   */
  public synchronized void release(BufferData data) {
    checkNotNull(data, "data");

    synchronized (data) {
      checkArgument(
          canRelease(data),
          String.format("Unable to release buffer: %s", data));

      ByteBuffer buffer = allocated.get(data);
      if (buffer == null) {
        // Likely released earlier.
        return;
      }
      buffer.clear();
      pool.release(buffer);
      allocated.remove(data);
    }

    releaseDoneBlocks();
  }

  @Override
  public synchronized void close() {
    for (BufferData data : getAll()) {
      Future<Void> actionFuture = data.getActionFuture();
      if (actionFuture != null) {
        actionFuture.cancel(true);
      }
    }

    int currentPoolSize = pool.numCreated();

    pool.close();
    pool = null;

    allocated.clear();
    allocated = null;

    prefetchingStatistics.memoryFreed(currentPoolSize * bufferSize);
  }

  // For debugging purposes.
  @Override
  public String toString() {
    StringBuilder sb = new StringBuilder();
    sb.append(pool.toString());
    sb.append("\n");
    List<BufferData> allData = new ArrayList<>(getAll());
    Collections.sort(allData,
        (d1, d2) -> d1.getBlockNumber() - d2.getBlockNumber());
    for (BufferData data : allData) {
      sb.append(data.toString());
      sb.append("\n");
    }

    return sb.toString();
  }

  // Number of ByteBuffers created so far.
  public synchronized int numCreated() {
    return pool.numCreated();
  }

  // Number of ByteBuffers available to be acquired.
  public synchronized int numAvailable() {
    releaseDoneBlocks();
    return pool.numAvailable();
  }

  private BufferData find(int blockNumber) {
    synchronized (allocated) {
      for (BufferData data : allocated.keySet()) {
        if ((data.getBlockNumber() == blockNumber)
            && !data.stateEqualsOneOf(BufferData.State.DONE)) {
          return data;
        }
      }
    }

    return null;
  }

  private boolean canRelease(BufferData data) {
    return data.stateEqualsOneOf(
        BufferData.State.DONE,
        BufferData.State.READY);
  }
}
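A short sketch of the acquire/release cycle, using the no-op statistics singleton defined later in this patch; the pool geometry (4 buffers of 1 MB) is illustrative:

// Illustrative only: 4 buffers of 1 MB each, with the no-op statistics singleton.
static void demoBufferPool() {
  BufferPool pool = new BufferPool(4, 1024 * 1024,
      EmptyPrefetchingStatistics.getInstance());

  BufferData data = pool.acquire(0);   // blocks until a buffer is free
  // ... fill data.getBuffer(), mark it ready, consume it ...
  data.setDone();                      // allow the pool to reclaim it
  pool.release(data);                  // return the underlying ByteBuffer

  pool.close();                        // cancels pending actions, reports freed memory
}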
@@ -0,0 +1,654 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.apache.hadoop.fs.impl.prefetch;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.time.Duration;
import java.time.Instant;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Supplier;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.LocalDirAllocator;
import org.apache.hadoop.fs.statistics.DurationTracker;

import static java.util.Objects.requireNonNull;

import static org.apache.hadoop.fs.impl.prefetch.Validate.checkNotNegative;
import static org.apache.hadoop.io.IOUtils.cleanupWithLogger;

/**
 * Provides read access to the underlying file one block at a time.
 * Improves read performance by prefetching and locally caching blocks.
 */
public abstract class CachingBlockManager extends BlockManager {
  private static final Logger LOG = LoggerFactory.getLogger(CachingBlockManager.class);
  private static final int TIMEOUT_MINUTES = 60;

  /**
   * Asynchronous tasks are performed in this pool.
   */
  private final ExecutorServiceFuturePool futurePool;

  /**
   * Pool of shared ByteBuffer instances.
   */
  private BufferPool bufferPool;

  /**
   * Size of the in-memory cache in terms of number of blocks.
   * Total memory consumption is up to bufferPoolSize * blockSize.
   */
  private final int bufferPoolSize;

  /**
   * Local block cache.
   */
  private BlockCache cache;

  /**
   * Error counts. For testing purposes.
   */
  private final AtomicInteger numCachingErrors;
  private final AtomicInteger numReadErrors;

  /**
   * Operations performed by this block manager.
   */
  private final BlockOperations ops;

  private boolean closed;

  /**
   * If a single caching operation takes more than this time (in seconds),
   * we disable caching to prevent further perf degradation due to caching.
   */
  private static final int SLOW_CACHING_THRESHOLD = 5;

  /**
   * Once set to true, any further caching requests will be ignored.
   */
  private final AtomicBoolean cachingDisabled;

  private final PrefetchingStatistics prefetchingStatistics;

  private final Configuration conf;

  private final LocalDirAllocator localDirAllocator;

  /**
   * Constructs an instance of a {@code CachingBlockManager}.
   *
   * @param futurePool asynchronous tasks are performed in this pool.
   * @param blockData information about each block of the underlying file.
   * @param bufferPoolSize size of the in-memory cache in terms of number of blocks.
   * @param prefetchingStatistics statistics for this stream.
   * @param conf the configuration.
   * @param localDirAllocator the local dir allocator instance.
   * @throws IllegalArgumentException if bufferPoolSize is zero or negative.
   */
  public CachingBlockManager(
      ExecutorServiceFuturePool futurePool,
      BlockData blockData,
      int bufferPoolSize,
      PrefetchingStatistics prefetchingStatistics,
      Configuration conf,
      LocalDirAllocator localDirAllocator) {
    super(blockData);

    Validate.checkPositiveInteger(bufferPoolSize, "bufferPoolSize");

    this.futurePool = requireNonNull(futurePool);
    this.bufferPoolSize = bufferPoolSize;
    this.numCachingErrors = new AtomicInteger();
    this.numReadErrors = new AtomicInteger();
    this.cachingDisabled = new AtomicBoolean();
    this.prefetchingStatistics = requireNonNull(prefetchingStatistics);

    if (this.getBlockData().getFileSize() > 0) {
      this.bufferPool = new BufferPool(bufferPoolSize, this.getBlockData().getBlockSize(),
          this.prefetchingStatistics);
      this.cache = this.createCache();
    }

    this.ops = new BlockOperations();
    this.ops.setDebug(false);
    this.conf = requireNonNull(conf);
    this.localDirAllocator = localDirAllocator;
  }

  /**
   * Gets the block having the given {@code blockNumber}.
   *
   * @throws IllegalArgumentException if blockNumber is negative.
   */
  @Override
  public BufferData get(int blockNumber) throws IOException {
    checkNotNegative(blockNumber, "blockNumber");

    BufferData data;
    final int maxRetryDelayMs = bufferPoolSize * 120 * 1000;
    final int statusUpdateDelayMs = 120 * 1000;
    Retryer retryer = new Retryer(10, maxRetryDelayMs, statusUpdateDelayMs);
    boolean done;

    do {
      if (closed) {
        throw new IOException("this stream is already closed");
      }

      data = bufferPool.acquire(blockNumber);
      done = getInternal(data);

      if (retryer.updateStatus()) {
        LOG.warn("waiting to get block: {}", blockNumber);
        LOG.info("state = {}", this.toString());
      }
    }
    while (!done && retryer.continueRetry());

    if (done) {
      return data;
    } else {
      String message = String.format("Wait failed for get(%d)", blockNumber);
      throw new IllegalStateException(message);
    }
  }

  private boolean getInternal(BufferData data) throws IOException {
    Validate.checkNotNull(data, "data");

    // Opportunistic check without locking.
    if (data.stateEqualsOneOf(
        BufferData.State.PREFETCHING,
        BufferData.State.CACHING,
        BufferData.State.DONE)) {
      return false;
    }

    synchronized (data) {
      // Reconfirm state after locking.
      if (data.stateEqualsOneOf(
          BufferData.State.PREFETCHING,
          BufferData.State.CACHING,
          BufferData.State.DONE)) {
        return false;
      }

      int blockNumber = data.getBlockNumber();
      if (data.getState() == BufferData.State.READY) {
        BlockOperations.Operation op = ops.getPrefetched(blockNumber);
        ops.end(op);
        return true;
      }

      data.throwIfStateIncorrect(BufferData.State.BLANK);
      read(data);
      return true;
    }
  }

  /**
   * Releases resources allocated to the given block.
   *
   * @throws IllegalArgumentException if data is null.
   */
  @Override
  public void release(BufferData data) {
    if (closed) {
      return;
    }

    Validate.checkNotNull(data, "data");

    BlockOperations.Operation op = ops.release(data.getBlockNumber());
    bufferPool.release(data);
    ops.end(op);
  }

  @Override
  public synchronized void close() {
    if (closed) {
      return;
    }

    closed = true;

    final BlockOperations.Operation op = ops.close();

    // Cancel any prefetches in progress.
    cancelPrefetches();

    cleanupWithLogger(LOG, cache);

    ops.end(op);
    LOG.info(ops.getSummary(false));

    bufferPool.close();
    bufferPool = null;
  }

  /**
   * Requests optional prefetching of the given block.
   * The block is prefetched only if we can acquire a free buffer.
   *
   * @throws IllegalArgumentException if blockNumber is negative.
   */
  @Override
  public void requestPrefetch(int blockNumber) {
    checkNotNegative(blockNumber, "blockNumber");

    if (closed) {
      return;
    }

    // We initiate a prefetch only if we can acquire a buffer from the shared pool.
    BufferData data = bufferPool.tryAcquire(blockNumber);
    if (data == null) {
      return;
    }

    // Opportunistic check without locking.
    if (!data.stateEqualsOneOf(BufferData.State.BLANK)) {
      // The block is ready or being prefetched/cached.
      return;
    }

    synchronized (data) {
      // Reconfirm state after locking.
      if (!data.stateEqualsOneOf(BufferData.State.BLANK)) {
        // The block is ready or being prefetched/cached.
        return;
      }

      BlockOperations.Operation op = ops.requestPrefetch(blockNumber);
      PrefetchTask prefetchTask = new PrefetchTask(data, this, Instant.now());
      Future<Void> prefetchFuture = futurePool.executeFunction(prefetchTask);
      data.setPrefetch(prefetchFuture);
      ops.end(op);
    }
  }

  /**
   * Requests cancellation of any previously issued prefetch requests.
   */
  @Override
  public void cancelPrefetches() {
    BlockOperations.Operation op = ops.cancelPrefetches();

    for (BufferData data : bufferPool.getAll()) {
      // We add blocks being prefetched to the local cache so that the prefetch is not wasted.
      if (data.stateEqualsOneOf(BufferData.State.PREFETCHING, BufferData.State.READY)) {
        requestCaching(data);
      }
    }

    ops.end(op);
  }

  private void read(BufferData data) throws IOException {
    synchronized (data) {
      try {
        readBlock(data, false, BufferData.State.BLANK);
      } catch (IOException e) {
        LOG.error("error reading block {}", data.getBlockNumber(), e);
        throw e;
      }
    }
  }

  private void prefetch(BufferData data, Instant taskQueuedStartTime) throws IOException {
    synchronized (data) {
      prefetchingStatistics.executorAcquired(
          Duration.between(taskQueuedStartTime, Instant.now()));
      readBlock(
          data,
          true,
          BufferData.State.PREFETCHING,
          BufferData.State.CACHING);
    }
  }

  private void readBlock(BufferData data, boolean isPrefetch, BufferData.State... expectedState)
      throws IOException {

    if (closed) {
      return;
    }

    BlockOperations.Operation op = null;
    DurationTracker tracker = null;

    synchronized (data) {
      try {
        if (data.stateEqualsOneOf(BufferData.State.DONE, BufferData.State.READY)) {
          // DONE  : Block was released, likely due to caching being disabled on slow perf.
          // READY : Block was already fetched by another thread. No need to re-read.
          return;
        }

        data.throwIfStateIncorrect(expectedState);
        int blockNumber = data.getBlockNumber();

        // Prefer reading from cache over reading from network.
        if (cache.containsBlock(blockNumber)) {
          op = ops.getCached(blockNumber);
          cache.get(blockNumber, data.getBuffer());
          data.setReady(expectedState);
          return;
        }

        if (isPrefetch) {
          tracker = prefetchingStatistics.prefetchOperationStarted();
          op = ops.prefetch(data.getBlockNumber());
        } else {
          op = ops.getRead(data.getBlockNumber());
        }

        long offset = getBlockData().getStartOffset(data.getBlockNumber());
        int size = getBlockData().getSize(data.getBlockNumber());
        ByteBuffer buffer = data.getBuffer();
        buffer.clear();
        read(buffer, offset, size);
        buffer.flip();
        data.setReady(expectedState);
      } catch (Exception e) {
        if (isPrefetch && tracker != null) {
          tracker.failed();
        }

        numReadErrors.incrementAndGet();
        data.setDone();
        throw e;
      } finally {
        if (op != null) {
          ops.end(op);
        }

        if (isPrefetch) {
          prefetchingStatistics.prefetchOperationCompleted();
          if (tracker != null) {
            tracker.close();
          }
        }
      }
    }
  }

  /**
   * Read task that is submitted to the future pool.
   */
  private static class PrefetchTask implements Supplier<Void> {
    private final BufferData data;
    private final CachingBlockManager blockManager;
    private final Instant taskQueuedStartTime;

    PrefetchTask(BufferData data, CachingBlockManager blockManager, Instant taskQueuedStartTime) {
      this.data = data;
      this.blockManager = blockManager;
      this.taskQueuedStartTime = taskQueuedStartTime;
    }

    @Override
    public Void get() {
      try {
        blockManager.prefetch(data, taskQueuedStartTime);
      } catch (Exception e) {
        LOG.info("error prefetching block {}. {}", data.getBlockNumber(), e.getMessage());
        LOG.debug("error prefetching block {}", data.getBlockNumber(), e);
      }
      return null;
    }
  }

  private static final BufferData.State[] EXPECTED_STATE_AT_CACHING =
      new BufferData.State[] {
          BufferData.State.PREFETCHING, BufferData.State.READY
      };

  /**
   * Requests that the given block should be copied to the local cache.
   * The block must not be accessed by the caller after calling this method
   * because it will be released asynchronously relative to the caller.
   *
   * @throws IllegalArgumentException if data is null.
   */
  @Override
  public void requestCaching(BufferData data) {
    if (closed) {
      return;
    }

    if (cachingDisabled.get()) {
      data.setDone();
      return;
    }

    Validate.checkNotNull(data, "data");

    // Opportunistic check without locking.
    if (!data.stateEqualsOneOf(EXPECTED_STATE_AT_CACHING)) {
      return;
    }

    synchronized (data) {
      // Reconfirm state after locking.
      if (!data.stateEqualsOneOf(EXPECTED_STATE_AT_CACHING)) {
        return;
      }

      if (cache.containsBlock(data.getBlockNumber())) {
        data.setDone();
        return;
      }

      BufferData.State state = data.getState();

      BlockOperations.Operation op = ops.requestCaching(data.getBlockNumber());
      Future<Void> blockFuture;
      if (state == BufferData.State.PREFETCHING) {
        blockFuture = data.getActionFuture();
      } else {
        CompletableFuture<Void> cf = new CompletableFuture<>();
        cf.complete(null);
        blockFuture = cf;
      }

      CachePutTask task =
          new CachePutTask(data, blockFuture, this, Instant.now());
      Future<Void> actionFuture = futurePool.executeFunction(task);
      data.setCaching(actionFuture);
      ops.end(op);
    }
  }

  private void addToCacheAndRelease(BufferData data, Future<Void> blockFuture,
      Instant taskQueuedStartTime) {
    prefetchingStatistics.executorAcquired(
        Duration.between(taskQueuedStartTime, Instant.now()));

    if (closed) {
      return;
    }

    if (cachingDisabled.get()) {
      data.setDone();
      return;
    }

    try {
      blockFuture.get(TIMEOUT_MINUTES, TimeUnit.MINUTES);
      if (data.stateEqualsOneOf(BufferData.State.DONE)) {
        // There was an error during prefetch.
        return;
      }
    } catch (Exception e) {
      LOG.info("error waiting on blockFuture: {}. {}", data, e.getMessage());
      LOG.debug("error waiting on blockFuture: {}", data, e);
      data.setDone();
      return;
    }

    if (cachingDisabled.get()) {
      data.setDone();
      return;
    }

    BlockOperations.Operation op = null;

    synchronized (data) {
      try {
        if (data.stateEqualsOneOf(BufferData.State.DONE)) {
          return;
        }

        if (cache.containsBlock(data.getBlockNumber())) {
          data.setDone();
          return;
        }

        op = ops.addToCache(data.getBlockNumber());
        ByteBuffer buffer = data.getBuffer().duplicate();
        buffer.rewind();
        cachePut(data.getBlockNumber(), buffer);
        data.setDone();
      } catch (Exception e) {
        numCachingErrors.incrementAndGet();
        LOG.info("error adding block to cache after wait: {}. {}", data, e.getMessage());
        LOG.debug("error adding block to cache after wait: {}", data, e);
        data.setDone();
      }

      if (op != null) {
        BlockOperations.End endOp = (BlockOperations.End) ops.end(op);
        if (endOp.duration() > SLOW_CACHING_THRESHOLD) {
          if (!cachingDisabled.getAndSet(true)) {
            String message = String.format(
                "Caching disabled because of slow operation (%.1f sec)", endOp.duration());
            LOG.warn(message);
          }
        }
      }
    }
  }

  protected BlockCache createCache() {
    return new SingleFilePerBlockCache(prefetchingStatistics);
  }

  protected void cachePut(int blockNumber, ByteBuffer buffer) throws IOException {
    if (closed) {
      return;
    }

    cache.put(blockNumber, buffer, conf, localDirAllocator);
  }

  private static class CachePutTask implements Supplier<Void> {
    private final BufferData data;

    // Block being asynchronously fetched.
    private final Future<Void> blockFuture;

    // Block manager that manages this block.
    private final CachingBlockManager blockManager;

    private final Instant taskQueuedStartTime;

    CachePutTask(
        BufferData data,
        Future<Void> blockFuture,
        CachingBlockManager blockManager,
        Instant taskQueuedStartTime) {
      this.data = data;
      this.blockFuture = blockFuture;
      this.blockManager = blockManager;
      this.taskQueuedStartTime = taskQueuedStartTime;
    }

    @Override
    public Void get() {
      blockManager.addToCacheAndRelease(data, blockFuture, taskQueuedStartTime);
      return null;
    }
  }

  /**
   * Number of ByteBuffers available to be acquired.
   *
   * @return the number of available buffers.
   */
  public int numAvailable() {
    return bufferPool.numAvailable();
  }

  /**
   * Number of caching operations completed.
   *
   * @return the number of cached buffers.
   */
  public int numCached() {
    return cache.size();
  }

  /**
   * Number of errors encountered when caching.
   *
   * @return the number of errors encountered when caching.
   */
  public int numCachingErrors() {
    return numCachingErrors.get();
  }

  /**
   * Number of errors encountered when reading.
   *
   * @return the number of errors encountered when reading.
   */
  public int numReadErrors() {
    return numReadErrors.get();
  }

  BufferData getData(int blockNumber) {
    return bufferPool.tryAcquire(blockNumber);
  }

  @Override
  public String toString() {
    StringBuilder sb = new StringBuilder();

    sb.append("cache(");
    sb.append(cache.toString());
    sb.append("); ");

    sb.append("pool: ");
    sb.append(bufferPool.toString());

    return sb.toString();
  }
}
|
|
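Note on the slow-caching path above: the cachingDisabled flag relies on AtomicBoolean.getAndSet so that the "caching disabled" warning is logged exactly once even when several caching tasks cross the threshold concurrently. A minimal standalone sketch of the idiom (illustrative only, not part of the change):

import java.util.concurrent.atomic.AtomicBoolean;

public class LogOnceExample {
  private static final AtomicBoolean DISABLED = new AtomicBoolean(false);

  static void maybeDisable(double seconds) {
    // getAndSet returns the previous value, so only the first caller sees false.
    if (!DISABLED.getAndSet(true)) {
      System.out.printf("Caching disabled because of slow operation (%.1f sec)%n", seconds);
    }
  }

  public static void main(String[] args) {
    maybeDisable(5.2); // logs the warning
    maybeDisable(6.0); // silent: caching is already disabled
  }
}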
@@ -0,0 +1,80 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.apache.hadoop.fs.impl.prefetch;

import java.time.Duration;

import org.apache.hadoop.fs.statistics.DurationTracker;

import static org.apache.hadoop.fs.statistics.IOStatisticsSupport.stubDurationTracker;

/**
 * Empty implementation of the prefetching statistics interface.
 */
public final class EmptyPrefetchingStatistics
    implements PrefetchingStatistics {

  private static final EmptyPrefetchingStatistics
      EMPTY_PREFETCHING_STATISTICS =
      new EmptyPrefetchingStatistics();

  private EmptyPrefetchingStatistics() {
  }

  public static EmptyPrefetchingStatistics getInstance() {
    return EMPTY_PREFETCHING_STATISTICS;
  }

  @Override
  public DurationTracker prefetchOperationStarted() {
    return stubDurationTracker();
  }

  @Override
  public void blockAddedToFileCache() {
  }

  @Override
  public void blockRemovedFromFileCache() {
  }

  @Override
  public void prefetchOperationCompleted() {
  }

  @Override
  public void executorAcquired(Duration timeInQueue) {
  }

  @Override
  public void memoryAllocated(int size) {
  }

  @Override
  public void memoryFreed(int size) {
  }
}
@@ -0,0 +1,88 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.apache.hadoop.fs.impl.prefetch;

import java.util.Locale;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;

import org.slf4j.Logger;

import org.apache.hadoop.util.concurrent.HadoopExecutors;

/**
 * A FuturePool implementation backed by a java.util.concurrent.ExecutorService.
 *
 * If a piece of work has started, it cannot (currently) be cancelled.
 *
 * This class is a simplified version of <code>com.twitter:util-core_2.11</code>
 * ExecutorServiceFuturePool, designed to avoid depending on that Scala library.
 * One problem with using a Scala library is that many downstream projects
 * (e.g. Apache Spark) use Scala, and they might want to use a different version
 * of Scala from the version that Hadoop chooses to use.
 */
public class ExecutorServiceFuturePool {

  private final ExecutorService executor;

  public ExecutorServiceFuturePool(ExecutorService executor) {
    this.executor = executor;
  }

  /**
   * @param f function to run in future on executor pool
   * @return future
   * @throws java.util.concurrent.RejectedExecutionException if the task cannot be scheduled
   * @throws NullPointerException if f is null
   */
  public Future<Void> executeFunction(final Supplier<Void> f) {
    return executor.submit(f::get);
  }

  /**
   * @param r runnable to run in future on executor pool
   * @return future
   * @throws java.util.concurrent.RejectedExecutionException if the task cannot be scheduled
   * @throws NullPointerException if r is null
   */
  @SuppressWarnings("unchecked")
  public Future<Void> executeRunnable(final Runnable r) {
    return (Future<Void>) executor.submit(r::run);
  }

  /**
   * Utility to shut down the {@link ExecutorService} used by this class. Waits up to the
   * given timeout for the ExecutorService to shut down gracefully.
   *
   * @param logger Logger
   * @param timeout the maximum time to wait
   * @param unit the time unit of the timeout argument
   */
  public void shutdown(Logger logger, long timeout, TimeUnit unit) {
    HadoopExecutors.shutdown(executor, logger, timeout, unit);
  }

  @Override
  public String toString() {
    return String.format(Locale.ROOT, "ExecutorServiceFuturePool(executor=%s)", executor);
  }
}
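A minimal usage sketch for the pool above (illustrative only, not part of the change): submit a runnable, wait on the returned future, then shut the pool down.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class FuturePoolExample {
  private static final Logger LOG = LoggerFactory.getLogger(FuturePoolExample.class);

  public static void main(String[] args) throws Exception {
    ExecutorService executor = Executors.newFixedThreadPool(4);
    ExecutorServiceFuturePool pool = new ExecutorServiceFuturePool(executor);

    // Submit a runnable and wait for it to complete.
    Future<Void> f = pool.executeRunnable(() -> LOG.info("prefetching a block"));
    f.get();

    // Gracefully stop the underlying executor, waiting up to 10 seconds.
    pool.shutdown(LOG, 10, TimeUnit.SECONDS);
  }
}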
@@ -0,0 +1,301 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.apache.hadoop.fs.impl.prefetch;

import java.nio.ByteBuffer;

import static org.apache.hadoop.fs.impl.prefetch.Validate.checkNotNegative;
import static org.apache.hadoop.fs.impl.prefetch.Validate.checkNotNull;
import static org.apache.hadoop.fs.impl.prefetch.Validate.checkPositiveInteger;
import static org.apache.hadoop.fs.impl.prefetch.Validate.checkState;
import static org.apache.hadoop.fs.impl.prefetch.Validate.checkWithinRange;

/**
 * Provides functionality related to tracking the position within a file.
 *
 * The file is accessed through an in-memory buffer. The absolute position within
 * the file is the sum of the start offset of the buffer within the file and the
 * relative offset of the current access location within the buffer.
 *
 * A file is made up of equal sized blocks. The last block may be of a smaller size.
 * The size of a buffer associated with this file is typically the same as block size.
 */
public final class FilePosition {

  /**
   * Holds block based information about a file.
   */
  private BlockData blockData;

  /**
   * Information about the buffer in use.
   */
  private BufferData data;

  /**
   * Provides access to the underlying file.
   */
  private ByteBuffer buffer;

  /**
   * Start offset of the buffer relative to the start of a file.
   */
  private long bufferStartOffset;

  /**
   * Offset where reading starts relative to the start of a file.
   */
  private long readStartOffset;

  // Read stats after a seek (mostly for debugging use).
  private int numSingleByteReads;

  private int numBytesRead;

  private int numBufferReads;

  /**
   * Constructs an instance of {@link FilePosition}.
   *
   * @param fileSize size of the associated file.
   * @param blockSize size of each block within the file.
   *
   * @throws IllegalArgumentException if fileSize is negative.
   * @throws IllegalArgumentException if blockSize is zero or negative.
   */
  public FilePosition(long fileSize, int blockSize) {
    checkNotNegative(fileSize, "fileSize");
    if (fileSize == 0) {
      checkNotNegative(blockSize, "blockSize");
    } else {
      checkPositiveInteger(blockSize, "blockSize");
    }

    this.blockData = new BlockData(fileSize, blockSize);

    // The position is valid only when a valid buffer is associated with this file.
    this.invalidate();
  }

  /**
   * Associates a buffer with this file.
   *
   * @param bufferData the buffer associated with this file.
   * @param startOffset Start offset of the buffer relative to the start of a file.
   * @param readOffset Offset where reading starts relative to the start of a file.
   *
   * @throws IllegalArgumentException if bufferData is null.
   * @throws IllegalArgumentException if startOffset is negative.
   * @throws IllegalArgumentException if readOffset is negative.
   * @throws IllegalArgumentException if readOffset is outside the range [startOffset, buffer end].
   */
  public void setData(BufferData bufferData,
      long startOffset,
      long readOffset) {
    checkNotNull(bufferData, "bufferData");
    checkNotNegative(startOffset, "startOffset");
    checkNotNegative(readOffset, "readOffset");
    checkWithinRange(
        readOffset,
        "readOffset",
        startOffset,
        startOffset + bufferData.getBuffer().limit());

    data = bufferData;
    buffer = bufferData.getBuffer().duplicate();
    bufferStartOffset = startOffset;
    readStartOffset = readOffset;
    setAbsolute(readOffset);

    resetReadStats();
  }

  public ByteBuffer buffer() {
    throwIfInvalidBuffer();
    return buffer;
  }

  public BufferData data() {
    throwIfInvalidBuffer();
    return data;
  }

  /**
   * Gets the current absolute position within this file.
   *
   * @return the current absolute position within this file.
   */
  public long absolute() {
    throwIfInvalidBuffer();
    return bufferStartOffset + relative();
  }

  /**
   * If the given {@code pos} lies within the current buffer, updates the current position to
   * the specified value and returns true; otherwise returns false without changing the position.
   *
   * @param pos the absolute position to change the current position to if possible.
   * @return true if the current position was updated, false otherwise.
   */
  public boolean setAbsolute(long pos) {
    if (isValid() && isWithinCurrentBuffer(pos)) {
      int relativePos = (int) (pos - bufferStartOffset);
      buffer.position(relativePos);
      return true;
    } else {
      return false;
    }
  }

  /**
   * Gets the current position within this file relative to the start of the associated buffer.
   *
   * @return the current position within this file relative to the start of the associated buffer.
   */
  public int relative() {
    throwIfInvalidBuffer();
    return buffer.position();
  }

  /**
   * Determines whether the given absolute position lies within the current buffer.
   *
   * @param pos the position to check.
   * @return true if the given absolute position lies within the current buffer, false otherwise.
   */
  public boolean isWithinCurrentBuffer(long pos) {
    throwIfInvalidBuffer();
    long bufferEndOffset = bufferStartOffset + buffer.limit();
    return (pos >= bufferStartOffset) && (pos <= bufferEndOffset);
  }

  /**
   * Gets the id of the current block.
   *
   * @return the id of the current block.
   */
  public int blockNumber() {
    throwIfInvalidBuffer();
    return blockData.getBlockNumber(bufferStartOffset);
  }

  /**
   * Determines whether the current block is the last block in this file.
   *
   * @return true if the current block is the last block in this file, false otherwise.
   */
  public boolean isLastBlock() {
    return blockData.isLastBlock(blockNumber());
  }

  /**
   * Determines if the current position is valid.
   *
   * @return true if the current position is valid, false otherwise.
   */
  public boolean isValid() {
    return buffer != null;
  }

  /**
   * Marks the current position as invalid.
   */
  public void invalidate() {
    buffer = null;
    bufferStartOffset = -1;
    data = null;
  }

  /**
   * Gets the absolute offset at which the current buffer starts.
   *
   * @return the absolute start offset of the current buffer.
   */
  public long bufferStartOffset() {
    throwIfInvalidBuffer();
    return bufferStartOffset;
  }

  /**
   * Determines whether the current buffer has been fully read.
   *
   * @return true if the current buffer has been fully read, false otherwise.
   */
  public boolean bufferFullyRead() {
    throwIfInvalidBuffer();
    return (bufferStartOffset == readStartOffset)
        && (relative() == buffer.limit())
        && (numBytesRead == buffer.limit());
  }

  public void incrementBytesRead(int n) {
    numBytesRead += n;
    if (n == 1) {
      numSingleByteReads++;
    } else {
      numBufferReads++;
    }
  }

  public int numBytesRead() {
    return numBytesRead;
  }

  public int numSingleByteReads() {
    return numSingleByteReads;
  }

  public int numBufferReads() {
    return numBufferReads;
  }

  private void resetReadStats() {
    numBytesRead = 0;
    numSingleByteReads = 0;
    numBufferReads = 0;
  }

  @Override
  public String toString() {
    StringBuilder sb = new StringBuilder();
    if (buffer == null) {
      sb.append("currentBuffer = null");
    } else {
      int pos = buffer.position();
      int val;
      if (pos >= buffer.limit()) {
        val = -1;
      } else {
        val = buffer.get(pos);
      }
      String currentBufferState =
          String.format("%d at pos: %d, lim: %d", val, pos, buffer.limit());
      sb.append(String.format(
          "block: %d, pos: %d (CBuf: %s)%n",
          blockNumber(), absolute(),
          currentBufferState));
      sb.append("\n");
    }
    return sb.toString();
  }

  private void throwIfInvalidBuffer() {
    checkState(buffer != null, "'buffer' must not be null");
  }
}
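A sketch of the offset arithmetic above (illustrative only, not part of the change; it assumes BufferData exposes a (blockNumber, ByteBuffer) constructor, as used elsewhere in this changeset):

import java.nio.ByteBuffer;

public class FilePositionExample {
  public static void main(String[] args) {
    // A 100-byte file split into 10-byte blocks.
    FilePosition pos = new FilePosition(100, 10);

    // Associate block 2 (bytes 20..29) and start reading at absolute offset 25.
    BufferData data = new BufferData(2, ByteBuffer.allocate(10));
    pos.setData(data, 20, 25);

    System.out.println(pos.absolute());    // 25 = bufferStartOffset (20) + relative (5)
    System.out.println(pos.relative());    // 5
    System.out.println(pos.blockNumber()); // 2
  }
}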
@@ -0,0 +1,67 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.apache.hadoop.fs.impl.prefetch;

import java.time.Duration;

import org.apache.hadoop.fs.statistics.DurationTracker;
import org.apache.hadoop.fs.statistics.IOStatisticsSource;

/**
 * Statistics updated by the prefetching and block caching subsystem.
 */
public interface PrefetchingStatistics extends IOStatisticsSource {

  /**
   * A prefetch operation has started.
   * @return duration tracker
   */
  DurationTracker prefetchOperationStarted();

  /**
   * A block has been saved to the file cache.
   */
  void blockAddedToFileCache();

  /**
   * A block has been removed from the file cache.
   */
  void blockRemovedFromFileCache();

  /**
   * A prefetch operation has completed.
   */
  void prefetchOperationCompleted();

  /**
   * An executor has been acquired, either for prefetching or caching.
   * @param timeInQueue time taken to acquire an executor.
   */
  void executorAcquired(Duration timeInQueue);

  /**
   * A new buffer has been added to the buffer pool.
   * @param size size of the new buffer
   */
  void memoryAllocated(int size);

  /**
   * Previously allocated memory has been freed.
   * @param size size of memory freed.
   */
  void memoryFreed(int size);
}
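EmptyPrefetchingStatistics earlier in this diff is the no-op implementation of this interface; a counting variant is about as small. A sketch (illustrative only, not part of the change; it relies on the default getIOStatistics() inherited from IOStatisticsSource):

import java.time.Duration;
import java.util.concurrent.atomic.AtomicLong;

import org.apache.hadoop.fs.statistics.DurationTracker;

import static org.apache.hadoop.fs.statistics.IOStatisticsSupport.stubDurationTracker;

public class CountingPrefetchingStatistics implements PrefetchingStatistics {
  private final AtomicLong blocksInCache = new AtomicLong();
  private final AtomicLong memoryInUse = new AtomicLong();

  @Override
  public DurationTracker prefetchOperationStarted() {
    return stubDurationTracker(); // a real implementation would track durations
  }

  @Override
  public void blockAddedToFileCache() {
    blocksInCache.incrementAndGet();
  }

  @Override
  public void blockRemovedFromFileCache() {
    blocksInCache.decrementAndGet();
  }

  @Override
  public void prefetchOperationCompleted() {
  }

  @Override
  public void executorAcquired(Duration timeInQueue) {
  }

  @Override
  public void memoryAllocated(int size) {
    memoryInUse.addAndGet(size);
  }

  @Override
  public void memoryFreed(int size) {
    memoryInUse.addAndGet(-size);
  }
}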
@@ -0,0 +1,71 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.apache.hadoop.fs.impl.prefetch;

import java.io.Closeable;

/**
 * Manages a fixed pool of resources.
 *
 * Avoids creating a new resource if a previously created instance is already available.
 */
public abstract class ResourcePool<T> implements Closeable {

  /**
   * Acquires a resource, blocking if necessary until one becomes available.
   *
   * @return the acquired resource instance.
   */
  public abstract T acquire();

  /**
   * Acquires a resource if one is immediately available; otherwise returns null
   * without blocking.
   *
   * @return the acquired resource instance, or null if none is immediately available.
   */
  public abstract T tryAcquire();

  /**
   * Releases a previously acquired resource.
   *
   * @param item the resource to release.
   */
  public abstract void release(T item);

  @Override
  public void close() {
  }

  /**
   * Derived classes may implement a way to clean up each item.
   *
   * @param item the resource to close.
   */
  protected void close(T item) {
    // Do nothing in this class. Allow overriding classes to take any cleanup action.
  }

  /**
   * Derived classes must implement a way to create an instance of a resource.
   *
   * @return the created instance.
   */
  protected abstract T createNew();
}
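A minimal concrete pool over this abstract class (illustrative only, not part of the change): a fixed set of byte arrays backed by a blocking queue, where acquire() blocks and tryAcquire() does not.

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

public class ByteArrayPool extends ResourcePool<byte[]> {
  private final BlockingQueue<byte[]> items;

  public ByteArrayPool(int count, int size) {
    this.items = new ArrayBlockingQueue<>(count);
    for (int i = 0; i < count; i++) {
      items.add(new byte[size]);
    }
  }

  @Override
  public byte[] acquire() {
    try {
      return items.take(); // blocks until an item is available
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      throw new IllegalStateException("interrupted while acquiring", e);
    }
  }

  @Override
  public byte[] tryAcquire() {
    return items.poll(); // null if nothing is immediately available
  }

  @Override
  public void release(byte[] item) {
    items.add(item);
  }

  @Override
  protected byte[] createNew() {
    return new byte[0]; // unused: this pool is pre-populated and fixed in size
  }
}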
@@ -0,0 +1,93 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.apache.hadoop.fs.impl.prefetch;

import static org.apache.hadoop.fs.impl.prefetch.Validate.checkGreater;
import static org.apache.hadoop.fs.impl.prefetch.Validate.checkPositiveInteger;

/**
 * Provides retry related functionality.
 */
public class Retryer {

  /* Maximum amount of delay (in ms) before retry fails. */
  private int maxDelay;

  /* Per retry delay (in ms). */
  private int perRetryDelay;

  /**
   * The time interval (in ms) at which a status update is made.
   */
  private int statusUpdateInterval;

  /* Current delay. */
  private int delay;

  /**
   * Initializes a new instance of the {@code Retryer} class.
   *
   * @param perRetryDelay per retry delay (in ms).
   * @param maxDelay maximum amount of delay (in ms) before retry fails.
   * @param statusUpdateInterval time interval (in ms) at which a status update is made.
   *
   * @throws IllegalArgumentException if perRetryDelay is zero or negative.
   * @throws IllegalArgumentException if maxDelay is less than or equal to perRetryDelay.
   * @throws IllegalArgumentException if statusUpdateInterval is zero or negative.
   */
  public Retryer(int perRetryDelay, int maxDelay, int statusUpdateInterval) {
    checkPositiveInteger(perRetryDelay, "perRetryDelay");
    checkGreater(maxDelay, "maxDelay", perRetryDelay, "perRetryDelay");
    checkPositiveInteger(statusUpdateInterval, "statusUpdateInterval");

    this.perRetryDelay = perRetryDelay;
    this.maxDelay = maxDelay;
    this.statusUpdateInterval = statusUpdateInterval;
  }

  /**
   * Returns true if retrying should continue, false otherwise.
   *
   * @return true if the caller should retry, false otherwise.
   */
  public boolean continueRetry() {
    if (this.delay >= this.maxDelay) {
      return false;
    }

    try {
      Thread.sleep(this.perRetryDelay);
    } catch (InterruptedException e) {
      // Ignore the exception, as required by the retry semantics of this class.
    }

    this.delay += this.perRetryDelay;
    return true;
  }

  /**
   * Returns true if the status update interval has been reached.
   *
   * @return true if the status update interval has been reached.
   */
  public boolean updateStatus() {
    return (this.delay > 0) && this.delay % this.statusUpdateInterval == 0;
  }
}
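A typical polling loop over the class above (illustrative only, not part of the change; the marker path is hypothetical): with a 10 ms per-retry delay, a 1000 ms cap, and a 100 ms status interval, continueRetry() sleeps and accumulates delay until the cap is hit, and updateStatus() fires on every tenth iteration.

import java.nio.file.Files;
import java.nio.file.Paths;

public class RetryerExample {
  public static void main(String[] args) {
    // 10 ms per retry, give up after 1000 ms, report every 100 ms.
    Retryer retryer = new Retryer(10, 1000, 100);
    while (!Files.exists(Paths.get("/tmp/ready.marker")) && retryer.continueRetry()) {
      if (retryer.updateStatus()) {
        System.out.println("still waiting for /tmp/ready.marker");
      }
    }
  }
}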
@@ -0,0 +1,489 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.apache.hadoop.fs.impl.prefetch;

import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.channels.WritableByteChannel;
import java.nio.file.Files;
import java.nio.file.OpenOption;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;
import java.nio.file.attribute.PosixFilePermission;
import java.util.ArrayList;
import java.util.Collections;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantReadWriteLock;

import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.LocalDirAllocator;

import static java.util.Objects.requireNonNull;
import static org.apache.hadoop.fs.impl.prefetch.Validate.checkNotNull;

/**
 * Provides functionality necessary for caching blocks of data read from FileSystem.
 * Each cache block is stored on the local disk as a separate file.
 */
public class SingleFilePerBlockCache implements BlockCache {
  private static final Logger LOG = LoggerFactory.getLogger(SingleFilePerBlockCache.class);

  /**
   * Blocks stored in this cache.
   */
  private final Map<Integer, Entry> blocks = new ConcurrentHashMap<>();

  /**
   * Number of times a block was read from this cache.
   * Used for determining cache utilization factor.
   */
  private int numGets = 0;

  private boolean closed;

  private final PrefetchingStatistics prefetchingStatistics;

  /**
   * Timeout to be used by close, while acquiring prefetch block write lock.
   */
  private static final int PREFETCH_WRITE_LOCK_TIMEOUT = 5;

  /**
   * Lock timeout unit to be used by the thread while acquiring prefetch block write lock.
   */
  private static final TimeUnit PREFETCH_WRITE_LOCK_TIMEOUT_UNIT = TimeUnit.SECONDS;

  /**
   * File attributes attached to any temporary file created by this cache.
   */
  private static final Set<PosixFilePermission> TEMP_FILE_ATTRS =
      ImmutableSet.of(PosixFilePermission.OWNER_READ, PosixFilePermission.OWNER_WRITE);

  /**
   * Cache entry.
   * Each block is stored as a separate file.
   */
  private static final class Entry {
    private final int blockNumber;
    private final Path path;
    private final int size;
    private final long checksum;
    private final ReentrantReadWriteLock lock;
    private enum LockType {
      READ,
      WRITE
    }

    Entry(int blockNumber, Path path, int size, long checksum) {
      this.blockNumber = blockNumber;
      this.path = path;
      this.size = size;
      this.checksum = checksum;
      this.lock = new ReentrantReadWriteLock();
    }

    @Override
    public String toString() {
      return String.format(
          "([%03d] %s: size = %d, checksum = %d)",
          blockNumber, path, size, checksum);
    }

    /**
     * Take the read or write lock.
     *
     * @param lockType type of the lock.
     */
    private void takeLock(LockType lockType) {
      if (LockType.READ == lockType) {
        lock.readLock().lock();
      } else if (LockType.WRITE == lockType) {
        lock.writeLock().lock();
      }
    }

    /**
     * Release the read or write lock.
     *
     * @param lockType type of the lock.
     */
    private void releaseLock(LockType lockType) {
      if (LockType.READ == lockType) {
        lock.readLock().unlock();
      } else if (LockType.WRITE == lockType) {
        lock.writeLock().unlock();
      }
    }

    /**
     * Try to take the read or write lock within the given timeout.
     *
     * @param lockType type of the lock.
     * @param timeout the time to wait for the given lock.
     * @param unit the time unit of the timeout argument.
     * @return true if the lock of the given lock type was acquired.
     */
    private boolean takeLock(LockType lockType, long timeout, TimeUnit unit) {
      try {
        if (LockType.READ == lockType) {
          return lock.readLock().tryLock(timeout, unit);
        } else if (LockType.WRITE == lockType) {
          return lock.writeLock().tryLock(timeout, unit);
        }
      } catch (InterruptedException e) {
        LOG.warn("Thread interrupted while trying to acquire {} lock", lockType, e);
        Thread.currentThread().interrupt();
      }
      return false;
    }
  }

  /**
   * Constructs an instance of a {@code SingleFilePerBlockCache}.
   *
   * @param prefetchingStatistics statistics for this stream.
   */
  public SingleFilePerBlockCache(PrefetchingStatistics prefetchingStatistics) {
    this.prefetchingStatistics = requireNonNull(prefetchingStatistics);
  }

  /**
   * Indicates whether the given block is in this cache.
   */
  @Override
  public boolean containsBlock(int blockNumber) {
    return blocks.containsKey(blockNumber);
  }

  /**
   * Gets the blocks in this cache.
   */
  @Override
  public Iterable<Integer> blocks() {
    return Collections.unmodifiableList(new ArrayList<>(blocks.keySet()));
  }

  /**
   * Gets the number of blocks in this cache.
   */
  @Override
  public int size() {
    return blocks.size();
  }

  /**
   * Gets the block having the given {@code blockNumber}.
   *
   * @throws IllegalArgumentException if buffer is null.
   */
  @Override
  public void get(int blockNumber, ByteBuffer buffer) throws IOException {
    if (closed) {
      return;
    }

    checkNotNull(buffer, "buffer");

    Entry entry = getEntry(blockNumber);
    entry.takeLock(Entry.LockType.READ);
    try {
      buffer.clear();
      readFile(entry.path, buffer);
      buffer.rewind();
      validateEntry(entry, buffer);
    } finally {
      entry.releaseLock(Entry.LockType.READ);
    }
  }

  protected int readFile(Path path, ByteBuffer buffer) throws IOException {
    int numBytesRead = 0;
    int numBytes;
    // try-with-resources ensures the channel is closed even if a read fails.
    try (FileChannel channel = FileChannel.open(path, StandardOpenOption.READ)) {
      while ((numBytes = channel.read(buffer)) > 0) {
        numBytesRead += numBytes;
      }
      buffer.limit(buffer.position());
    }
    return numBytesRead;
  }

  private Entry getEntry(int blockNumber) {
    Validate.checkNotNegative(blockNumber, "blockNumber");

    Entry entry = blocks.get(blockNumber);
    if (entry == null) {
      throw new IllegalStateException(String.format("block %d not found in cache", blockNumber));
    }
    numGets++;
    return entry;
  }

  /**
   * Puts the given block in this cache.
   *
   * @param blockNumber the block number, used as a key for blocks map.
   * @param buffer buffer contents of the given block to be added to this cache.
   * @param conf the configuration.
   * @param localDirAllocator the local dir allocator instance.
   * @throws IOException if either local dir allocator fails to allocate file or if IO error
   *                     occurs while writing the buffer content to the file.
   * @throws IllegalArgumentException if buffer is null, or if buffer.limit() is zero or negative.
   */
  @Override
  public void put(int blockNumber, ByteBuffer buffer, Configuration conf,
      LocalDirAllocator localDirAllocator) throws IOException {
    if (closed) {
      return;
    }

    checkNotNull(buffer, "buffer");

    if (blocks.containsKey(blockNumber)) {
      Entry entry = blocks.get(blockNumber);
      entry.takeLock(Entry.LockType.READ);
      try {
        validateEntry(entry, buffer);
      } finally {
        entry.releaseLock(Entry.LockType.READ);
      }
      return;
    }

    Validate.checkPositiveInteger(buffer.limit(), "buffer.limit()");

    Path blockFilePath = getCacheFilePath(conf, localDirAllocator);
    long size = Files.size(blockFilePath);
    if (size != 0) {
      String message =
          String.format("[%d] temp file already has data. %s (%d)",
              blockNumber, blockFilePath, size);
      throw new IllegalStateException(message);
    }

    writeFile(blockFilePath, buffer);
    long checksum = BufferData.getChecksum(buffer);
    Entry entry = new Entry(blockNumber, blockFilePath, buffer.limit(), checksum);
    blocks.put(blockNumber, entry);
    // Update stream_read_blocks_in_cache stats only after blocks map is updated with new file
    // entry to avoid any discrepancy related to the value of stream_read_blocks_in_cache.
    // If stream_read_blocks_in_cache is updated before updating the blocks map here, closing of
    // the input stream can lead to the removal of the cache file even before blocks is added with
    // the new cache file, leading to incorrect value of stream_read_blocks_in_cache.
    prefetchingStatistics.blockAddedToFileCache();
  }

  private static final Set<? extends OpenOption> CREATE_OPTIONS =
      EnumSet.of(StandardOpenOption.WRITE,
          StandardOpenOption.CREATE,
          StandardOpenOption.TRUNCATE_EXISTING);

  protected void writeFile(Path path, ByteBuffer buffer) throws IOException {
    buffer.rewind();
    // try-with-resources ensures the channel is closed even if a write fails.
    try (WritableByteChannel writeChannel = Files.newByteChannel(path, CREATE_OPTIONS)) {
      while (buffer.hasRemaining()) {
        writeChannel.write(buffer);
      }
    }
  }

  /**
   * Return a temporary file created based on the file path retrieved from the local
   * dir allocator.
   *
   * @param conf The configuration object.
   * @param localDirAllocator Local dir allocator instance.
   * @return Path of the temporary file created.
   * @throws IOException if IO error occurs while local dir allocator tries to retrieve path
   *                     from local FS or file creation fails or permission set fails.
   */
  protected Path getCacheFilePath(final Configuration conf,
      final LocalDirAllocator localDirAllocator)
      throws IOException {
    return getTempFilePath(conf, localDirAllocator);
  }

  @Override
  public void close() throws IOException {
    if (closed) {
      return;
    }

    closed = true;

    LOG.info(getStats());
    int numFilesDeleted = 0;

    for (Entry entry : blocks.values()) {
      boolean lockAcquired = entry.takeLock(Entry.LockType.WRITE, PREFETCH_WRITE_LOCK_TIMEOUT,
          PREFETCH_WRITE_LOCK_TIMEOUT_UNIT);
      if (!lockAcquired) {
        LOG.error("Not deleting cache file {}: the write lock could not"
                + " be acquired within {} {}", entry.path, PREFETCH_WRITE_LOCK_TIMEOUT,
            PREFETCH_WRITE_LOCK_TIMEOUT_UNIT);
        continue;
      }
      try {
        Files.deleteIfExists(entry.path);
        prefetchingStatistics.blockRemovedFromFileCache();
        numFilesDeleted++;
      } catch (IOException e) {
        LOG.debug("Failed to delete cache file {}", entry.path, e);
      } finally {
        entry.releaseLock(Entry.LockType.WRITE);
      }
    }

    if (numFilesDeleted > 0) {
      LOG.info("Deleted {} cache files", numFilesDeleted);
    }
  }

  @Override
  public String toString() {
    StringBuilder sb = new StringBuilder();
    sb.append("stats: ");
    sb.append(getStats());
    sb.append(", blocks:[");
    sb.append(getIntList(blocks()));
    sb.append("]");
    return sb.toString();
  }

  private void validateEntry(Entry entry, ByteBuffer buffer) {
    if (entry.size != buffer.limit()) {
      String message = String.format(
          "[%d] entry.size(%d) != buffer.limit(%d)",
          entry.blockNumber, entry.size, buffer.limit());
      throw new IllegalStateException(message);
    }

    long checksum = BufferData.getChecksum(buffer);
    if (entry.checksum != checksum) {
      String message = String.format(
          "[%d] entry.checksum(%d) != buffer checksum(%d)",
          entry.blockNumber, entry.checksum, checksum);
      throw new IllegalStateException(message);
    }
  }

  /**
   * Produces a human readable list of blocks for the purpose of logging.
   * This method minimizes the length of the returned list by converting
   * a contiguous list of blocks into a range.
   * For example, 1, 3, 4, 5, 6, 8 becomes 1, 3~6, 8.
   */
  private String getIntList(Iterable<Integer> nums) {
    List<String> numList = new ArrayList<>();
    List<Integer> numbers = new ArrayList<Integer>();
    for (Integer n : nums) {
      numbers.add(n);
    }
    Collections.sort(numbers);

    int index = 0;
    while (index < numbers.size()) {
      int start = numbers.get(index);
      int prev = start;
      int end = start;
      while ((++index < numbers.size()) && ((end = numbers.get(index)) == prev + 1)) {
        prev = end;
      }

      if (start == prev) {
        numList.add(Integer.toString(start));
      } else {
        numList.add(String.format("%d~%d", start, prev));
      }
    }

    return String.join(", ", numList);
  }

  private String getStats() {
    return String.format(
        "#entries = %d, #gets = %d",
        blocks.size(), numGets);
  }

  private static final String CACHE_FILE_PREFIX = "fs-cache-";

  /**
   * Determine if the cache space is available on the local FS.
   *
   * @param fileSize The size of the file.
   * @param conf The configuration.
   * @param localDirAllocator Local dir allocator instance.
   * @return True if the given file size is less than the available free space on local FS,
   * False otherwise.
   */
  public static boolean isCacheSpaceAvailable(long fileSize, Configuration conf,
      LocalDirAllocator localDirAllocator) {
    try {
      Path cacheFilePath = getTempFilePath(conf, localDirAllocator);
      long freeSpace = new File(cacheFilePath.toString()).getUsableSpace();
      LOG.info("fileSize = {}, freeSpace = {}", fileSize, freeSpace);
      Files.deleteIfExists(cacheFilePath);
      return fileSize < freeSpace;
    } catch (IOException e) {
      LOG.error("isCacheSpaceAvailable", e);
      return false;
    }
  }

  // The suffix (file extension) of each cache block file.
  private static final String BINARY_FILE_SUFFIX = ".bin";

  /**
   * Create a temporary file based on the file path retrieved from the local dir
   * allocator instance. The file is created with the .bin suffix and is granted
   * the POSIX file permissions listed in TEMP_FILE_ATTRS.
   *
   * @param conf the configuration.
   * @param localDirAllocator the local dir allocator instance.
   * @return path of the file created.
   * @throws IOException if IO error occurs while local dir allocator tries to retrieve path
   *                     from local FS or file creation fails or permission set fails.
   */
  private static Path getTempFilePath(final Configuration conf,
      final LocalDirAllocator localDirAllocator) throws IOException {
    org.apache.hadoop.fs.Path path =
        localDirAllocator.getLocalPathForWrite(CACHE_FILE_PREFIX, conf);
    File dir = new File(path.getParent().toUri().getPath());
    String prefix = path.getName();
    File tmpFile = File.createTempFile(prefix, BINARY_FILE_SUFFIX, dir);
    Path tmpFilePath = Paths.get(tmpFile.toURI());
    return Files.setPosixFilePermissions(tmpFilePath, TEMP_FILE_ATTRS);
  }
}
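A round-trip sketch through the cache above (illustrative only, not part of the change; it assumes a writable hadoop.tmp.dir and uses the no-op statistics from earlier in this diff):

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.LocalDirAllocator;

public class BlockCacheExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    LocalDirAllocator allocator = new LocalDirAllocator("hadoop.tmp.dir");

    SingleFilePerBlockCache cache =
        new SingleFilePerBlockCache(EmptyPrefetchingStatistics.getInstance());

    // Write block 0 to a per-block file on local disk.
    ByteBuffer block = ByteBuffer.wrap("hello".getBytes(StandardCharsets.UTF_8));
    cache.put(0, block, conf, allocator);

    // Read it back; get() re-validates size and checksum against the entry.
    ByteBuffer readBack = ByteBuffer.allocate(5);
    cache.get(0, readBack);
    System.out.println(new String(readBack.array(), StandardCharsets.UTF_8));

    cache.close(); // deletes the cache files
  }
}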
@ -0,0 +1,399 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.fs.impl.prefetch;
|
||||
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
import java.util.Collection;
|
||||
|
||||
import static org.apache.hadoop.util.Preconditions.checkArgument;
|
||||
|
||||
/**
|
||||
* A superset of Validate class in Apache commons lang3.
|
||||
* <p>
|
||||
* It provides consistent message strings for frequently encountered checks.
|
||||
* That simplifies callers because they have to supply only the name of the argument
|
||||
* that failed a check instead of having to supply the entire message.
|
||||
*/
|
||||
public final class Validate {
|
||||
|
||||
private Validate() {
|
||||
}
|
||||
|
||||
/**
|
||||
* Validates that the given reference argument is not null.
|
||||
* @param obj the argument reference to validate.
|
||||
* @param argName the name of the argument being validated.
|
||||
*/
|
||||
public static void checkNotNull(Object obj, String argName) {
|
||||
checkArgument(obj != null, "'%s' must not be null.", argName);
|
||||
}
|
||||
|
||||
/**
|
||||
* Validates that the given integer argument is not zero or negative.
|
||||
* @param value the argument value to validate
|
||||
* @param argName the name of the argument being validated.
|
||||
*/
|
||||
public static void checkPositiveInteger(long value, String argName) {
|
||||
checkArgument(value > 0, "'%s' must be a positive integer.", argName);
|
||||
}
|
||||
|
||||
/**
|
||||
* Validates that the given integer argument is not negative.
|
||||
* @param value the argument value to validate
|
||||
* @param argName the name of the argument being validated.
|
||||
*/
|
||||
public static void checkNotNegative(long value, String argName) {
|
||||
checkArgument(value >= 0, "'%s' must not be negative.", argName);
|
||||
}
|
||||
|
||||
/**
|
||||
* Validates that the expression (that checks a required field is present) is true.
|
||||
* @param isPresent indicates whether the given argument is present.
|
||||
* @param argName the name of the argument being validated.
|
||||
*/
|
||||
public static void checkRequired(boolean isPresent, String argName) {
|
||||
checkArgument(isPresent, "'%s' is required.", argName);
|
||||
}
|
||||
|
||||
/**
|
||||
* Validates that the expression (that checks a field is valid) is true.
|
||||
* @param isValid indicates whether the given argument is valid.
|
||||
* @param argName the name of the argument being validated.
|
||||
*/
|
||||
public static void checkValid(boolean isValid, String argName) {
|
||||
checkArgument(isValid, "'%s' is invalid.", argName);
|
||||
}
|
||||
|
||||
/**
|
||||
* Validates that the expression (that checks a field is valid) is true.
|
||||
* @param isValid indicates whether the given argument is valid.
|
||||
* @param argName the name of the argument being validated.
|
||||
* @param validValues the list of values that are allowed.
|
||||
*/
|
||||
public static void checkValid(boolean isValid,
|
||||
String argName,
|
||||
String validValues) {
|
||||
checkArgument(isValid, "'%s' is invalid. Valid values are: %s.", argName,
|
||||
validValues);
|
||||
}
|
||||
|
||||
/**
|
||||
* Validates that the given string is not null and has non-zero length.
|
||||
* @param arg the argument reference to validate.
|
||||
* @param argName the name of the argument being validated.
|
||||
*/
|
||||
public static void checkNotNullAndNotEmpty(String arg, String argName) {
|
||||
checkNotNull(arg, argName);
|
||||
checkArgument(
|
||||
!arg.isEmpty(),
|
||||
"'%s' must not be empty.",
|
||||
argName);
|
||||
}
|
||||
|
||||
/**
|
||||
* Validates that the given array is not null and has at least one element.
|
||||
* @param <T> the type of array's elements.
|
||||
* @param array the argument reference to validate.
|
||||
* @param argName the name of the argument being validated.
|
||||
*/
|
||||
public static <T> void checkNotNullAndNotEmpty(T[] array, String argName) {
|
||||
checkNotNull(array, argName);
|
||||
checkNotEmpty(array.length, argName);
|
||||
}
|
||||
|
||||
/**
|
||||
* Validates that the given array is not null and has at least one element.
|
||||
* @param array the argument reference to validate.
|
||||
* @param argName the name of the argument being validated.
|
||||
*/
|
||||
public static void checkNotNullAndNotEmpty(byte[] array, String argName) {
|
||||
checkNotNull(array, argName);
|
||||
checkNotEmpty(array.length, argName);
|
||||
}
|
||||
|
||||
/**
|
||||
* Validates that the given array is not null and has at least one element.
|
||||
* @param array the argument reference to validate.
|
||||
* @param argName the name of the argument being validated.
|
||||
*/
|
||||
public static void checkNotNullAndNotEmpty(short[] array, String argName) {
|
||||
checkNotNull(array, argName);
|
||||
checkNotEmpty(array.length, argName);
|
||||
}
|
||||
|
||||
/**
|
||||
* Validates that the given array is not null and has at least one element.
|
||||
* @param array the argument reference to validate.
|
||||
* @param argName the name of the argument being validated.
|
||||
*/
|
||||
public static void checkNotNullAndNotEmpty(int[] array, String argName) {
|
||||
checkNotNull(array, argName);
|
||||
checkNotEmpty(array.length, argName);
|
||||
}
|
||||
|
||||
/**
|
||||
* Validates that the given array is not null and has at least one element.
|
||||
* @param array the argument reference to validate.
|
||||
* @param argName the name of the argument being validated.
|
||||
*/
|
||||
public static void checkNotNullAndNotEmpty(long[] array, String argName) {
|
||||
checkNotNull(array, argName);
|
||||
checkNotEmpty(array.length, argName);
|
||||
}
|
||||
|
||||
/**
|
||||
* Validates that the given buffer is not null and has non-zero capacity.
|
||||
* @param <T> the type of iterable's elements.
|
||||
* @param iter the argument reference to validate.
|
||||
* @param argName the name of the argument being validated.
|
||||
*/
|
||||
public static <T> void checkNotNullAndNotEmpty(Iterable<T> iter,
|
||||
String argName) {
|
||||
checkNotNull(iter, argName);
|
||||
int minNumElements = iter.iterator().hasNext() ? 1 : 0;
|
||||
checkNotEmpty(minNumElements, argName);
|
||||
}
|
||||
|
||||
/**
|
||||
* Validates that the given set is not null and has an exact number of items.
|
||||
* @param <T> the type of collection's elements.
|
||||
* @param collection the argument reference to validate.
|
||||
* @param numElements the expected number of elements in the collection.
|
||||
* @param argName the name of the argument being validated.
|
||||
*/
|
||||
public static <T> void checkNotNullAndNumberOfElements(
|
||||
Collection<T> collection, int numElements, String argName) {
|
||||
checkNotNull(collection, argName);
|
||||
checkArgument(
|
||||
collection.size() == numElements,
|
||||
"Number of elements in '%s' must be exactly %s, %s given.",
|
||||
argName,
|
||||
numElements,
|
||||
collection.size()
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Validates that the given two values are equal.
|
||||
* @param value1 the first value to check.
|
||||
* @param value1Name the name of the first argument.
|
||||
* @param value2 the second value to check.
|
||||
* @param value2Name the name of the second argument.
|
||||
*/
|
||||
public static void checkValuesEqual(
|
||||
long value1,
|
||||
String value1Name,
|
||||
long value2,
|
||||
String value2Name) {
|
||||
checkArgument(
|
||||
value1 == value2,
|
||||
"'%s' (%s) must equal '%s' (%s).",
|
||||
value1Name,
|
||||
value1,
|
||||
value2Name,
|
||||
value2);
|
||||
}
|
||||
|
||||
/**
|
||||
* Validates that the first value is an integer multiple of the second value.
|
||||
* @param value1 the first value to check.
|
||||
* @param value1Name the name of the first argument.
|
||||
* @param value2 the second value to check.
|
||||
* @param value2Name the name of the second argument.
|
||||
*/
|
||||
public static void checkIntegerMultiple(
|
||||
long value1,
|
||||
String value1Name,
|
||||
long value2,
|
||||
String value2Name) {
|
||||
checkArgument(
|
||||
(value1 % value2) == 0,
|
||||
"'%s' (%s) must be an integer multiple of '%s' (%s).",
|
||||
value1Name,
|
||||
value1,
|
||||
value2Name,
|
||||
value2);
|
||||
}
|
||||
|
||||
/**
|
||||
* Validates that the first value is greater than the second value.
|
||||
* @param value1 the first value to check.
|
||||
* @param value1Name the name of the first argument.
|
||||
* @param value2 the second value to check.
|
||||
* @param value2Name the name of the second argument.
|
||||
*/
|
||||
public static void checkGreater(
|
||||
long value1,
|
||||
String value1Name,
|
||||
long value2,
|
||||
String value2Name) {
|
||||
checkArgument(
|
||||
value1 > value2,
|
||||
"'%s' (%s) must be greater than '%s' (%s).",
|
||||
value1Name,
|
||||
value1,
|
||||
value2Name,
|
||||
value2);
|
||||
}
|
||||
|
||||
/**
|
||||
* Validates that the first value is greater than or equal to the second value.
|
||||
* @param value1 the first value to check.
|
||||
* @param value1Name the name of the first argument.
|
||||
* @param value2 the second value to check.
|
||||
* @param value2Name the name of the second argument.
|
||||
*/
|
||||
public static void checkGreaterOrEqual(
|
||||
long value1,
|
||||
String value1Name,
|
||||
long value2,
|
||||
String value2Name) {
|
||||
checkArgument(
|
||||
value1 >= value2,
|
||||
"'%s' (%s) must be greater than or equal to '%s' (%s).",
|
||||
value1Name,
|
||||
value1,
|
||||
value2Name,
|
||||
value2);
|
||||
}
|
||||
|
||||
/**
|
||||
* Validates that the first value is less than or equal to the second value.
|
||||
* @param value1 the first value to check.
|
||||
* @param value1Name the name of the first argument.
|
||||
* @param value2 the second value to check.
|
||||
* @param value2Name the name of the second argument.
|
||||
*/
|
||||
public static void checkLessOrEqual(
|
||||
long value1,
|
||||
String value1Name,
|
||||
long value2,
|
||||
String value2Name) {
|
||||
checkArgument(
|
||||
        value1 <= value2,
        "'%s' (%s) must be less than or equal to '%s' (%s).",
        value1Name,
        value1,
        value2Name,
        value2);
  }

  /**
   * Validates that the given value is within the given range of values.
   * @param value the value to check.
   * @param valueName the name of the argument.
   * @param minValueInclusive inclusive lower limit for the value.
   * @param maxValueInclusive inclusive upper limit for the value.
   */
  public static void checkWithinRange(
      long value,
      String valueName,
      long minValueInclusive,
      long maxValueInclusive) {
    checkArgument(
        (value >= minValueInclusive) && (value <= maxValueInclusive),
        "'%s' (%s) must be within the range [%s, %s].",
        valueName,
        value,
        minValueInclusive,
        maxValueInclusive);
  }

  /**
   * Validates that the given value is within the given range of values.
   * @param value the value to check.
   * @param valueName the name of the argument.
   * @param minValueInclusive inclusive lower limit for the value.
   * @param maxValueInclusive inclusive upper limit for the value.
   */
  public static void checkWithinRange(
      double value,
      String valueName,
      double minValueInclusive,
      double maxValueInclusive) {
    checkArgument(
        (value >= minValueInclusive) && (value <= maxValueInclusive),
        "'%s' (%s) must be within the range [%s, %s].",
        valueName,
        value,
        minValueInclusive,
        maxValueInclusive);
  }

  /**
   * Validates that the given path exists.
   * @param path the path to check.
   * @param argName the name of the argument being validated.
   */
  public static void checkPathExists(Path path, String argName) {
    checkNotNull(path, argName);
    checkArgument(Files.exists(path), "Path %s (%s) does not exist.", argName,
        path);
  }

  /**
   * Validates that the given path exists and is a directory.
   * @param path the path to check.
   * @param argName the name of the argument being validated.
   */
  public static void checkPathExistsAsDir(Path path, String argName) {
    checkPathExists(path, argName);
    checkArgument(
        Files.isDirectory(path),
        "Path %s (%s) must point to a directory.",
        argName,
        path);
  }

  /**
   * Validates that the given path exists and is a file.
   * @param path the path to check.
   * @param argName the name of the argument being validated.
   */
  public static void checkPathExistsAsFile(Path path, String argName) {
    checkPathExists(path, argName);
    checkArgument(Files.isRegularFile(path),
        "Path %s (%s) must point to a file.", argName, path);
  }

  /**
   * Check state.
   * @param expression expression which must hold.
   * @param format format string
   * @param args arguments for the error string
   * @throws IllegalStateException if the state is not valid.
   */
  public static void checkState(boolean expression,
      String format,
      Object... args) {
    if (!expression) {
      throw new IllegalStateException(String.format(format, args));
    }
  }

  private static void checkNotEmpty(int arraySize, String argName) {
    checkArgument(
        arraySize > 0,
        "'%s' must have at least one element.",
        argName);
  }
}
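As a quick illustration of how these validators read at a call site, here is a minimal sketch. Everything except the check* methods is invented for the example, and the enclosing utility class is assumed to be named Validate; checkArgument is assumed to throw IllegalArgumentException with the formatted message, matching how checkState throws IllegalStateException above.

import java.nio.file.Path;
import java.nio.file.Paths;

// Hypothetical caller: argument checking for a block-cache component.
public class BlockCacheArgs {
  private final int blockSize;

  public BlockCacheArgs(int blockSize, Path cacheDir) {
    // rejects values outside [1, 1 GiB] with a formatted message
    Validate.checkWithinRange(blockSize, "blockSize", 1, 1 << 30);
    // rejects null, missing, and non-directory paths in one call
    Validate.checkPathExistsAsDir(cacheDir, "cacheDir");
    this.blockSize = blockSize;
  }

  public static void main(String[] args) {
    new BlockCacheArgs(8 * 1024 * 1024, Paths.get("/tmp"));
  }
}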
@@ -15,17 +15,14 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.fs.swift;
-
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.PathFilter;
-
 /**
- * A path filter that accepts everything
+ * block caching for use in object store clients.
  */
-public class AcceptAllFilter implements PathFilter {
-  @Override
-  public boolean accept(Path file) {
-    return true;
-  }
-}
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+package org.apache.hadoop.fs.impl.prefetch;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
@@ -15,6 +15,11 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
+/**
+ * Filesystem implementations that allow Hadoop to read directly from
+ * the local file system.
+ */
 @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
 @InterfaceStability.Unstable
 package org.apache.hadoop.fs.local;
@@ -333,15 +333,24 @@ class CopyCommands {
    */
   public static class AppendToFile extends CommandWithDestination {
     public static final String NAME = "appendToFile";
-    public static final String USAGE = "<localsrc> ... <dst>";
+    public static final String USAGE = "[-n] <localsrc> ... <dst>";
     public static final String DESCRIPTION =
         "Appends the contents of all the given local files to the " +
         "given dst file. The dst file will be created if it does " +
         "not exist. If <localSrc> is -, then the input is read " +
-        "from stdin.";
+        "from stdin. Option -n represents that use NEW_BLOCK create flag to append file.";

     private static final int DEFAULT_IO_LENGTH = 1024 * 1024;
     boolean readStdin = false;
+    private boolean appendToNewBlock = false;
+
+    public boolean isAppendToNewBlock() {
+      return appendToNewBlock;
+    }
+
+    public void setAppendToNewBlock(boolean appendToNewBlock) {
+      this.appendToNewBlock = appendToNewBlock;
+    }

     // commands operating on local paths have no need for glob expansion
     @Override
@@ -372,6 +381,9 @@ class CopyCommands {
         throw new IOException("missing destination argument");
       }

+      CommandFormat cf = new CommandFormat(2, Integer.MAX_VALUE, "n");
+      cf.parse(args);
+      appendToNewBlock = cf.getOpt("n");
       getRemoteDestination(args);
       super.processOptions(args);
     }
@@ -385,7 +397,8 @@ class CopyCommands {
       }

       InputStream is = null;
-      try (FSDataOutputStream fos = dst.fs.append(dst.path)) {
+      try (FSDataOutputStream fos = appendToNewBlock ?
+          dst.fs.append(dst.path, true) : dst.fs.append(dst.path)) {
         if (readStdin) {
           if (args.size() == 0) {
             IOUtils.copyBytes(System.in, fos, DEFAULT_IO_LENGTH);
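For orientation, a sketch of the API-level difference between the two branches of that conditional; the helper class, file system, path, and data are illustrative, while the two append() overloads are the ones the hunk itself calls. From the shell, the same choice is the new -n flag, e.g. hadoop fs -appendToFile -n local.txt /dst.txt.

import java.io.IOException;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative helper, not part of the patch.
public final class AppendSketch {
  static void append(FileSystem fs, Path path, byte[] data, boolean newBlock)
      throws IOException {
    // append(path, true) requests the NEW_BLOCK create flag, so the appended
    // bytes start in a fresh block; append(path) continues the current one.
    try (FSDataOutputStream out =
        newBlock ? fs.append(path, true) : fs.append(path)) {
      out.write(data);
    }
  }
}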
@@ -47,7 +47,6 @@ import org.apache.hadoop.io.DataInputBuffer;
 import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.SequenceFile;
-import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.compress.CompressionCodec;
 import org.apache.hadoop.io.compress.CompressionCodecFactory;
 import org.apache.hadoop.util.ReflectionUtils;
@@ -217,8 +216,8 @@ class Display extends FsCommand {

   protected class TextRecordInputStream extends InputStream {
     SequenceFile.Reader r;
-    Writable key;
-    Writable val;
+    Object key;
+    Object val;

     DataInputBuffer inbuf;
     DataOutputBuffer outbuf;
@@ -228,10 +227,8 @@ class Display extends FsCommand {
       final Configuration lconf = getConf();
       r = new SequenceFile.Reader(lconf,
          SequenceFile.Reader.file(fpath));
-      key = ReflectionUtils.newInstance(
-          r.getKeyClass().asSubclass(Writable.class), lconf);
-      val = ReflectionUtils.newInstance(
-          r.getValueClass().asSubclass(Writable.class), lconf);
+      key = ReflectionUtils.newInstance(r.getKeyClass(), lconf);
+      val = ReflectionUtils.newInstance(r.getValueClass(), lconf);
       inbuf = new DataInputBuffer();
       outbuf = new DataOutputBuffer();
     }
@@ -240,8 +237,11 @@ class Display extends FsCommand {
     public int read() throws IOException {
       int ret;
       if (null == inbuf || -1 == (ret = inbuf.read())) {
-        if (!r.next(key, val)) {
+        key = r.next(key);
+        if (key == null) {
           return -1;
+        } else {
+          val = r.getCurrentValue(val);
         }
         byte[] tmp = key.toString().getBytes(StandardCharsets.UTF_8);
         outbuf.write(tmp, 0, tmp.length);
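A self-contained sketch of the reader idiom the hunk switches to: the untyped next(key) returns the key object or null at end of file, and getCurrentValue(val) then fetches the matching value, so the stream no longer requires keys and values to be Writable. The input path here is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.util.ReflectionUtils;

public class SeqFileDump {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    try (SequenceFile.Reader r = new SequenceFile.Reader(conf,
        SequenceFile.Reader.file(new Path("/tmp/data.seq")))) {
      Object key = ReflectionUtils.newInstance(r.getKeyClass(), conf);
      Object val = ReflectionUtils.newInstance(r.getValueClass(), conf);
      // next(key) returns null once the file is exhausted
      while ((key = r.next(key)) != null) {
        val = r.getCurrentValue(val);
        System.out.println(key + "\t" + val);
      }
    }
  }
}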
@@ -633,7 +633,7 @@ public class PathData implements Comparable<PathData> {
     return awaitFuture(fs.openFile(path)
         .opt(FS_OPTION_OPENFILE_READ_POLICY,
             policy)
-        .opt(FS_OPTION_OPENFILE_LENGTH,
+        .optLong(FS_OPTION_OPENFILE_LENGTH,
             stat.getLen()) // file length hint for object stores
         .build());
   }
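For reference, the whole openFile() builder pattern that hunk belongs to, as a standalone sketch; optLong() passes the length hint as a long rather than round-tripping it through a string. The "sequential" read policy literal is an assumption for the example.

import static org.apache.hadoop.fs.Options.OpenFileOptions.FS_OPTION_OPENFILE_LENGTH;
import static org.apache.hadoop.fs.Options.OpenFileOptions.FS_OPTION_OPENFILE_READ_POLICY;
import static org.apache.hadoop.util.functional.FutureIO.awaitFuture;

import java.io.IOException;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class OpenFileSketch {
  static FSDataInputStream open(FileSystem fs, Path path, FileStatus stat)
      throws IOException {
    return awaitFuture(fs.openFile(path)
        .opt(FS_OPTION_OPENFILE_READ_POLICY, "sequential")
        .optLong(FS_OPTION_OPENFILE_LENGTH, stat.getLen())  // length hint
        .build());
  }
}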
@@ -15,6 +15,10 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
+/**
+ * Support for the execution of a file system command.
+ */
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
 package org.apache.hadoop.fs.shell;
@@ -20,6 +20,8 @@ package org.apache.hadoop.fs.statistics;

 import org.apache.hadoop.fs.statistics.impl.IOStatisticsContextIntegration;

+import static java.util.Objects.requireNonNull;
+
 /**
  * An interface defined to capture thread-level IOStatistics by using per
  * thread context.
@@ -67,7 +69,11 @@ public interface IOStatisticsContext extends IOStatisticsSource {
    * @return instance of IOStatisticsContext for the context.
    */
   static IOStatisticsContext getCurrentIOStatisticsContext() {
-    return IOStatisticsContextIntegration.getCurrentIOStatisticsContext();
+    // the null check is just a safety check to highlight exactly where a null value would
+    // be returned if HADOOP-18456 has resurfaced.
+    return requireNonNull(
+        IOStatisticsContextIntegration.getCurrentIOStatisticsContext(),
+        "Null IOStatisticsContext");
   }

   /**
@@ -80,4 +86,14 @@ public interface IOStatisticsContext extends IOStatisticsSource {
     IOStatisticsContextIntegration.setThreadIOStatisticsContext(
         statisticsContext);
   }
+
+  /**
+   * Static probe to check if the thread-level IO statistics enabled.
+   *
+   * @return if the thread-level IO statistics enabled.
+   */
+  static boolean enabled() {
+    return IOStatisticsContextIntegration.isIOStatisticsThreadLevelEnabled();
+  }
+
 }
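A sketch of how a caller can use the new probe to skip statistics work entirely when collection is off; the logging here is a stand-in for whatever aggregation the caller would actually do.

import org.apache.hadoop.fs.statistics.IOStatisticsContext;

public class ProbeSketch {
  static void maybeReport() {
    // cheap static check before touching the per-thread context
    if (IOStatisticsContext.enabled()) {
      IOStatisticsContext ctx =
          IOStatisticsContext.getCurrentIOStatisticsContext();
      System.out.println("thread IOStatistics: " + ctx.getIOStatistics());
    }
  }
}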
@@ -0,0 +1,75 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.statistics;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Setter for IOStatistics entries.
+ * These operations have been in the read/write API
+ * {@code IOStatisticsStore} since IOStatistics
+ * was added; extracting into its own interface allows for
+ * {@link IOStatisticsSnapshot} to also support it.
+ * These are the simple setters, they don't provide for increments,
+ * decrements, calculation of min/max/mean etc.
+ * @since The interface and IOStatisticsSnapshot support was added <i>after</i> Hadoop 3.3.5
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public interface IOStatisticsSetters extends IOStatistics {
+
+  /**
+   * Set a counter.
+   *
+   * No-op if the counter is unknown.
+   * @param key statistics key
+   * @param value value to set
+   */
+  void setCounter(String key, long value);
+
+  /**
+   * Set a gauge.
+   *
+   * @param key statistics key
+   * @param value value to set
+   */
+  void setGauge(String key, long value);
+
+  /**
+   * Set a maximum.
+   * @param key statistics key
+   * @param value value to set
+   */
+  void setMaximum(String key, long value);
+
+  /**
+   * Set a minimum.
+   * @param key statistics key
+   * @param value value to set
+   */
+  void setMinimum(String key, long value);
+
+  /**
+   * Set a mean statistic to a given value.
+   * @param key statistic key
+   * @param value new value.
+   */
+  void setMeanStatistic(String key, MeanStatistic value);
+}
@@ -62,7 +62,8 @@ import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.snapshotM
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public final class IOStatisticsSnapshot
-    implements IOStatistics, Serializable, IOStatisticsAggregator {
+    implements IOStatistics, Serializable, IOStatisticsAggregator,
+    IOStatisticsSetters {

   private static final long serialVersionUID = -1762522703841538084L;
@@ -222,6 +223,33 @@ public final class IOStatisticsSnapshot
     return meanStatistics;
   }

+  @Override
+  public synchronized void setCounter(final String key, final long value) {
+    counters().put(key, value);
+  }
+
+  @Override
+  public synchronized void setGauge(final String key, final long value) {
+    gauges().put(key, value);
+
+  }
+
+  @Override
+  public synchronized void setMaximum(final String key, final long value) {
+    maximums().put(key, value);
+
+  }
+
+  @Override
+  public synchronized void setMinimum(final String key, final long value) {
+    minimums().put(key, value);
+  }
+
+  @Override
+  public void setMeanStatistic(final String key, final MeanStatistic value) {
+    meanStatistics().put(key, value);
+  }
+
   @Override
   public String toString() {
     return ioStatisticsToString(this);
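A minimal sketch of what the snapshot now supports: direct assignment of entries which, per the put() calls above, creates the entry if absent rather than treating an unknown key as a no-op. The key names are illustrative.

import org.apache.hadoop.fs.statistics.IOStatisticsSnapshot;

public class SnapshotSetterSketch {
  public static void main(String[] args) {
    IOStatisticsSnapshot snapshot = new IOStatisticsSnapshot();
    snapshot.setCounter("bytes_read", 1024L);   // creates or overwrites
    snapshot.setGauge("active_streams", 2L);
    System.out.println(snapshot);               // serializable, printable
  }
}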
@@ -47,7 +47,7 @@ public final class StreamStatisticNames {
   public static final String STREAM_READ_ABORTED = "stream_aborted";

   /**
-   * Bytes read from an input stream in read() calls.
+   * Bytes read from an input stream in read()/readVectored() calls.
    * Does not include bytes read and then discarded in seek/close etc.
    * These are the bytes returned to the caller.
    * Value: {@value}.
@@ -110,6 +110,34 @@ public final class StreamStatisticNames {
   public static final String STREAM_READ_OPERATIONS =
       "stream_read_operations";

+  /**
+   * Count of readVectored() operations in an input stream.
+   * Value: {@value}.
+   */
+  public static final String STREAM_READ_VECTORED_OPERATIONS =
+      "stream_read_vectored_operations";
+
+  /**
+   * Count of bytes discarded during readVectored() operation
+   * in an input stream.
+   * Value: {@value}.
+   */
+  public static final String STREAM_READ_VECTORED_READ_BYTES_DISCARDED =
+      "stream_read_vectored_read_bytes_discarded";
+
+  /**
+   * Count of incoming file ranges during readVectored() operation.
+   * Value: {@value}
+   */
+  public static final String STREAM_READ_VECTORED_INCOMING_RANGES =
+      "stream_read_vectored_incoming_ranges";
+  /**
+   * Count of combined file ranges during readVectored() operation.
+   * Value: {@value}
+   */
+  public static final String STREAM_READ_VECTORED_COMBINED_RANGES =
+      "stream_read_vectored_combined_ranges";
+
   /**
    * Count of incomplete read() operations in an input stream,
    * that is, when the bytes returned were less than that requested.
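A sketch of reading one of the new vectored-IO counters off a stream's statistics; it assumes the stream's statistics store actually registered the vectored counters, and returns null otherwise.

import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.statistics.IOStatistics;
import org.apache.hadoop.fs.statistics.StreamStatisticNames;

public class VectoredStatsSketch {
  static Long vectoredOps(FSDataInputStream stream) {
    // FSDataInputStream is an IOStatisticsSource; may return null
    IOStatistics stats = stream.getIOStatistics();
    return stats == null ? null
        : stats.counters().get(
            StreamStatisticNames.STREAM_READ_VECTORED_OPERATIONS);
  }
}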
@@ -387,6 +415,46 @@ public final class StreamStatisticNames {
   public static final String BLOCKS_RELEASED
       = "blocks_released";

+  /**
+   * Total number of prefetching operations executed.
+   */
+  public static final String STREAM_READ_PREFETCH_OPERATIONS
+      = "stream_read_prefetch_operations";
+
+  /**
+   * Total number of block in disk cache.
+   */
+  public static final String STREAM_READ_BLOCKS_IN_FILE_CACHE
+      = "stream_read_blocks_in_cache";
+
+  /**
+   * Total number of active prefetch operations.
+   */
+  public static final String STREAM_READ_ACTIVE_PREFETCH_OPERATIONS
+      = "stream_read_active_prefetch_operations";
+
+  /**
+   * Total bytes of memory in use by this input stream.
+   */
+  public static final String STREAM_READ_ACTIVE_MEMORY_IN_USE
+      = "stream_read_active_memory_in_use";
+
+  /**
+   * count/duration of reading a remote block.
+   *
+   * Value: {@value}.
+   */
+  public static final String STREAM_READ_REMOTE_BLOCK_READ
+      = "stream_read_block_read";
+
+  /**
+   * count/duration of acquiring a buffer and reading to it.
+   *
+   * Value: {@value}.
+   */
+  public static final String STREAM_READ_BLOCK_ACQUIRE_AND_READ
+      = "stream_read_block_acquire_read";
+
   private StreamStatisticNames() {
   }
@@ -16,7 +16,7 @@
  * limitations under the License.
  */

-package org.apache.hadoop.fs.s3a.statistics.impl;
+package org.apache.hadoop.fs.statistics.impl;

 import javax.annotation.Nullable;
 import java.time.Duration;
@@ -25,7 +25,6 @@ import java.util.concurrent.atomic.AtomicLong;

 import org.apache.hadoop.fs.statistics.IOStatistics;
 import org.apache.hadoop.fs.statistics.MeanStatistic;
-import org.apache.hadoop.fs.statistics.impl.IOStatisticsStore;

 /**
  * This may seem odd having an IOStatisticsStore which does nothing
@@ -29,8 +29,8 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.impl.WeakReferenceThreadMap;
 import org.apache.hadoop.fs.statistics.IOStatisticsContext;

-import static org.apache.hadoop.fs.CommonConfigurationKeys.THREAD_LEVEL_IOSTATISTICS_ENABLED;
-import static org.apache.hadoop.fs.CommonConfigurationKeys.THREAD_LEVEL_IOSTATISTICS_ENABLED_DEFAULT;
+import static org.apache.hadoop.fs.CommonConfigurationKeys.IOSTATISTICS_THREAD_LEVEL_ENABLED;
+import static org.apache.hadoop.fs.CommonConfigurationKeys.IOSTATISTICS_THREAD_LEVEL_ENABLED_DEFAULT;

 /**
  * A Utility class for IOStatisticsContext, which helps in creating and
@@ -76,8 +76,17 @@ public final class IOStatisticsContextIntegration {
     // Work out if the current context has thread level IOStatistics enabled.
     final Configuration configuration = new Configuration();
     isThreadIOStatsEnabled =
-        configuration.getBoolean(THREAD_LEVEL_IOSTATISTICS_ENABLED,
-            THREAD_LEVEL_IOSTATISTICS_ENABLED_DEFAULT);
+        configuration.getBoolean(IOSTATISTICS_THREAD_LEVEL_ENABLED,
+            IOSTATISTICS_THREAD_LEVEL_ENABLED_DEFAULT);
   }

+  /**
+   * Static probe to check if the thread-level IO statistics enabled.
+   *
+   * @return if the thread-level IO statistics enabled.
+   */
+  public static boolean isIOStatisticsThreadLevelEnabled() {
+    return isThreadIOStatsEnabled;
+  }
+
   /**
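A sketch of switching the feature on through the renamed key. Note that, per the static initializer above, the integration reads the flag from a fresh default Configuration at class-load time, so in practice the option is set in core-site.xml (or other default resources) before the class is first used.

import static org.apache.hadoop.fs.CommonConfigurationKeys.IOSTATISTICS_THREAD_LEVEL_ENABLED;

import org.apache.hadoop.conf.Configuration;

public class EnableThreadStats {
  static Configuration enabled() {
    Configuration conf = new Configuration();
    // referencing the constant avoids hard-coding the renamed string
    conf.setBoolean(IOSTATISTICS_THREAD_LEVEL_ENABLED, true);
    return conf;
  }
}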
@@ -91,7 +100,10 @@ public final class IOStatisticsContextIntegration {
    * @return an instance of IOStatisticsContext.
    */
   private static IOStatisticsContext createNewInstance(Long key) {
-    return new IOStatisticsContextImpl(key, INSTANCE_ID.getAndIncrement());
+    IOStatisticsContextImpl instance =
+        new IOStatisticsContextImpl(key, INSTANCE_ID.getAndIncrement());
+    LOG.debug("Created instance {}", instance);
+    return instance;
   }

   /**
@@ -122,9 +134,11 @@ public final class IOStatisticsContextIntegration {
       IOStatisticsContext statisticsContext) {
     if (isThreadIOStatsEnabled) {
       if (statisticsContext == null) {
+        // new value is null, so remove it
         ACTIVE_IOSTATS_CONTEXT.removeForCurrentThread();
-      }
-      if (ACTIVE_IOSTATS_CONTEXT.getForCurrentThread() != statisticsContext) {
+      } else {
+        // the setter is efficient in that it does not create a new
+        // reference if the context is unchanged.
         ACTIVE_IOSTATS_CONTEXT.setForCurrentThread(statisticsContext);
       }
     }
@@ -24,6 +24,7 @@ import java.util.concurrent.atomic.AtomicLong;
 import org.apache.hadoop.fs.statistics.IOStatistics;
 import org.apache.hadoop.fs.statistics.IOStatisticsAggregator;
 import org.apache.hadoop.fs.statistics.DurationTrackerFactory;
+import org.apache.hadoop.fs.statistics.IOStatisticsSetters;
 import org.apache.hadoop.fs.statistics.MeanStatistic;

 /**
@@ -31,6 +32,7 @@ import org.apache.hadoop.fs.statistics.MeanStatistic;
  * use in classes which track statistics for reporting.
  */
 public interface IOStatisticsStore extends IOStatistics,
+    IOStatisticsSetters,
     IOStatisticsAggregator,
     DurationTrackerFactory {
@@ -56,24 +58,6 @@ public interface IOStatisticsStore extends IOStatistics,
    */
   long incrementCounter(String key, long value);

-  /**
-   * Set a counter.
-   *
-   * No-op if the counter is unknown.
-   * @param key statistics key
-   * @param value value to set
-   */
-  void setCounter(String key, long value);
-
-  /**
-   * Set a gauge.
-   *
-   * No-op if the gauge is unknown.
-   * @param key statistics key
-   * @param value value to set
-   */
-  void setGauge(String key, long value);
-
   /**
    * Increment a gauge.
    * <p>
@@ -85,14 +69,6 @@ public interface IOStatisticsStore extends IOStatistics,
    */
   long incrementGauge(String key, long value);

-  /**
-   * Set a maximum.
-   * No-op if the maximum is unknown.
-   * @param key statistics key
-   * @param value value to set
-   */
-  void setMaximum(String key, long value);
-
   /**
    * Increment a maximum.
    * <p>
@@ -104,16 +80,6 @@ public interface IOStatisticsStore extends IOStatistics,
    */
   long incrementMaximum(String key, long value);

-  /**
-   * Set a minimum.
-   * <p>
-   * No-op if the minimum is unknown.
-   * </p>
-   * @param key statistics key
-   * @param value value to set
-   */
-  void setMinimum(String key, long value);
-
   /**
    * Increment a minimum.
    * <p>
@@ -147,16 +113,6 @@ public interface IOStatisticsStore extends IOStatistics,
    */
   void addMaximumSample(String key, long value);

-  /**
-   * Set a mean statistic to a given value.
-   * <p>
-   * No-op if the key is unknown.
-   * </p>
-   * @param key statistic key
-   * @param value new value.
-   */
-  void setMeanStatistic(String key, MeanStatistic value);
-
   /**
    * Add a sample to the mean statistics.
    * <p>
@@ -67,6 +67,17 @@ public interface IOStatisticsStoreBuilder {
   IOStatisticsStoreBuilder withDurationTracking(
       String... prefixes);

+  /**
+   * A value which is tracked with counter/min/max/mean.
+   * Similar to {@link #withDurationTracking(String...)}
+   * but without the failure option and with the same name
+   * across all categories.
+   * @param prefixes prefixes to add.
+   * @return the builder
+   */
+  IOStatisticsStoreBuilder withSampleTracking(
+      String... prefixes);
+
   /**
    * Build the collector.
    * @return a new collector.
@@ -92,6 +92,18 @@ final class IOStatisticsStoreBuilderImpl implements
     return this;
   }

+  @Override
+  public IOStatisticsStoreBuilderImpl withSampleTracking(
+      final String... prefixes) {
+    for (String p : prefixes) {
+      withCounters(p);
+      withMinimums(p);
+      withMaximums(p);
+      withMeanStatistics(p);
+    }
+    return this;
+  }
+
   @Override
   public IOStatisticsStore build() {
     return new IOStatisticsStoreImpl(counters, gauges, minimums,
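A sketch of the new registration in use: one prefix gains a counter, minimum, maximum, and mean under the same name, fed here through the store's existing per-category sample calls. The prefix name is illustrative, and the iostatisticsStore() factory is the usual IOStatisticsBinding entry point.

import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.iostatisticsStore;

import org.apache.hadoop.fs.statistics.impl.IOStatisticsStore;

public class SampleTrackingSketch {
  public static void main(String[] args) {
    IOStatisticsStore store = iostatisticsStore()
        .withSampleTracking("read_size")   // registers counter/min/max/mean
        .build();
    // record one 4 KiB sample across all four categories
    store.incrementCounter("read_size");
    store.addMinimumSample("read_size", 4096);
    store.addMaximumSample("read_size", 4096);
    store.addMeanStatisticSample("read_size", 4096);
    System.out.println(store);
  }
}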
@@ -190,7 +190,7 @@ final class IOStatisticsStoreImpl extends WrappedIOStatistics
       return counter.get();
     } else {
       long l = incAtomicLong(counter, value);
-      LOG.debug("Incrementing counter {} by {} with final value {}",
+      LOG.trace("Incrementing counter {} by {} with final value {}",
           key, value, l);
       return l;
     }
@@ -144,7 +144,7 @@ public final class HttpServer2 implements FilterContainer {

   public static final String HTTP_SOCKET_BACKLOG_SIZE_KEY =
       "hadoop.http.socket.backlog.size";
-  public static final int HTTP_SOCKET_BACKLOG_SIZE_DEFAULT = 128;
+  public static final int HTTP_SOCKET_BACKLOG_SIZE_DEFAULT = 500;
   public static final String HTTP_MAX_THREADS_KEY = "hadoop.http.max.threads";
   public static final String HTTP_ACCEPTOR_COUNT_KEY =
       "hadoop.http.acceptor.count";
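The accept backlog stays configurable through the key defined just above; a sketch of overriding the raised default of 500, with the chosen value purely illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.HttpServer2;

public class BacklogConfig {
  static Configuration withLargerBacklog() {
    Configuration conf = new Configuration();
    // raises the server socket accept queue above the new 500 default
    conf.setInt(HttpServer2.HTTP_SOCKET_BACKLOG_SIZE_KEY, 1024);
    return conf;
  }
}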
@@ -497,7 +497,12 @@ public final class HttpServer2 implements FilterContainer {
           prefix -> this.conf.get(prefix + "type")
               .equals(PseudoAuthenticationHandler.TYPE))
       ) {
-        server.initSpnego(conf, hostName, usernameConfKey, keytabConfKey);
+        server.initSpnego(
+            conf,
+            hostName,
+            getFilterProperties(conf, authFilterConfigurationPrefixes),
+            usernameConfKey,
+            keytabConfKey);
       }

       for (URI ep : endpoints) {
@@ -1340,8 +1345,12 @@ public final class HttpServer2 implements FilterContainer {
   }

   private void initSpnego(Configuration conf, String hostName,
-      String usernameConfKey, String keytabConfKey) throws IOException {
+      Properties authFilterConfigurationPrefixes, String usernameConfKey, String keytabConfKey)
+      throws IOException {
     Map<String, String> params = new HashMap<>();
+    for (Map.Entry<Object, Object> entry : authFilterConfigurationPrefixes.entrySet()) {
+      params.put(String.valueOf(entry.getKey()), String.valueOf(entry.getValue()));
+    }
     String principalInConf = conf.get(usernameConfKey);
     if (principalInConf != null && !principalInConf.isEmpty()) {
       params.put("kerberos.principal", SecurityUtil.getServerPrincipal(
@@ -1967,4 +1976,8 @@ public final class HttpServer2 implements FilterContainer {
     return metrics;
   }

+  @VisibleForTesting
+  List<ServerConnector> getListeners() {
+    return listeners;
+  }
 }
@@ -15,6 +15,10 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
+/**
+ * Support for embedded HTTP services.
+ */
 @InterfaceAudience.LimitedPrivate({"HBase", "HDFS", "MapReduce"})
 @InterfaceStability.Unstable
 package org.apache.hadoop.http;
@@ -158,6 +158,9 @@ public class DefaultStringifier<T> implements Stringifier<T> {
   public static <K> void storeArray(Configuration conf, K[] items,
       String keyName) throws IOException {

+    if (items.length == 0) {
+      throw new IndexOutOfBoundsException();
+    }
     DefaultStringifier<K> stringifier = new DefaultStringifier<K>(conf,
         GenericsUtil.getClass(items[0]));
     try {
@@ -32,7 +32,6 @@ import java.nio.file.StandardOpenOption;
 import java.util.ArrayList;
 import java.util.List;

-import org.apache.commons.logging.Log;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -246,30 +245,6 @@ public class IOUtils {
     }
   }

-  /**
-   * Close the Closeable objects and <b>ignore</b> any {@link Throwable} or
-   * null pointers. Must only be used for cleanup in exception handlers.
-   *
-   * @param log the log to record problems to at debug level. Can be null.
-   * @param closeables the objects to close
-   * @deprecated use {@link #cleanupWithLogger(Logger, java.io.Closeable...)}
-   * instead
-   */
-  @Deprecated
-  public static void cleanup(Log log, java.io.Closeable... closeables) {
-    for (java.io.Closeable c : closeables) {
-      if (c != null) {
-        try {
-          c.close();
-        } catch(Throwable e) {
-          if (log != null && log.isDebugEnabled()) {
-            log.debug("Exception in closing " + c, e);
-          }
-        }
-      }
-    }
-  }
-
   /**
    * Close the Closeable objects and <b>ignore</b> any {@link Throwable} or
    * null pointers. Must only be used for cleanup in exception handlers.
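With the commons-logging overload removed, the SLF4J variant named in the deleted @deprecated tag is the remaining entry point; a sketch of migrated caller code:

import java.io.Closeable;

import org.apache.hadoop.io.IOUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class CleanupSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(CleanupSketch.class);

  static void closeQuietly(Closeable... streams) {
    // logs close() failures and swallows them; for exception paths only
    IOUtils.cleanupWithLogger(LOG, streams);
  }
}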
@@ -2006,7 +2006,7 @@ public class SequenceFile {
             FS_OPTION_OPENFILE_READ_POLICY_SEQUENTIAL)
         .opt(FS_OPTION_OPENFILE_BUFFER_SIZE, bufferSize);
     if (length >= 0) {
-      builder.opt(FS_OPTION_OPENFILE_LENGTH, length);
+      builder.optLong(FS_OPTION_OPENFILE_LENGTH, length);
     }
     return awaitFuture(builder.build());
   }
@@ -92,7 +92,7 @@ public class WritableName {
       ) throws IOException {
     Class<?> writableClass = NAME_TO_CLASS.get(name);
     if (writableClass != null)
-      return writableClass.asSubclass(Writable.class);
+      return writableClass;
     try {
       return conf.getClassByName(name);
     } catch (ClassNotFoundException e) {
Some files were not shown because too many files have changed in this diff.