430914310143111431214313143141431514316143171431814319143201432114322143231432414325143261432714328143291433014331143321433314334143351433614337143381433914340143411434214343143441434514346143471434814349143501435114352143531435414355143561435714358143591436014361143621436314364143651436614367143681436914370143711437214373143741437514376143771437814379143801438114382143831438414385143861438714388143891439014391143921439314394143951439614397143981439914400144011440214403144041440514406144071440814409144101441114412144131441414415144161441714418144191442014421144221442314424144251442614427144281442914430144311443214433144341443514436144371443814439144401444114442144431444414445144461444714448144491445014451144521445314454144551445614457144581445914460144611446214463144641446514466144671446814469144701447114472144731447414475144761447714478144791448014481144821448314484144851448614487144881448914490144911449214493144941449514496144971449814499145001450114502145031450414505145061450714508145091451014511145121451314514145151451614517145181451914520145211452214523145241452514526145271452814529145301453114532145331453414535145361453714538145391454014541145421454314544145451454614547145481454914550145511455214553145541455514556145571455814559145601456114562145631456414565145661456714568145691457014571145721457314574145751457614577145781457914580145811458214583145841458514586145871458814589145901459114592145931459414595145961459714598145991460014601146021460314604146051460614607146081460914610146111461214613146141461514616146171461814619146201462114622146231462414625146261462714628146291463014631146321463314634146351463614637146381463914640146411464214643146441464514646146471464814649146501465114652146531465414655146561465714658146591466014661146621466314664146651466614667146681466914670146711467214673146741467514676146771467814679146801468114682146831468414685146861468714688146891469014691146921469314694146951469614697146981469914700147011470214703147041470514706147071470814709147101471114712147131471414715147161471714718147191472014721147221472314724147251472614727147281472914730147311473214733147341473514736147371473814739147401474114742147431474414745147461474714748147491475014751147521475314754147551475614757147581475914760147611476214763147641476514766147671476814769147701477114772147731477414775147761477714778147791478014781147821478314784147851478614787147881478914790147911479214793147941479514796147971479814799148001480114802148031480414805148061480714808148091481014811148121481314814148151481614817148181481914820148211482214823148241482514826148271482814829148301483114832148331483414835148361483714838148391484014841148421484314844148451484614847148481484914850148511485214853148541485514856148571485814859148601486114862148631486414865148661486714868148691487014871148721487314874148751487614877148781487914880148811488214883148841488514886148871488814889148901489114892148931489414895148961489714898148991490014901149021490314904149051490614907149081490914910149111491214913149141491514916149171491814919149201492114922149231492414925149261492714928149291493014931149321493314934149351493614937149381493914940149411494214943149441494514946149471494814949149501495114952149531495414955149561495714958149591496014961149621496314964149651496614967149681496914970149711497214973149741497514976149771497814979149801498114982149831498414985149861498714988149891499014991149921499314994149951499614997149981499915000150011500215003150041500515006150071500815009150101501115012150131501415015150161501715018150191
502015021150221502315024150251502615027150281502915030150311503215033150341503515036150371503815039150401504115042150431504415045150461504715048150491505015051150521505315054150551505615057150581505915060150611506215063150641506515066150671506815069150701507115072150731507415075150761507715078150791508015081150821508315084150851508615087150881508915090150911509215093150941509515096150971509815099151001510115102151031510415105151061510715108151091511015111151121511315114151151511615117151181511915120151211512215123151241512515126151271512815129151301513115132151331513415135151361513715138151391514015141151421514315144151451514615147151481514915150151511515215153151541515515156151571515815159151601516115162151631516415165151661516715168151691517015171151721517315174151751517615177151781517915180151811518215183151841518515186151871518815189151901519115192151931519415195151961519715198151991520015201152021520315204152051520615207152081520915210152111521215213152141521515216152171521815219152201522115222152231522415225152261522715228152291523015231152321523315234152351523615237152381523915240152411524215243152441524515246152471524815249152501525115252152531525415255152561525715258152591526015261152621526315264152651526615267152681526915270152711527215273152741527515276152771527815279152801528115282152831528415285152861528715288152891529015291152921529315294152951529615297152981529915300153011530215303153041530515306153071530815309153101531115312153131531415315153161531715318153191532015321153221532315324153251532615327153281532915330153311533215333153341533515336153371533815339153401534115342153431534415345153461534715348153491535015351153521535315354153551535615357153581535915360153611536215363153641536515366153671536815369153701537115372153731537415375153761537715378153791538015381153821538315384153851538615387153881538915390153911539215393153941539515396153971539815399154001540115402154031540415405154061540715408154091541015411154121541315414154151541615417154181541915420154211542215423154241542515426154271542815429154301543115432154331543415435154361543715438154391544015441154421544315444154451544615447154481544915450154511545215453154541545515456154571545815459154601546115462154631546415465154661546715468154691547015471154721547315474154751547615477154781547915480154811548215483154841548515486154871548815489154901549115492154931549415495154961549715498154991550015501155021550315504155051550615507155081550915510155111551215513155141551515516155171551815519155201552115522155231552415525155261552715528155291553015531155321553315534155351553615537155381553915540155411554215543155441554515546155471554815549155501555115552155531555415555155561555715558155591556015561155621556315564155651556615567155681556915570155711557215573155741557515576155771557815579155801558115582155831558415585155861558715588155891559015591155921559315594155951559615597155981559915600156011560215603156041560515606156071560815609156101561115612156131561415615156161561715618156191562015621156221562315624156251562615627156281562915630156311563215633156341563515636156371563815639156401564115642156431564415645156461564715648156491565015651156521565315654156551565615657156581565915660156611566215663156641566515666156671566815669156701567115672156731567415675156761567715678156791568015681156821568315684156851568615687156881568915690156911569215693156941569515696156971569815699157001570115702157031570415705157061570715708157091571015711157121571315714157151571615717157181571915720157211572215723157241572515726157271572815729157301
573115732157331573415735157361573715738157391574015741157421574315744157451574615747157481574915750157511575215753157541575515756157571575815759157601576115762157631576415765157661576715768157691577015771157721577315774157751577615777157781577915780157811578215783157841578515786157871578815789157901579115792157931579415795157961579715798157991580015801158021580315804158051580615807158081580915810158111581215813158141581515816158171581815819158201582115822158231582415825158261582715828158291583015831158321583315834158351583615837158381583915840158411584215843158441584515846158471584815849158501585115852158531585415855158561585715858158591586015861158621586315864158651586615867158681586915870158711587215873158741587515876158771587815879158801588115882158831588415885158861588715888158891589015891158921589315894158951589615897158981589915900159011590215903159041590515906159071590815909159101591115912159131591415915159161591715918159191592015921159221592315924159251592615927159281592915930159311593215933159341593515936159371593815939159401594115942159431594415945159461594715948159491595015951159521595315954159551595615957159581595915960159611596215963159641596515966159671596815969159701597115972159731597415975159761597715978159791598015981159821598315984159851598615987159881598915990159911599215993159941599515996159971599815999160001600116002160031600416005160061600716008160091601016011160121601316014160151601616017160181601916020160211602216023160241602516026160271602816029160301603116032160331603416035160361603716038160391604016041160421604316044160451604616047160481604916050160511605216053160541605516056160571605816059160601606116062160631606416065160661606716068160691607016071160721607316074160751607616077160781607916080160811608216083160841608516086160871608816089160901609116092160931609416095160961609716098160991610016101161021610316104161051610616107161081610916110161111611216113161141611516116161171611816119161201612116122161231612416125161261612716128161291613016131161321613316134161351613616137161381613916140161411614216143161441614516146161471614816149161501615116152161531615416155161561615716158161591616016161161621616316164161651616616167161681616916170161711617216173161741617516176161771617816179161801618116182161831618416185161861618716188161891619016191161921619316194161951619616197161981619916200162011620216203162041620516206162071620816209162101621116212162131621416215162161621716218162191622016221162221622316224162251622616227162281622916230162311623216233162341623516236162371623816239162401624116242162431624416245162461624716248162491625016251162521625316254162551625616257162581625916260162611626216263162641626516266162671626816269162701627116272162731627416275162761627716278162791628016281162821628316284162851628616287162881628916290162911629216293162941629516296162971629816299163001630116302163031630416305163061630716308163091631016311163121631316314163151631616317163181631916320163211632216323163241632516326163271632816329163301633116332163331633416335163361633716338163391634016341163421634316344163451634616347163481634916350163511635216353163541635516356163571635816359163601636116362163631636416365163661636716368163691637016371163721637316374163751637616377163781637916380163811638216383163841638516386163871638816389163901639116392163931639416395163961639716398163991640016401164021640316404164051640616407164081640916410164111641216413164141641516416164171641816419164201642116422164231642416425164261642716428164291643016431164321643316434164351643616437164381643916440164411
644216443164441644516446164471644816449164501645116452164531645416455164561645716458164591646016461164621646316464164651646616467164681646916470164711647216473164741647516476164771647816479164801648116482164831648416485164861648716488164891649016491164921649316494164951649616497164981649916500165011650216503165041650516506165071650816509165101651116512165131651416515165161651716518165191652016521165221652316524165251652616527165281652916530165311653216533165341653516536165371653816539165401654116542165431654416545165461654716548165491655016551165521655316554165551655616557165581655916560165611656216563165641656516566165671656816569165701657116572165731657416575165761657716578165791658016581165821658316584165851658616587165881658916590165911659216593165941659516596165971659816599166001660116602166031660416605166061660716608166091661016611166121661316614166151661616617166181661916620166211662216623166241662516626166271662816629166301663116632166331663416635166361663716638166391664016641166421664316644166451664616647166481664916650166511665216653166541665516656166571665816659166601666116662166631666416665166661666716668166691667016671166721667316674166751667616677166781667916680166811668216683166841668516686166871668816689166901669116692166931669416695166961669716698166991670016701167021670316704167051670616707167081670916710167111671216713167141671516716167171671816719167201672116722167231672416725167261672716728167291673016731167321673316734167351673616737167381673916740167411674216743167441674516746167471674816749167501675116752167531675416755167561675716758167591676016761167621676316764167651676616767167681676916770167711677216773167741677516776167771677816779167801678116782167831678416785167861678716788167891679016791167921679316794167951679616797167981679916800168011680216803168041680516806168071680816809168101681116812168131681416815168161681716818168191682016821168221682316824168251682616827168281682916830168311683216833168341683516836168371683816839168401684116842168431684416845168461684716848168491685016851168521685316854168551685616857168581685916860168611686216863168641686516866168671686816869168701687116872168731687416875168761687716878168791688016881168821688316884168851688616887168881688916890168911689216893168941689516896168971689816899169001690116902169031690416905169061690716908169091691016911169121691316914169151691616917169181691916920169211692216923169241692516926169271692816929169301693116932169331693416935169361693716938169391694016941169421694316944169451694616947169481694916950169511695216953169541695516956169571695816959169601696116962169631696416965169661696716968169691697016971169721697316974169751697616977169781697916980169811698216983169841698516986169871698816989169901699116992169931699416995169961699716998169991700017001170021700317004170051700617007170081700917010170111701217013170141701517016170171701817019170201702117022170231702417025170261702717028170291703017031170321703317034170351703617037170381703917040170411704217043170441704517046170471704817049170501705117052170531705417055170561705717058170591706017061170621706317064170651706617067170681706917070170711707217073170741707517076170771707817079170801708117082170831708417085170861708717088170891709017091170921709317094170951709617097170981709917100171011710217103171041710517106171071710817109171101711117112171131711417115171161711717118171191712017121171221712317124171251712617127171281712917130171311713217133171341713517136171371713817139171401714117142171431714417145171461714717148171491715017151171521
715317154171551715617157171581715917160171611716217163171641716517166171671716817169171701717117172171731717417175171761717717178171791718017181171821718317184171851718617187171881718917190171911719217193171941719517196171971719817199172001720117202172031720417205172061720717208172091721017211172121721317214172151721617217172181721917220172211722217223172241722517226172271722817229172301723117232172331723417235172361723717238172391724017241172421724317244172451724617247172481724917250172511725217253172541725517256172571725817259172601726117262172631726417265172661726717268172691727017271172721727317274172751727617277172781727917280172811728217283172841728517286172871728817289172901729117292172931729417295172961729717298172991730017301173021730317304173051730617307173081730917310173111731217313173141731517316173171731817319173201732117322173231732417325173261732717328173291733017331173321733317334173351733617337173381733917340173411734217343173441734517346173471734817349173501735117352173531735417355173561735717358173591736017361173621736317364173651736617367173681736917370173711737217373173741737517376173771737817379173801738117382173831738417385173861738717388173891739017391173921739317394173951739617397173981739917400174011740217403174041740517406174071740817409174101741117412174131741417415174161741717418174191742017421174221742317424174251742617427174281742917430174311743217433174341743517436174371743817439174401744117442174431744417445174461744717448174491745017451174521745317454174551745617457174581745917460174611746217463174641746517466174671746817469174701747117472174731747417475174761747717478174791748017481174821748317484174851748617487174881748917490174911749217493174941749517496174971749817499175001750117502175031750417505175061750717508175091751017511175121751317514175151751617517175181751917520175211752217523175241752517526175271752817529175301753117532175331753417535175361753717538175391754017541175421754317544175451754617547175481754917550175511755217553175541755517556175571755817559175601756117562175631756417565175661756717568175691757017571175721757317574175751757617577175781757917580175811758217583175841758517586175871758817589175901759117592175931759417595175961759717598175991760017601176021760317604176051760617607176081760917610176111761217613176141761517616176171761817619176201762117622176231762417625176261762717628176291763017631176321763317634176351763617637176381763917640176411764217643176441764517646176471764817649176501765117652176531765417655176561765717658176591766017661176621766317664176651766617667176681766917670176711767217673176741767517676176771767817679176801768117682176831768417685176861768717688176891769017691176921769317694176951769617697176981769917700177011770217703177041770517706177071770817709177101771117712177131771417715177161771717718177191772017721177221772317724177251772617727177281772917730177311773217733177341773517736177371773817739177401774117742177431774417745177461774717748177491775017751177521775317754177551775617757177581775917760177611776217763177641776517766177671776817769177701777117772177731777417775177761777717778177791778017781177821778317784177851778617787177881778917790177911779217793177941779517796177971779817799178001780117802178031780417805178061780717808178091781017811178121781317814178151781617817178181781917820178211782217823178241782517826178271782817829178301783117832178331783417835178361783717838178391784017841178421784317844178451784617847178481784917850178511785217853178541785517856178571785817859178601786117862178631
786417865178661786717868178691787017871178721787317874178751787617877178781787917880178811788217883178841788517886178871788817889178901789117892178931789417895178961789717898178991790017901179021790317904179051790617907179081790917910179111791217913179141791517916179171791817919179201792117922179231792417925179261792717928179291793017931179321793317934179351793617937179381793917940179411794217943179441794517946179471794817949179501795117952179531795417955179561795717958179591796017961179621796317964179651796617967179681796917970179711797217973179741797517976179771797817979179801798117982179831798417985179861798717988179891799017991179921799317994179951799617997179981799918000180011800218003180041800518006180071800818009180101801118012180131801418015180161801718018180191802018021180221802318024180251802618027180281802918030180311803218033180341803518036180371803818039180401804118042180431804418045180461804718048180491805018051180521805318054180551805618057180581805918060180611806218063180641806518066180671806818069180701807118072180731807418075180761807718078180791808018081180821808318084180851808618087180881808918090180911809218093180941809518096180971809818099181001810118102181031810418105181061810718108181091811018111181121811318114181151811618117181181811918120181211812218123181241812518126181271812818129181301813118132181331813418135181361813718138181391814018141181421814318144181451814618147181481814918150181511815218153181541815518156181571815818159181601816118162181631816418165181661816718168181691817018171181721817318174181751817618177181781817918180181811818218183181841818518186181871818818189181901819118192181931819418195181961819718198181991820018201182021820318204182051820618207182081820918210182111821218213182141821518216182171821818219182201822118222182231822418225182261822718228182291823018231182321823318234182351823618237182381823918240182411824218243182441824518246182471824818249182501825118252182531825418255182561825718258182591826018261182621826318264182651826618267182681826918270182711827218273182741827518276182771827818279182801828118282182831828418285182861828718288182891829018291182921829318294182951829618297182981829918300183011830218303183041830518306183071830818309183101831118312183131831418315183161831718318183191832018321183221832318324183251832618327183281832918330183311833218333183341833518336183371833818339183401834118342183431834418345183461834718348183491835018351183521835318354183551835618357183581835918360183611836218363183641836518366183671836818369183701837118372183731837418375183761837718378183791838018381183821838318384183851838618387183881838918390183911839218393183941839518396183971839818399184001840118402184031840418405184061840718408184091841018411184121841318414184151841618417184181841918420184211842218423184241842518426184271842818429184301843118432184331843418435184361843718438184391844018441184421844318444184451844618447184481844918450184511845218453184541845518456184571845818459184601846118462184631846418465184661846718468184691847018471184721847318474184751847618477184781847918480184811848218483184841848518486184871848818489184901849118492184931849418495184961849718498184991850018501185021850318504185051850618507185081850918510185111851218513185141851518516185171851818519185201852118522185231852418525185261852718528185291853018531185321853318534185351853618537185381853918540185411854218543185441854518546185471854818549185501855118552185531855418555185561855718558185591856018561185621856318564185651856618567185681856918570185711857218573185741
85751857618577185781857918580185811858218583185841858518586185871858818589 |
- <?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
- <!-- Generated by the JDiff Javadoc doclet -->
- <!-- (http://www.jdiff.org) -->
- <!-- on Sun Dec 04 01:00:08 UTC 2011 -->
- <api
- xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
- xsi:noNamespaceSchemaLocation='api.xsd'
- name="hadoop-hdfs 0.22.0"
- jdversion="1.0.9">
- <!-- Command line arguments = -doclet jdiff.JDiff -docletpath /x1/jenkins/jenkins-slave/workspace/Hadoop-22-Build/common/hdfs/build/ivy/lib/Hadoop-Hdfs/jdiff/jdiff-1.0.9.jar:/x1/jenkins/jenkins-slave/workspace/Hadoop-22-Build/common/hdfs/build/ivy/lib/Hadoop-Hdfs/jdiff/xerces-1.4.4.jar -classpath /x1/jenkins/jenkins-slave/workspace/Hadoop-22-Build/common/hdfs/build/classes:/x1/jenkins/jenkins-slave/workspace/Hadoop-22-Build/common/hdfs/conf:/home/jenkins/.ivy2/cache/org.apache.hadoop/hadoop-common/jars/hadoop-common-0.22.0-SNAPSHOT.jar:/home/jenkins/.ivy2/cache/commons-cli/commons-cli/jars/commons-cli-1.2.jar:/home/jenkins/.ivy2/cache/xmlenc/xmlenc/jars/xmlenc-0.52.jar:/home/jenkins/.ivy2/cache/commons-codec/commons-codec/jars/commons-codec-1.4.jar:/home/jenkins/.ivy2/cache/commons-logging/commons-logging/jars/commons-logging-1.1.1.jar:/home/jenkins/.ivy2/cache/org.slf4j/slf4j-api/jars/slf4j-api-1.6.1.jar:/home/jenkins/.ivy2/cache/org.slf4j/slf4j-log4j12/jars/slf4j-log4j12-1.6.1.jar:/home/jenkins/.ivy2/cache/log4j/log4j/bundles/log4j-1.2.16.jar:/home/jenkins/.ivy2/cache/org.mortbay.jetty/jetty/jars/jetty-6.1.26.jar:/home/jenkins/.ivy2/cache/org.mortbay.jetty/jetty-util/jars/jetty-util-6.1.26.jar:/home/jenkins/.ivy2/cache/org.mortbay.jetty/servlet-api/jars/servlet-api-2.5-20081211.jar:/home/jenkins/.ivy2/cache/tomcat/jasper-runtime/jars/jasper-runtime-5.5.12.jar:/home/jenkins/.ivy2/cache/tomcat/jasper-compiler/jars/jasper-compiler-5.5.12.jar:/home/jenkins/.ivy2/cache/org.mortbay.jetty/jsp-2.1-jetty/jars/jsp-2.1-jetty-6.1.26.jar:/home/jenkins/.ivy2/cache/org.eclipse.jdt/core/jars/core-3.1.1.jar:/home/jenkins/.ivy2/cache/org.mortbay.jetty/jsp-api-2.1-glassfish/jars/jsp-api-2.1-glassfish-2.1.v20091210.jar:/home/jenkins/.ivy2/cache/org.mortbay.jetty/jsp-2.1-glassfish/jars/jsp-2.1-glassfish-2.1.v20091210.jar:/home/jenkins/.ivy2/cache/org.eclipse.jdt.core.compiler/ecj/jars/ecj-3.5.1.jar:/home/jenkins/.ivy2/cache/commons-el/commons-el/jars/commons-el-1.0.jar:/home/jenkins/.ivy2/cache/net.java.dev.jets3t/jets3t/jars/jets3t-0.7.1.jar:/home/jenkins/.ivy2/cache/commons-httpclient/commons-httpclient/jars/commons-httpclient-3.1.jar:/home/jenkins/.ivy2/cache/commons-net/commons-net/jars/commons-net-1.4.1.jar:/home/jenkins/.ivy2/cache/oro/oro/jars/oro-2.0.8.jar:/home/jenkins/.ivy2/cache/net.sf.kosmosfs/kfs/jars/kfs-0.3.jar:/home/jenkins/.ivy2/cache/junit/junit/jars/junit-4.8.1.jar:/home/jenkins/.ivy2/cache/hsqldb/hsqldb/jars/hsqldb-1.8.0.10.jar:/home/jenkins/.ivy2/cache/org.apache.avro/avro/jars/avro-1.5.3.jar:/home/jenkins/.ivy2/cache/org.codehaus.jackson/jackson-mapper-asl/jars/jackson-mapper-asl-1.7.3.jar:/home/jenkins/.ivy2/cache/org.codehaus.jackson/jackson-core-asl/jars/jackson-core-asl-1.7.3.jar:/home/jenkins/.ivy2/cache/com.thoughtworks.paranamer/paranamer/jars/paranamer-2.3.jar:/home/jenkins/.ivy2/cache/org.xerial.snappy/snappy-java/bundles/snappy-java-1.0.3.2.jar:/home/jenkins/.ivy2/cache/org.apache.avro/avro-ipc/jars/avro-ipc-1.5.3.jar:/home/jenkins/.ivy2/cache/commons-daemon/commons-daemon/jars/commons-daemon-1.0.1.jar:/home/jenkins/.ivy2/cache/org.apache.avro/avro-compiler/jars/avro-compiler-1.5.3.jar:/home/jenkins/.ivy2/cache/commons-lang/commons-lang/jars/commons-lang-2.5.jar:/home/jenkins/.ivy2/cache/org.apache.velocity/velocity/jars/velocity-1.6.4.jar:/home/jenkins/.ivy2/cache/commons-collections/commons-collections/jars/commons-collections-3.2.1.jar:/home/jenkins/.ivy2/cache/com.thoughtworks.paranamer/paranamer-ant/jars/paranamer-ant-2.3.jar:/home/jenkins/.ivy2/cache/com.thoughtworks.paranamer/paranamer-generator/jars/paranamer-generator-2.3.jar:/home/jenkins/.ivy2/cache/com.thoughtworks.qdox/qdox/jars/qdox-1.12.jar:/home/jenkins/.ivy2/cache/asm/asm/jars/asm-3.3.jar:/home/jenkins/.ivy2/cache/org.apache.ant/ant/jars/ant-1.7.1.jar:/home/jenkins/.ivy2/cache/org.apache.ant/ant-launcher/jars/ant-launcher-1.7.1.jar:/home/jenkins/.ivy2/cache/org.aspectj/aspectjrt/jars/aspectjrt-1.6.5.jar:/home/jenkins/.ivy2/cache/org.aspectj/aspectjtools/jars/aspectjtools-1.6.5.jar:/home/jenkins/.ivy2/cache/org.mockito/mockito-all/jars/mockito-all-1.8.2.jar:/home/jenkins/.ivy2/cache/com.google.guava/guava/jars/guava-r09.jar:/home/jenkins/.ivy2/cache/jdiff/jdiff/jars/jdiff-1.0.9.jar:/home/jenkins/.ivy2/cache/xerces/xerces/jars/xerces-1.4.4.jar:/home/jenkins/tools/ant/latest/lib/ant-launcher.jar:/usr/share/java/xmlParserAPIs.jar:/usr/share/java/xercesImpl.jar:/home/jenkins/tools/ant/latest/lib/ant-apache-resolver.jar:/home/jenkins/tools/ant/latest/lib/ant-apache-bcel.jar:/home/jenkins/tools/ant/latest/lib/ant-jsch.jar:/home/jenkins/tools/ant/latest/lib/ant-jmf.jar:/home/jenkins/tools/ant/latest/lib/ant-apache-oro.jar:/home/jenkins/tools/ant/latest/lib/ant-netrexx.jar:/home/jenkins/tools/ant/latest/lib/ant-testutil.jar:/home/jenkins/tools/ant/latest/lib/ant-apache-xalan2.jar:/home/jenkins/tools/ant/latest/lib/ant-javamail.jar:/home/jenkins/tools/ant/latest/lib/ant.jar:/home/jenkins/tools/ant/latest/lib/ant-junit.jar:/home/jenkins/tools/ant/latest/lib/ant-swing.jar:/home/jenkins/tools/ant/latest/lib/ant-commons-net.jar:/home/jenkins/tools/ant/latest/lib/ant-jdepend.jar:/home/jenkins/tools/ant/latest/lib/ant-junit4.jar:/home/jenkins/tools/ant/latest/lib/ant-commons-logging.jar:/home/jenkins/tools/ant/latest/lib/ant-apache-bsf.jar:/home/jenkins/tools/ant/latest/lib/ant-apache-log4j.jar:/home/jenkins/tools/ant/latest/lib/ant-jai.jar:/home/jenkins/tools/ant/latest/lib/ant-apache-regexp.jar:/home/jenkins/tools/ant/latest/lib/ant-antlr.jar:/tmp/jdk1.6.0_29/lib/tools.jar -sourcepath /x1/jenkins/jenkins-slave/workspace/Hadoop-22-Build/common/hdfs/src/java -apidir /x1/jenkins/jenkins-slave/workspace/Hadoop-22-Build/common/hdfs/lib/jdiff -apiname hadoop-hdfs 0.22.0 -->
- <package name="org.apache.hadoop.fs">
- <!-- start class org.apache.hadoop.fs.Hdfs -->
- <class name="Hdfs" extends="org.apache.hadoop.fs.AbstractFileSystem"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <method name="getUriDefaultPort" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="createInternal" return="org.apache.hadoop.fs.FSDataOutputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <param name="createFlag" type="java.util.EnumSet"/>
- <param name="absolutePermission" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <param name="bufferSize" type="int"/>
- <param name="replication" type="short"/>
- <param name="blockSize" type="long"/>
- <param name="progress" type="org.apache.hadoop.util.Progressable"/>
- <param name="bytesPerChecksum" type="int"/>
- <param name="createParent" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="delete" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <param name="recursive" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- </method>
- <method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="p" type="org.apache.hadoop.fs.Path"/>
- <param name="start" type="long"/>
- <param name="len" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- </method>
- <method name="getFileChecksum" return="org.apache.hadoop.fs.FileChecksum"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- </method>
- <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- </method>
- <method name="getFileLinkStatus" return="org.apache.hadoop.fs.FileStatus"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- </method>
- <method name="getFsStatus" return="org.apache.hadoop.fs.FsStatus"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getServerDefaults" return="org.apache.hadoop.fs.FsServerDefaults"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="listLocatedStatus" return="org.apache.hadoop.fs.RemoteIterator"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="p" type="org.apache.hadoop.fs.Path"/>
- <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="listStatusIterator" return="org.apache.hadoop.fs.RemoteIterator"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
- <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- </method>
- <method name="mkdir"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="dir" type="org.apache.hadoop.fs.Path"/>
- <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <param name="createParent" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- </method>
- <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <param name="bufferSize" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- </method>
- <method name="renameInternal"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="org.apache.hadoop.fs.Path"/>
- <param name="dst" type="org.apache.hadoop.fs.Path"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- </method>
- <method name="renameInternal"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="org.apache.hadoop.fs.Path"/>
- <param name="dst" type="org.apache.hadoop.fs.Path"/>
- <param name="overwrite" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- </method>
- <method name="setOwner"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <param name="username" type="java.lang.String"/>
- <param name="groupname" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- </method>
- <method name="setPermission"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- </method>
- <method name="setReplication" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <param name="replication" type="short"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- </method>
- <method name="setTimes"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <param name="mtime" type="long"/>
- <param name="atime" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- </method>
- <method name="setVerifyChecksum"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="verifyChecksum" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="supportsSymlinks" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="createSymlink"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="target" type="org.apache.hadoop.fs.Path"/>
- <param name="link" type="org.apache.hadoop.fs.Path"/>
- <param name="createParent" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- </method>
- <method name="getLinkTarget" return="org.apache.hadoop.fs.Path"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="p" type="org.apache.hadoop.fs.Path"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- </class>
- <!-- end class org.apache.hadoop.fs.Hdfs -->
- </package>
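
Editorial note (not part of the removed JDiff file): the Hdfs class listed above implements AbstractFileSystem, which applications normally reach through org.apache.hadoop.fs.FileContext rather than by instantiating Hdfs directly. The following hypothetical Java sketch exercises the operations enumerated above (mkdir, create, open, delete) via FileContext; the path, file contents, and configuration are illustrative assumptions only.

    import java.util.EnumSet;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CreateFlag;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileContext;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsPermission;

    public class HdfsApiSketch {
      public static void main(String[] args) throws Exception {
        // Assumes fs.defaultFS points at an HDFS cluster; FileContext then
        // dispatches to the Hdfs AbstractFileSystem implementation above.
        Configuration conf = new Configuration();
        FileContext fc = FileContext.getFileContext(conf);

        Path dir = new Path("/tmp/jdiff-example");       // hypothetical path
        fc.mkdir(dir, FsPermission.getDefault(), true);  // mkdir(dir, permission, createParent)

        Path file = new Path(dir, "hello.txt");
        // create(f, createFlag, opts...) returns an FSDataOutputStream
        try (FSDataOutputStream out =
                 fc.create(file, EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE))) {
          out.writeUTF("hello, hdfs");
        }

        // open(f) returns an FSDataInputStream
        try (FSDataInputStream in = fc.open(file)) {
          System.out.println(in.readUTF());
        }

        fc.delete(dir, true);                            // delete(f, recursive)
      }
    }
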
- <package name="org.apache.hadoop.hdfs">
- <!-- start class org.apache.hadoop.hdfs.BlockMissingException -->
- <class name="BlockMissingException" extends="java.io.IOException"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="BlockMissingException" type="java.lang.String, java.lang.String, long"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[An exception that indicates that a file was corrupted.
- @param filename name of the corrupted file
- @param description a description of the corruption details]]>
- </doc>
- </constructor>
- <method name="getFile" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Returns the name of the corrupted file.
- @return name of corrupted file]]>
- </doc>
- </method>
- <method name="getOffset" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Returns the offset at which this file is corrupted
- @return offset of corrupted file]]>
- </doc>
- </method>
- <doc>
- <![CDATA[This exception is thrown when a read encounters a block that has no locations
- associated with it.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.BlockMissingException -->
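
Editorial note (not part of the removed JDiff file): BlockMissingException, per the class doc above, is surfaced to clients when a read hits a block with no live locations. A hypothetical sketch of handling it on the client side, using only the getFile() and getOffset() accessors listed above; the FileSystem setup and path are illustrative assumptions.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.BlockMissingException;
    import org.apache.hadoop.io.IOUtils;

    public class BlockMissingHandlingSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration()); // assumes HDFS is the default FS
        Path path = new Path("/data/example.bin");           // hypothetical path
        byte[] buf = new byte[4096];
        try (FSDataInputStream in = fs.open(path)) {
          IOUtils.readFully(in, buf, 0, buf.length);
        } catch (BlockMissingException e) {
          // getFile()/getOffset() are the accessors documented above.
          System.err.println("No live replicas for " + e.getFile()
              + " at offset " + e.getOffset() + "; retry later or run fsck.");
        }
      }
    }
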
- <!-- start class org.apache.hadoop.hdfs.BlockReader -->
- <class name="BlockReader" extends="org.apache.hadoop.fs.FSInputChecker"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <method name="read" return="int"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="buf" type="byte[]"/>
- <param name="off" type="int"/>
- <param name="len" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="skip" return="long"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="n" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="read" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="seekToNewSource" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="targetPos" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="seek"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="pos" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getChunkPosition" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="pos" type="long"/>
- </method>
- <method name="readChunk" return="int"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="pos" type="long"/>
- <param name="buf" type="byte[]"/>
- <param name="offset" type="int"/>
- <param name="len" type="int"/>
- <param name="checksumBuf" type="byte[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="newBlockReader" return="org.apache.hadoop.hdfs.BlockReader"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="sock" type="java.net.Socket"/>
- <param name="file" type="java.lang.String"/>
- <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="blockToken" type="org.apache.hadoop.security.token.Token"/>
- <param name="startOffset" type="long"/>
- <param name="len" type="long"/>
- <param name="bufferSize" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="newBlockReader" return="org.apache.hadoop.hdfs.BlockReader"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="sock" type="java.net.Socket"/>
- <param name="file" type="java.lang.String"/>
- <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="blockToken" type="org.apache.hadoop.security.token.Token"/>
- <param name="startOffset" type="long"/>
- <param name="len" type="long"/>
- <param name="bufferSize" type="int"/>
- <param name="verifyChecksum" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Java Doc required]]>
- </doc>
- </method>
- <method name="newBlockReader" return="org.apache.hadoop.hdfs.BlockReader"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="sock" type="java.net.Socket"/>
- <param name="file" type="java.lang.String"/>
- <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="blockToken" type="org.apache.hadoop.security.token.Token"/>
- <param name="startOffset" type="long"/>
- <param name="len" type="long"/>
- <param name="bufferSize" type="int"/>
- <param name="verifyChecksum" type="boolean"/>
- <param name="clientName" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Create a new BlockReader specifically to satisfy a read.
- This method also sends the OP_READ_BLOCK request.
- @param sock An established Socket to the DN. The BlockReader will not close it normally
- @param file File location
- @param block The block object
- @param blockToken The block token for security
- @param startOffset The read offset, relative to block head
- @param len The number of bytes to read
- @param bufferSize The IO buffer size (not the client buffer size)
- @param verifyChecksum Whether to verify checksum
- @param clientName Client name
- @return New BlockReader instance, or null on error.]]>
- </doc>
- </method>
- <method name="close"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="readAll" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="buf" type="byte[]"/>
- <param name="offset" type="int"/>
- <param name="len" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Similar to readFully(), but only reads as much data as is available,
- and allows use of the protected readFully().]]>
- </doc>
- </method>
- <method name="takeSocket" return="java.net.Socket"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Take the socket used to talk to the DN.]]>
- </doc>
- </method>
- <method name="hasSentStatusCode" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Whether the BlockReader has reached the end of its input stream
- and successfully sent a status code back to the datanode.]]>
- </doc>
- </method>
- <method name="getFileName" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="s" type="java.net.InetSocketAddress"/>
- <param name="blockId" type="long"/>
- </method>
- <doc>
- <![CDATA[This is a wrapper around a connection to a datanode
- and understands checksums, offsets, etc.
- Terminology:
- <dl>
- <dt>block</dt>
- <dd>The hdfs block, typically large (~64MB).
- </dd>
- <dt>chunk</dt>
- <dd>A block is divided into chunks, each of which comes with a checksum.
- We want transfers to be chunk-aligned so that
- checksums can be verified.
- </dd>
- <dt>packet</dt>
- <dd>A grouping of chunks used for transport. It contains a
- header, followed by checksum data, followed by real data.
- </dd>
- </dl>
- Please see DataNode for the RPC specification.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.BlockReader -->
- <!-- start class org.apache.hadoop.hdfs.DeprecatedUTF8 -->
- <class name="DeprecatedUTF8" extends="org.apache.hadoop.io.UTF8"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="DeprecatedUTF8"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="DeprecatedUTF8" type="java.lang.String"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Construct from a given string.]]>
- </doc>
- </constructor>
- <constructor name="DeprecatedUTF8" type="org.apache.hadoop.hdfs.DeprecatedUTF8"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Construct from a given DeprecatedUTF8 instance.]]>
- </doc>
- </constructor>
- <method name="readString" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="writeString" return="int"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <param name="s" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <doc>
- <![CDATA[A simple wrapper around {@link org.apache.hadoop.io.UTF8}.
- This class should be used only when it is absolutely necessary
- to use {@link org.apache.hadoop.io.UTF8}. The only difference is that
- using this class does not require a "@SuppressWarnings" annotation to avoid
- a javac warning. Instead, the deprecation is implied in the class name.
- 
- This should be treated as a package-private class within HDFS.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.DeprecatedUTF8 -->
- <!-- start class org.apache.hadoop.hdfs.DFSClient -->
- <class name="DFSClient" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.hdfs.protocol.FSConstants"/>
- <implements name="java.io.Closeable"/>
- <constructor name="DFSClient" type="org.apache.hadoop.conf.Configuration"
- static="false" final="false" visibility="public"
- deprecated="Deprecated at 0.21">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Same as this(NameNode.getAddress(conf), conf);
- @see #DFSClient(InetSocketAddress, Configuration)
- @deprecated Deprecated at 0.21]]>
- </doc>
- </constructor>
- <constructor name="DFSClient" type="java.net.InetSocketAddress, org.apache.hadoop.conf.Configuration"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Same as this(nameNodeAddr, conf, null);
- @see #DFSClient(InetSocketAddress, Configuration, org.apache.hadoop.fs.FileSystem.Statistics)]]>
- </doc>
- </constructor>
- <constructor name="DFSClient" type="java.net.InetSocketAddress, org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem.Statistics"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Same as this(nameNodeAddr, null, conf, stats);
- @see #DFSClient(InetSocketAddress, ClientProtocol, Configuration, org.apache.hadoop.fs.FileSystem.Statistics)]]>
- </doc>
- </constructor>
- <method name="createNamenode" return="org.apache.hadoop.hdfs.protocol.ClientProtocol"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[The locking hierarchy is to first acquire a lock on the DFSClient object, followed by
- a lock on the leasechecker, followed by a lock on an individual DFSOutputStream.]]>
- </doc>
- </method>
- <method name="createNamenode" return="org.apache.hadoop.hdfs.protocol.ClientProtocol"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="nameNodeAddr" type="java.net.InetSocketAddress"/>
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="close"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Close the file system, abandoning all of the leases and files being
- created and close connections to the namenode.]]>
- </doc>
- </method>
- <method name="getDefaultBlockSize" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the default block size for this cluster
- @return the default block size in bytes]]>
- </doc>
- </method>
- <method name="getBlockSize" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[@see ClientProtocol#getPreferredBlockSize(String)]]>
- </doc>
- </method>
- <method name="getServerDefaults" return="org.apache.hadoop.fs.FsServerDefaults"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get server default values for a number of configuration params.
- @see ClientProtocol#getServerDefaults()]]>
- </doc>
- </method>
- <method name="stringifyToken" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="token" type="org.apache.hadoop.security.token.Token"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[A test method for printing out tokens
- @param token
- @return Stringified version of the token]]>
- </doc>
- </method>
- <method name="getDelegationToken" return="org.apache.hadoop.security.token.Token"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="renewer" type="org.apache.hadoop.io.Text"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[@see ClientProtocol#getDelegationToken(Text)]]>
- </doc>
- </method>
- <method name="renewDelegationToken" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="token" type="org.apache.hadoop.security.token.Token"/>
- <exception name="SecretManager.InvalidToken" type="org.apache.hadoop.security.token.SecretManager.InvalidToken"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[@see ClientProtocol#renewDelegationToken(Token)]]>
- </doc>
- </method>
- <method name="cancelDelegationToken"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="token" type="org.apache.hadoop.security.token.Token"/>
- <exception name="SecretManager.InvalidToken" type="org.apache.hadoop.security.token.SecretManager.InvalidToken"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[@see ClientProtocol#cancelDelegationToken(Token)]]>
- </doc>
- </method>
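- <!-- A minimal sketch of the delegation token lifecycle using getDelegationToken,
-      renewDelegationToken and cancelDelegationToken as documented above. Here "client"
-      is a DFSClient instance, the renewer name "hdfs" is a placeholder, and the Token
-      type parameter is omitted as in the signatures listed here.
-
-      org.apache.hadoop.security.token.Token token =
-          client.getDelegationToken(new org.apache.hadoop.io.Text("hdfs"));
-      long nextExpiry = client.renewDelegationToken(token);  // new expiry time in ms
-      client.cancelDelegationToken(token);                   // invalidate when no longer needed
- -->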
- <method name="reportBadBlocks"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="blocks" type="org.apache.hadoop.hdfs.protocol.LocatedBlock[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Report corrupt blocks that were discovered by the client.
- @see ClientProtocol#reportBadBlocks(LocatedBlock[])]]>
- </doc>
- </method>
- <method name="getDefaultReplication" return="short"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="start" type="long"/>
- <param name="length" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[Get block location info about a file.
-
- getBlockLocations() returns a list of hostnames that store
- data for a specific file region. It returns a set of hostnames
- for every block within the indicated region.
- This function is very useful when writing code that considers
- data-placement when performing operations. For example, the
- MapReduce system tries to schedule tasks on the same machines
- as the data-block the task processes.]]>
- </doc>
- </method>
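- <!-- A minimal data-placement sketch using getBlockLocations as described above; "client"
-      is a DFSClient instance and the path is a placeholder.
-
-      org.apache.hadoop.fs.BlockLocation[] locs =
-          client.getBlockLocations("/user/alice/data.txt", 0, Long.MAX_VALUE);
-      for (org.apache.hadoop.fs.BlockLocation loc : locs) {
-        // each entry covers one block region and lists the datanodes that store it
-        System.out.println(loc.getOffset() + "+" + loc.getLength() + " on "
-            + java.util.Arrays.toString(loc.getHosts()));
-      }
- -->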
- <method name="open" return="org.apache.hadoop.hdfs.DFSInputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- </method>
- <method name="open" return="org.apache.hadoop.hdfs.DFSInputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="Use {@link #open(String, int, boolean)} instead.">
- <param name="src" type="java.lang.String"/>
- <param name="buffersize" type="int"/>
- <param name="verifyChecksum" type="boolean"/>
- <param name="stats" type="org.apache.hadoop.fs.FileSystem.Statistics"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[Create an input stream that obtains a node list from the
- namenode, and then reads from the appropriate datanodes. Creates an
- inner subclass of InputStream that does the necessary out-of-band
- work.
- @deprecated Use {@link #open(String, int, boolean)} instead.]]>
- </doc>
- </method>
- <method name="open" return="org.apache.hadoop.hdfs.DFSInputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="buffersize" type="int"/>
- <param name="verifyChecksum" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[Create an input stream that obtains a node list from the
- namenode, and then reads from the appropriate datanodes. Creates an
- inner subclass of InputStream that does the necessary out-of-band
- work.]]>
- </doc>
- </method>
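- <!-- A minimal read sketch using open(String, int, boolean) as documented above; "client"
-      is a DFSClient instance, and the path and 4096-byte buffer size are placeholders.
-
-      org.apache.hadoop.hdfs.DFSInputStream in =
-          client.open("/user/alice/data.txt", 4096, true);
-      try {
-        byte[] buf = new byte[4096];
-        int n;
-        while ((n = in.read(buf)) != -1) {
-          // process n bytes read from the datanodes chosen by the stream
-        }
-      } finally {
-        in.close();
-      }
- -->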
- <method name="getNamenode" return="org.apache.hadoop.hdfs.protocol.ClientProtocol"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the namenode associated with this DFSClient object
- @return the namenode associated with this DFSClient object]]>
- </doc>
- </method>
- <method name="create" return="java.io.OutputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="overwrite" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Call {@link #create(String, boolean, short, long, Progressable)} with
- default <code>replication</code> and <code>blockSize</code> and null <code>
- progress</code>.]]>
- </doc>
- </method>
- <method name="create" return="java.io.OutputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="overwrite" type="boolean"/>
- <param name="progress" type="org.apache.hadoop.util.Progressable"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Call {@link #create(String, boolean, short, long, Progressable)} with
- default <code>replication</code> and <code>blockSize</code>.]]>
- </doc>
- </method>
- <method name="create" return="java.io.OutputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="overwrite" type="boolean"/>
- <param name="replication" type="short"/>
- <param name="blockSize" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Call {@link #create(String, boolean, short, long, Progressable)} with
- null <code>progress</code>.]]>
- </doc>
- </method>
- <method name="create" return="java.io.OutputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="overwrite" type="boolean"/>
- <param name="replication" type="short"/>
- <param name="blockSize" type="long"/>
- <param name="progress" type="org.apache.hadoop.util.Progressable"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Call {@link #create(String, boolean, short, long, Progressable, int)}
- with default bufferSize.]]>
- </doc>
- </method>
- <method name="create" return="java.io.OutputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="overwrite" type="boolean"/>
- <param name="replication" type="short"/>
- <param name="blockSize" type="long"/>
- <param name="progress" type="org.apache.hadoop.util.Progressable"/>
- <param name="buffersize" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Call {@link #create(String, FsPermission, EnumSet, short, long,
- Progressable, int)} with default <code>permission</code>
- {@link FsPermission#getDefault()}.
-
- @param src File name
- @param overwrite overwrite an existing file if true
- @param replication replication factor for the file
- @param blockSize maximum block size
- @param progress interface for reporting client progress
- @param buffersize underlying buffersize
-
- @return output stream]]>
- </doc>
- </method>
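- <!-- A minimal write sketch using create(String, boolean, short, long, Progressable, int)
-      with the parameters documented above; "client" is a DFSClient instance, and the path,
-      replication factor 3, 64 MB block size, null progress callback and 4096-byte buffer
-      are placeholder values.
-
-      java.io.OutputStream out = client.create("/user/alice/out.txt", true,
-          (short) 3, 64L * 1024 * 1024, null, 4096);
-      try {
-        out.write("hello".getBytes());
-      } finally {
-        out.close();   // completes the file at the namenode
-      }
- -->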
- <method name="create" return="java.io.OutputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <param name="flag" type="java.util.EnumSet"/>
- <param name="replication" type="short"/>
- <param name="blockSize" type="long"/>
- <param name="progress" type="org.apache.hadoop.util.Progressable"/>
- <param name="buffersize" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Call {@link #create(String, FsPermission, EnumSet, boolean, short,
- long, Progressable, int)} with <code>createParent</code> set to true.]]>
- </doc>
- </method>
- <method name="create" return="java.io.OutputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <param name="flag" type="java.util.EnumSet"/>
- <param name="createParent" type="boolean"/>
- <param name="replication" type="short"/>
- <param name="blockSize" type="long"/>
- <param name="progress" type="org.apache.hadoop.util.Progressable"/>
- <param name="buffersize" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Create a new dfs file with the specified block replication
- with write-progress reporting and return an output stream for writing
- into the file.
-
- @param src File name
- @param permission The permission of the directory being created.
- If null, use default permission {@link FsPermission#getDefault()}
- @param flag indicates create a new file or create/overwrite an
- existing file or append to an existing file
- @param createParent create missing parent directory if true
- @param replication block replication
- @param blockSize maximum block size
- @param progress interface for reporting client progress
- @param buffersize underlying buffer size
-
- @return output stream
-
- @see ClientProtocol#create(String, FsPermission, String, EnumSetWritable,
- boolean, short, long) for detailed description of exceptions thrown]]>
- </doc>
- </method>
- <method name="primitiveCreate" return="java.io.OutputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="absPermission" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <param name="flag" type="java.util.EnumSet"/>
- <param name="createParent" type="boolean"/>
- <param name="replication" type="short"/>
- <param name="blockSize" type="long"/>
- <param name="progress" type="org.apache.hadoop.util.Progressable"/>
- <param name="buffersize" type="int"/>
- <param name="bytesPerChecksum" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[Same as {@link #create(String, FsPermission, EnumSet, short, long,
- Progressable, int)} except that the permission
- is absolute (i.e. it has already been masked with the umask).]]>
- </doc>
- </method>
- <method name="createSymlink"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="target" type="java.lang.String"/>
- <param name="link" type="java.lang.String"/>
- <param name="createParent" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Creates a symbolic link.
-
- @see ClientProtocol#createSymlink(String, String,FsPermission, boolean)]]>
- </doc>
- </method>
- <method name="getLinkTarget" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="path" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Resolve the *first* symlink, if any, in the path.
-
- @see ClientProtocol#getLinkTarget(String)]]>
- </doc>
- </method>
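- <!-- A minimal symlink sketch using createSymlink and getLinkTarget as documented above;
-      "client" is a DFSClient instance and the paths are placeholders.
-
-      client.createSymlink("/user/alice/data.txt", "/user/alice/latest", true);
-      String target = client.getLinkTarget("/user/alice/latest");  // "/user/alice/data.txt"
- -->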
- <method name="setReplication" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="replication" type="short"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Set replication for an existing file.
- @param src file name
- @param replication
-
- @see ClientProtocol#setReplication(String, short)]]>
- </doc>
- </method>
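- <!-- A minimal sketch using setReplication(String, short) as documented above; "client" is
-      a DFSClient instance and the target replication factor of 2 is a placeholder.
-
-      boolean applied = client.setReplication("/user/alice/data.txt", (short) 2);
- -->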
- <method name="rename" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="Use {@link #rename(String, String, Options.Rename...)} instead.">
- <param name="src" type="java.lang.String"/>
- <param name="dst" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Rename file or directory.
- @see ClientProtocol#rename(String, String)
- @deprecated Use {@link #rename(String, String, Options.Rename...)} instead.]]>
- </doc>
- </method>
- <method name="concat"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="trg" type="java.lang.String"/>
- <param name="srcs" type="java.lang.String[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Move blocks from src to trg and delete src
- See {@link ClientProtocol#concat(String, String [])}.]]>
- </doc>
- </method>
- <method name="rename"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="dst" type="java.lang.String"/>
- <param name="options" type="org.apache.hadoop.fs.Options.Rename[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Rename file or directory.
- @see ClientProtocol#rename(String, String, Options.Rename...)]]>
- </doc>
- </method>
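- <!-- A minimal rename sketch using rename(String, String, Options.Rename...) as documented
-      above; "client" is a DFSClient instance, the paths are placeholders, and
-      Options.Rename.OVERWRITE allows the destination to be replaced if it already exists.
-
-      client.rename("/user/alice/tmp.txt", "/user/alice/final.txt",
-          org.apache.hadoop.fs.Options.Rename.OVERWRITE);
- -->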
- <method name="delete" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Delete file or directory.
- See {@link ClientProtocol#delete(String)}.]]>
- </doc>
- </method>
- <method name="delete" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="recursive" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Delete a file or directory.
- If the directory is non-empty and <code>recursive</code> is set to true,
- its contents are deleted as well.
- @see ClientProtocol#delete(String, boolean)]]>
- </doc>
- </method>
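- <!-- A minimal delete sketch using delete(String, boolean) as documented above; "client" is
-      a DFSClient instance and the path is a placeholder. With recursive set to true, a
-      non-empty directory is removed together with its contents.
-
-      boolean deleted = client.delete("/user/alice/tmp", true);
- -->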
- <method name="exists" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Implemented using getFileInfo(src)]]>
- </doc>
- </method>
- <method name="listPaths" return="org.apache.hadoop.hdfs.protocol.DirectoryListing"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="startAfter" type="byte[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get a partial listing of the indicated directory.
- No block locations are fetched.]]>
- </doc>
- </method>
- <method name="listPaths" return="org.apache.hadoop.hdfs.protocol.DirectoryListing"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="startAfter" type="byte[]"/>
- <param name="needLocation" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get a partial listing of the indicated directory.
- It is recommended to use HdfsFileStatus.EMPTY_NAME as startAfter
- if the application wants to fetch a listing starting from
- the first entry in the directory.
- @see ClientProtocol#getListing(String, byte[], boolean)]]>
- </doc>
- </method>
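- <!-- A minimal paged-listing sketch using listPaths(String, byte[]) and
-      HdfsFileStatus.EMPTY_NAME as recommended above; "client" is a DFSClient instance,
-      the directory path is a placeholder, and it is assumed that DirectoryListing exposes
-      getPartialListing(), getLastName() and hasMore() for pagination.
-
-      byte[] cookie = org.apache.hadoop.hdfs.protocol.HdfsFileStatus.EMPTY_NAME;
-      org.apache.hadoop.hdfs.protocol.DirectoryListing page;
-      do {
-        page = client.listPaths("/user/alice", cookie);
-        for (org.apache.hadoop.hdfs.protocol.HdfsFileStatus stat : page.getPartialListing()) {
-          System.out.println(stat.getLocalName());
-        }
-        cookie = page.getLastName();   // resume after the last entry returned
-      } while (page.hasMore());
- -->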
- <method name="getFileInfo" return="org.apache.hadoop.hdfs.protocol.HdfsFileStatus"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get the file info for a specific file or directory.
- @param src The string representation of the path to the file
- @return object containing information regarding the file
- or null if file not found
-
- @see ClientProtocol#getFileInfo(String) for description of exceptions]]>
- </doc>
- </method>
- <method name="getFileLinkInfo" return="org.apache.hadoop.hdfs.protocol.HdfsFileStatus"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get the file info for a specific file or directory. If src
- refers to a symlink then the FileStatus of the link is returned.
- @param src path to a file or directory.
-
- For description of exceptions thrown
- @see ClientProtocol#getFileLinkInfo(String)]]>
- </doc>
- </method>
- <method name="getFileChecksum" return="org.apache.hadoop.fs.MD5MD5CRC32FileChecksum"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get the checksum of a file.
- @param src The file path
- @return The checksum
- @see DistributedFileSystem#getFileChecksum(Path)]]>
- </doc>
- </method>
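- <!-- A minimal checksum sketch using getFileChecksum(String) as documented above; "client"
-      is a DFSClient instance and the two paths are placeholders.
-
-      org.apache.hadoop.fs.MD5MD5CRC32FileChecksum c1 = client.getFileChecksum("/user/alice/a.txt");
-      org.apache.hadoop.fs.MD5MD5CRC32FileChecksum c2 = client.getFileChecksum("/user/alice/b.txt");
-      // equal checksums indicate matching contents, assuming the same block and chunk sizes
-      boolean sameContents = c1.equals(c2);
- -->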
- <method name="getFileChecksum" return="org.apache.hadoop.fs.MD5MD5CRC32FileChecksum"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="namenode" type="org.apache.hadoop.hdfs.protocol.ClientProtocol"/>
- <param name="socketFactory" type="javax.net.SocketFactory"/>
- <param name="socketTimeout" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get the checksum of a file.
- @param src The file path
- @return The checksum]]>
- </doc>
- </method>
- <method name="setPermission"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Set permissions of a file or directory.
- @param src path name.
- @param permission
-
- @see ClientProtocol#setPermission(String, FsPermission)]]>
- </doc>
- </method>
- <method name="setOwner"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="username" type="java.lang.String"/>
- <param name="groupname" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Set file or directory owner.
- @param src path name.
- @param username user id.
- @param groupname user group.
-
- @see ClientProtocol#setOwner(String, String, String)]]>
- </doc>
- </method>
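- <!-- A minimal sketch combining setPermission and setOwner as documented above; "client" is
-      a DFSClient instance, and the path, mode, user name and group name are placeholders.
-
-      client.setPermission("/user/alice/data.txt",
-          new org.apache.hadoop.fs.permission.FsPermission((short) 0640));
-      client.setOwner("/user/alice/data.txt", "alice", "analysts");
- -->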
- <method name="getDiskStatus" return="org.apache.hadoop.fs.FsStatus"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[@see ClientProtocol#getStats()]]>
- </doc>
- </method>
- <method name="getMissingBlocksCount" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns count of blocks with no good replicas left. Normally should be
- zero.
- @throws IOException]]>
- </doc>
- </method>
- <method name="getUnderReplicatedBlocksCount" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns count of blocks with one or more replicas missing.
- @throws IOException]]>
- </doc>
- </method>
- <method name="getCorruptBlocksCount" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns count of blocks with at least one replica marked corrupt.
- @throws IOException]]>
- </doc>
- </method>
- <method name="datanodeReport" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="type" type="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="setSafeMode" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Enter, leave or get safe mode.
-
- @see ClientProtocol#setSafeMode(FSConstants.SafeModeAction)]]>
- </doc>
- </method>
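- <!-- A minimal safe mode sketch using setSafeMode with the FSConstants.SafeModeAction values
-      SAFEMODE_GET and SAFEMODE_LEAVE; "client" is a DFSClient instance and the check-then-leave
-      flow is only a placeholder admin scenario.
-
-      boolean inSafeMode = client.setSafeMode(
-          org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction.SAFEMODE_GET);
-      if (inSafeMode) {
-        client.setSafeMode(
-            org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction.SAFEMODE_LEAVE);
-      }
- -->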
- <method name="refreshNodes"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Refresh the hosts and exclude files. (Rereads them.)
- See {@link ClientProtocol#refreshNodes()}
- for more details.
-
- @see ClientProtocol#refreshNodes()]]>
- </doc>
- </method>
- <method name="metaSave"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="pathname" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Dumps DFS data structures into specified file.
-
- @see ClientProtocol#metaSave(String)]]>
- </doc>
- </method>
- <method name="finalizeUpgrade"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[@see ClientProtocol#finalizeUpgrade()]]>
- </doc>
- </method>
- <method name="distributedUpgradeProgress" return="org.apache.hadoop.hdfs.server.common.UpgradeStatusReport"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[@see ClientProtocol#distributedUpgradeProgress(FSConstants.UpgradeAction)]]>
- </doc>
- </method>
- <method name="mkdirs" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="mkdirs" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <param name="createParent" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Create a directory (or hierarchy of directories) with the given
- name and permission.
- @param src The path of the directory being created
- @param permission The permission of the directory being created.
- If permission == null, use {@link FsPermission#getDefault()}.
- @param createParent create missing parent directory if true
-
- @return True if the operation succeeds.
-
- @see ClientProtocol#mkdirs(String, FsPermission, boolean)]]>
- </doc>
- </method>
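- <!-- A minimal sketch using mkdirs(String, FsPermission, boolean) as documented above;
-      "client" is a DFSClient instance, and the path and 0755 mode are placeholders.
-
-      boolean created = client.mkdirs("/user/alice/logs",
-          new org.apache.hadoop.fs.permission.FsPermission((short) 0755), true);
- -->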
- <method name="primitiveMkdir" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="absPermission" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Same as {@link #mkdirs(String, FsPermission, boolean)} except
- that the permission has already been masked against the umask.]]>
- </doc>
- </method>
- <method name="setTimes"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="mtime" type="long"/>
- <param name="atime" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Set the modification and access time of a file.
-
- @see ClientProtocol#setTimes(String, long, long)]]>
- </doc>
- </method>
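- <!-- A minimal sketch using setTimes(String, long, long) as documented above; "client" is a
-      DFSClient instance, the path is a placeholder, and both times are set to the current
-      wall-clock time as placeholder values.
-
-      long now = System.currentTimeMillis();
-      client.setTimes("/user/alice/data.txt", now, now);
- -->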
- <method name="toString" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <field name="LOG" type="org.apache.commons.logging.Log"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="SERVER_DEFAULTS_VALIDITY_PERIOD" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="MAX_BLOCK_ACQUIRE_FAILURES" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[DFSClient can connect to a Hadoop Filesystem and
- perform basic file tasks. It uses the ClientProtocol
- to communicate with a NameNode daemon, and connects
- directly to DataNodes to read/write block data.
- Hadoop DFS users should obtain an instance of
- DistributedFileSystem, which uses DFSClient to handle
- filesystem tasks.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.DFSClient -->
- <!-- start class org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream -->
- <class name="DFSClient.DFSDataInputStream" extends="org.apache.hadoop.fs.FSDataInputStream"
- abstract="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="DFSClient.DFSDataInputStream" type="org.apache.hadoop.hdfs.DFSInputStream"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </constructor>
- <method name="getCurrentDatanode" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Returns the datanode from which the stream is currently reading.]]>
- </doc>
- </method>
- <method name="getCurrentBlock" return="org.apache.hadoop.hdfs.protocol.Block"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Returns the block containing the target position.]]>
- </doc>
- </method>
- <method name="getVisibleLength" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[@return The visible length of the file.]]>
- </doc>
- </method>
- <doc>
- <![CDATA[The Hdfs implementation of {@link FSDataInputStream}]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream -->
- <!-- start class org.apache.hadoop.hdfs.DFSConfigKeys -->
- <class name="DFSConfigKeys" extends="org.apache.hadoop.fs.CommonConfigurationKeys"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="DFSConfigKeys"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <field name="DFS_BLOCK_SIZE_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_BLOCK_SIZE_DEFAULT" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_REPLICATION_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_REPLICATION_DEFAULT" type="short"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_STREAM_BUFFER_SIZE_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_STREAM_BUFFER_SIZE_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_BYTES_PER_CHECKSUM_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_BYTES_PER_CHECKSUM_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_CLIENT_WRITE_PACKET_SIZE_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_BACKUP_ADDRESS_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_BACKUP_ADDRESS_DEFAULT" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_BACKUP_HTTP_ADDRESS_DEFAULT" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_BACKUP_SERVICE_RPC_ADDRESS_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_DEFAULT" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_HTTP_ADDRESS_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_HTTP_ADDRESS_DEFAULT" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_MAX_OBJECTS_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_MAX_OBJECTS_DEFAULT" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_SAFEMODE_EXTENSION_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_SAFEMODE_EXTENSION_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_DEFAULT" type="float"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_SAFEMODE_MIN_DATANODES_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_SAFEMODE_MIN_DATANODES_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_DEFAULT" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_CHECKPOINT_PERIOD_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_CHECKPOINT_PERIOD_DEFAULT" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_CHECKPOINT_SIZE_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_CHECKPOINT_SIZE_DEFAULT" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_UPGRADE_PERMISSION_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_UPGRADE_PERMISSION_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_DEFAULT" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_CLIENT_HTTPS_NEED_AUTH_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT" type="boolean"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_CLIENT_CACHED_CONN_RETRY_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_CLIENT_CACHED_CONN_RETRY_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_ACCESSTIME_PRECISION_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_ACCESSTIME_PRECISION_DEFAULT" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_REPLICATION_CONSIDERLOAD_DEFAULT" type="boolean"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_REPLICATION_INTERVAL_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_REPLICATION_MIN_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_REPLICATION_MIN_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_REPLICATION_MAX_STREAMS_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_PERMISSIONS_ENABLED_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_PERMISSIONS_ENABLED_DEFAULT" type="boolean"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_PERMISSIONS_SUPERUSERGROUP_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_ADMIN" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_NAME_DIR_RESTORE_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_NAME_DIR_RESTORE_DEFAULT" type="boolean"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_DEFAULT" type="boolean"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_LIST_LIMIT" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_LIST_LIMIT_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_FAILED_VOLUMES_TOLERATED_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_DEFAULT" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_DATA_DIR_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_HTTPS_ADDRESS_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_NAME_DIR_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_EDITS_DIR_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_CLIENT_READ_PREFETCH_SIZE_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_CLIENT_RETRY_WINDOW_BASE" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_METRICS_SESSION_ID_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_HOST_NAME_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_STORAGEID_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_HOSTS_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_HOSTS_EXCLUDE_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_CLIENT_SOCKET_TIMEOUT_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_CHECKPOINT_DIR_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_CLIENT_BLOCK_WRITE_RETRIES_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_CLIENT_BLOCK_WRITE_RETRIES_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_BALANCER_MOVEDWINWIDTH_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_BALANCER_MOVEDWINWIDTH_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_ADDRESS_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_ADDRESS_DEFAULT" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_DATA_DIR_PERMISSION_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_DATA_DIR_PERMISSION_DEFAULT" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_DIRECTORYSCAN_INTERVAL_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_DIRECTORYSCAN_THREADS_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_DIRECTORYSCAN_THREADS_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_DNS_INTERFACE_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_DNS_INTERFACE_DEFAULT" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_DNS_NAMESERVER_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_DNS_NAMESERVER_DEFAULT" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_DU_RESERVED_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_DU_RESERVED_DEFAULT" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_HANDLER_COUNT_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_HANDLER_COUNT_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_HTTP_ADDRESS_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_HTTP_ADDRESS_DEFAULT" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_MAX_RECEIVER_THREADS_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_MAX_RECEIVER_THREADS_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_NUMBLOCKS_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_NUMBLOCKS_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_SCAN_PERIOD_HOURS_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_SIMULATEDDATASTORAGE_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_SIMULATEDDATASTORAGE_DEFAULT" type="boolean"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_SIMULATEDDATASTORAGE_CAPACITY_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_SIMULATEDDATASTORAGE_CAPACITY_DEFAULT" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_TRANSFERTO_ALLOWED_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_TRANSFERTO_ALLOWED_DEFAULT" type="boolean"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_HEARTBEAT_INTERVAL_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_HEARTBEAT_INTERVAL_DEFAULT" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_DECOMMISSION_INTERVAL_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_DECOMMISSION_NODES_PER_INTERVAL_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_DECOMMISSION_NODES_PER_INTERVAL_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_HANDLER_COUNT_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_HANDLER_COUNT_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_SERVICE_HANDLER_COUNT_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_SERVICE_HANDLER_COUNT_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_SUPPORT_APPEND_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_SUPPORT_APPEND_DEFAULT" type="boolean"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_HTTPS_ENABLE_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_HTTPS_ENABLE_DEFAULT" type="boolean"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DEFAULT_CHUNK_VIEW_SIZE_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DEFAULT_CHUNK_VIEW_SIZE_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_HTTPS_ADDRESS_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_HTTPS_ADDRESS_DEFAULT" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_IPC_ADDRESS_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_IPC_ADDRESS_DEFAULT" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_BLOCK_ACCESS_TOKEN_ENABLE_DEFAULT" type="boolean"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_BLOCK_ACCESS_KEY_UPDATE_INTERVAL_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_BLOCK_ACCESS_KEY_UPDATE_INTERVAL_DEFAULT" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_BLOCK_ACCESS_TOKEN_LIFETIME_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_BLOCK_ACCESS_TOKEN_LIFETIME_DEFAULT" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_REPLICATION_MAX_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_REPLICATION_MAX_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DF_INTERVAL_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DF_INTERVAL_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_BLOCKREPORT_INTERVAL_MSEC_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_BLOCKREPORT_INTERVAL_MSEC_DEFAULT" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_BLOCKREPORT_INITIAL_DELAY_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_BLOCKREPORT_INITIAL_DELAY_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_IMAGE_COMPRESS_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_IMAGE_COMPRESS_DEFAULT" type="boolean"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_IMAGE_COMPRESSION_CODEC_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_IMAGE_COMPRESSION_CODEC_DEFAULT" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_IMAGE_TRANSFER_RATE_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_IMAGE_TRANSFER_RATE_DEFAULT" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_PLUGINS_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_STARTUP_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_PLUGINS_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_WEB_UGI_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_STARTUP_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_KEYTAB_FILE_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_DATANODE_USER_NAME_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_KEYTAB_FILE_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_USER_NAME_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_SECONDARY_NAMENODE_USER_NAME_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_SECONDARY_NAMENODE_KRB_HTTPS_USER_NAME_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_NAME_CACHE_THRESHOLD_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DFS_NAMENODE_NAME_CACHE_THRESHOLD_DEFAULT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[This class contains constants for configuration keys used
- in hdfs.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.DFSConfigKeys -->
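- <!-- Editor's note: illustrative usage sketch, not part of the generated listing.
- The KEY/DEFAULT constant pairs above are normally read through a Configuration;
- class and constant names are taken from this listing, everything else is assumed.
- import org.apache.hadoop.conf.Configuration;
- import org.apache.hadoop.hdfs.DFSConfigKeys;
- import org.apache.hadoop.hdfs.HdfsConfiguration;
- Configuration conf = new HdfsConfiguration();
- int handlerCount = conf.getInt(DFSConfigKeys.DFS_NAMENODE_HANDLER_COUNT_KEY,
-     DFSConfigKeys.DFS_NAMENODE_HANDLER_COUNT_DEFAULT); /* falls back to the compiled-in default */
- -->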
- <!-- start class org.apache.hadoop.hdfs.DFSInputStream -->
- <class name="DFSInputStream" extends="org.apache.hadoop.fs.FSInputStream"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <method name="getFileLength" return="long"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getCurrentDatanode" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Returns the datanode from which the stream is currently reading.]]>
- </doc>
- </method>
- <method name="getCurrentBlock" return="org.apache.hadoop.hdfs.protocol.Block"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Returns the block containing the target position.]]>
- </doc>
- </method>
- <method name="close"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Close it down!]]>
- </doc>
- </method>
- <method name="read" return="int"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="read" return="int"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="buf" type="byte[]"/>
- <param name="off" type="int"/>
- <param name="len" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Read the entire buffer.]]>
- </doc>
- </method>
- <method name="getBlockReader" return="org.apache.hadoop.hdfs.BlockReader"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="dnAddr" type="java.net.InetSocketAddress"/>
- <param name="file" type="java.lang.String"/>
- <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="blockToken" type="org.apache.hadoop.security.token.Token"/>
- <param name="startOffset" type="long"/>
- <param name="len" type="long"/>
- <param name="bufferSize" type="int"/>
- <param name="verifyChecksum" type="boolean"/>
- <param name="clientName" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Retrieve a BlockReader suitable for reading.
- This method will reuse the cached connection to the DN if appropriate.
- Otherwise, it will create a new connection.
- @param dnAddr Address of the datanode
- @param file File location
- @param block The Block object
- @param blockToken The access token for security
- @param startOffset The read offset, relative to block head
- @param len The number of bytes to read
- @param bufferSize The IO buffer size (not the client buffer size)
- @param verifyChecksum Whether to verify checksum
- @param clientName Client name
- @return New BlockReader instance]]>
- </doc>
- </method>
- <method name="read" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="position" type="long"/>
- <param name="buffer" type="byte[]"/>
- <param name="offset" type="int"/>
- <param name="length" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Read bytes starting from the specified position.
-
- @param position start read from this position
- @param buffer read buffer
- @param offset offset into buffer
- @param length number of bytes to read
-
- @return actual number of bytes read]]>
- </doc>
- </method>
- <method name="skip" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="n" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="seek"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="targetPos" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Seek to a new arbitrary location]]>
- </doc>
- </method>
- <method name="seekToNewSource" return="boolean"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="targetPos" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Seek to the given position on a node other than the current node. If
- a node other than the current node is found, then returns true.
- If another node cannot be found, returns false.]]>
- </doc>
- </method>
- <method name="getPos" return="long"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="available" return="int"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Return the size of the remaining available bytes
- if the size is less than or equal to {@link Integer#MAX_VALUE},
- otherwise, return {@link Integer#MAX_VALUE}.]]>
- </doc>
- </method>
- <method name="markSupported" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[We definitely don't support marks]]>
- </doc>
- </method>
- <method name="mark"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="readLimit" type="int"/>
- </method>
- <method name="reset"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <doc>
- <![CDATA[DFSInputStream provides bytes from a named file. It handles
- negotiation with the namenode and various datanodes as necessary.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.DFSInputStream -->
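- <!-- Editor's note: illustrative sketch of the positional read documented above, not part
- of the generated listing. A DFSInputStream is reached indirectly through FileSystem.open;
- the URI and path below are hypothetical.
- import java.net.URI;
- import org.apache.hadoop.conf.Configuration;
- import org.apache.hadoop.fs.FSDataInputStream;
- import org.apache.hadoop.fs.FileSystem;
- import org.apache.hadoop.fs.Path;
- Configuration conf = new Configuration();
- FileSystem fs = FileSystem.get(URI.create("hdfs://namenode:8020/"), conf);
- FSDataInputStream in = fs.open(new Path("/user/example/data.txt"), 4096);
- byte[] buf = new byte[4096];
- int bytesRead = in.read(0L, buf, 0, buf.length); /* positional read; stream offset unchanged */
- in.close();
- -->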
- <!-- start class org.apache.hadoop.hdfs.DFSUtil -->
- <class name="DFSUtil" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="DFSUtil"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="isValidName" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <doc>
- <![CDATA[Whether the pathname is valid. Currently prohibits relative paths,
- and names which contain a ":" or "/"]]>
- </doc>
- </method>
- <method name="bytes2String" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="bytes" type="byte[]"/>
- <doc>
- <![CDATA[Converts a byte array to a string using UTF8 encoding.]]>
- </doc>
- </method>
- <method name="string2Bytes" return="byte[]"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="str" type="java.lang.String"/>
- <doc>
- <![CDATA[Converts a string to a byte array using UTF8 encoding.]]>
- </doc>
- </method>
- <method name="byteArray2String" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="pathComponents" type="byte[][]"/>
- <doc>
- <![CDATA[Given a list of path components, returns a path as a UTF8 String]]>
- </doc>
- </method>
- <method name="bytes2byteArray" return="byte[][]"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="bytes" type="byte[]"/>
- <param name="separator" type="byte"/>
- <doc>
- <![CDATA[Splits the array of bytes into an array of byte arrays
- on the given separator byte.
- @param bytes the array of bytes to split
- @param separator the delimiting byte]]>
- </doc>
- </method>
- <method name="bytes2byteArray" return="byte[][]"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="bytes" type="byte[]"/>
- <param name="len" type="int"/>
- <param name="separator" type="byte"/>
- <doc>
- <![CDATA[Splits the first len bytes of bytes into an array of byte arrays
- on the given separator byte.
- @param bytes the byte array to split
- @param len the number of bytes to split
- @param separator the delimiting byte]]>
- </doc>
- </method>
- <method name="locatedBlocks2Locations" return="org.apache.hadoop.fs.BlockLocation[]"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="blocks" type="org.apache.hadoop.hdfs.protocol.LocatedBlocks"/>
- <doc>
- <![CDATA[Convert a LocatedBlocks to BlockLocations[]
- @param blocks a LocatedBlocks
- @return an array of BlockLocations]]>
- </doc>
- </method>
- </class>
- <!-- end class org.apache.hadoop.hdfs.DFSUtil -->
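- <!-- Editor's note: illustrative sketch (not part of the generated listing) of the DFSUtil
- string/byte helpers documented above; the sample path is hypothetical.
- import org.apache.hadoop.hdfs.DFSUtil;
- byte[] raw = DFSUtil.string2Bytes("/user/example/file");
- String back = DFSUtil.bytes2String(raw); /* UTF8 round trip */
- boolean valid = DFSUtil.isValidName("/user/example/file");
- -->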
- <!-- start class org.apache.hadoop.hdfs.DFSUtil.ErrorSimulator -->
- <class name="DFSUtil.ErrorSimulator" extends="java.lang.Object"
- abstract="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="DFSUtil.ErrorSimulator"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="initializeErrorSimulationEvent"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="numberOfEvents" type="int"/>
- </method>
- <method name="getErrorSimulation" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="index" type="int"/>
- </method>
- <method name="setErrorSimulation"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="index" type="int"/>
- </method>
- <method name="clearErrorSimulation"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="index" type="int"/>
- </method>
- <doc>
- <![CDATA[Utility class to facilitate junit test error simulation.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.DFSUtil.ErrorSimulator -->
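- <!-- Editor's note: illustrative sketch (not part of the generated listing) of the
- ErrorSimulator methods above, as a junit test might drive them; the event indices are arbitrary.
- import org.apache.hadoop.hdfs.DFSUtil;
- DFSUtil.ErrorSimulator.initializeErrorSimulationEvent(2);
- DFSUtil.ErrorSimulator.setErrorSimulation(0);
- boolean shouldFail = DFSUtil.ErrorSimulator.getErrorSimulation(0);
- DFSUtil.ErrorSimulator.clearErrorSimulation(0);
- -->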
- <!-- start class org.apache.hadoop.hdfs.DistributedFileSystem -->
- <class name="DistributedFileSystem" extends="org.apache.hadoop.fs.FileSystem"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="DistributedFileSystem"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="DistributedFileSystem" type="java.net.InetSocketAddress, org.apache.hadoop.conf.Configuration"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </constructor>
- <method name="getUri" return="java.net.URI"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="initialize"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="uri" type="java.net.URI"/>
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="checkPath"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="path" type="org.apache.hadoop.fs.Path"/>
- <doc>
- <![CDATA[Permit paths which explicitly specify the default port.]]>
- </doc>
- </method>
- <method name="makeQualified" return="org.apache.hadoop.fs.Path"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="path" type="org.apache.hadoop.fs.Path"/>
- <doc>
- <![CDATA[Normalize paths that explicitly specify the default port.]]>
- </doc>
- </method>
- <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getDefaultBlockSize" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getDefaultReplication" return="short"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="setWorkingDirectory"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="dir" type="org.apache.hadoop.fs.Path"/>
- </method>
- <method name="getHomeDirectory" return="org.apache.hadoop.fs.Path"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="file" type="org.apache.hadoop.fs.FileStatus"/>
- <param name="start" type="long"/>
- <param name="len" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="p" type="org.apache.hadoop.fs.Path"/>
- <param name="start" type="long"/>
- <param name="len" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="setVerifyChecksum"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="verifyChecksum" type="boolean"/>
- </method>
- <method name="recoverLease" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Start the lease recovery of a file
- @param f a file
- @return true if the file is already closed
- @throws IOException if an error occurs]]>
- </doc>
- </method>
- <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <param name="bufferSize" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <param name="bufferSize" type="int"/>
- <param name="progress" type="org.apache.hadoop.util.Progressable"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[This optional operation is not yet supported.]]>
- </doc>
- </method>
- <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <param name="overwrite" type="boolean"/>
- <param name="bufferSize" type="int"/>
- <param name="replication" type="short"/>
- <param name="blockSize" type="long"/>
- <param name="progress" type="org.apache.hadoop.util.Progressable"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="primitiveCreate" return="org.apache.hadoop.fs.FSDataOutputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <param name="absolutePermission" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <param name="flag" type="java.util.EnumSet"/>
- <param name="bufferSize" type="int"/>
- <param name="replication" type="short"/>
- <param name="blockSize" type="long"/>
- <param name="progress" type="org.apache.hadoop.util.Progressable"/>
- <param name="bytesPerChecksum" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="createNonRecursive" return="org.apache.hadoop.fs.FSDataOutputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <param name="flag" type="java.util.EnumSet"/>
- <param name="bufferSize" type="int"/>
- <param name="replication" type="short"/>
- <param name="blockSize" type="long"/>
- <param name="progress" type="org.apache.hadoop.util.Progressable"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Same as create(), except fails if parent directory doesn't already exist.]]>
- </doc>
- </method>
- <method name="setReplication" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="org.apache.hadoop.fs.Path"/>
- <param name="replication" type="short"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="concat"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="trg" type="org.apache.hadoop.fs.Path"/>
- <param name="psrcs" type="org.apache.hadoop.fs.Path[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[This is a DFS-only operation; it is not part of FileSystem.
- Moves blocks from srcs to trg and deletes srcs afterwards.
- All blocks should be the same size.
- @param trg existing file to append to
- @param psrcs list of files (same block size, same replication)
- @throws IOException]]>
- </doc>
- </method>
- <method name="rename" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="org.apache.hadoop.fs.Path"/>
- <param name="dst" type="org.apache.hadoop.fs.Path"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="rename"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="org.apache.hadoop.fs.Path"/>
- <param name="dst" type="org.apache.hadoop.fs.Path"/>
- <param name="options" type="org.apache.hadoop.fs.Options.Rename[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}
- This rename operation is guaranteed to be atomic.]]>
- </doc>
- </method>
- <method name="delete" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <param name="recursive" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getContentSummary" return="org.apache.hadoop.fs.ContentSummary"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="setQuota"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="org.apache.hadoop.fs.Path"/>
- <param name="namespaceQuota" type="long"/>
- <param name="diskspaceQuota" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Set a directory's quotas
- @see org.apache.hadoop.hdfs.protocol.ClientProtocol#setQuota(String, long, long)]]>
- </doc>
- </method>
- <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="p" type="org.apache.hadoop.fs.Path"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[List all the entries of a directory.
- Note that this operation is not atomic for a large directory.
- The entries of a directory may be fetched from the NameNode multiple times.
- If the directory undergoes changes between the calls, it only
- guarantees that each name occurs once.]]>
- </doc>
- </method>
- <method name="listLocatedStatus" return="org.apache.hadoop.fs.RemoteIterator"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="p" type="org.apache.hadoop.fs.Path"/>
- <param name="filter" type="org.apache.hadoop.fs.PathFilter"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="mkdir" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Create a directory with given name and permission, only when
- parent directory exists.]]>
- </doc>
- </method>
- <method name="mkdirs" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="primitiveMkdir" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <param name="absolutePermission" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="close"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="toString" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getClient" return="org.apache.hadoop.hdfs.DFSClient"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getStatus" return="org.apache.hadoop.fs.FsStatus"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="p" type="org.apache.hadoop.fs.Path"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="getDiskStatus" return="org.apache.hadoop.hdfs.DistributedFileSystem.DiskStatus"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="Use {@link org.apache.hadoop.fs.FileSystem#getStatus()}
- instead">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Return the disk usage of the filesystem, including total capacity,
- used space, and remaining space
- @deprecated Use {@link org.apache.hadoop.fs.FileSystem#getStatus()}
- instead]]>
- </doc>
- </method>
- <method name="getRawCapacity" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="Use {@link org.apache.hadoop.fs.FileSystem#getStatus()}
- instead">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Return the total raw capacity of the filesystem, disregarding
- replication.
- @deprecated Use {@link org.apache.hadoop.fs.FileSystem#getStatus()}
- instead]]>
- </doc>
- </method>
- <method name="getRawUsed" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="Use {@link org.apache.hadoop.fs.FileSystem#getStatus()}
- instead">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Return the total raw used space in the filesystem, disregarding
- replication.
- @deprecated Use {@link org.apache.hadoop.fs.FileSystem#getStatus()}
- instead]]>
- </doc>
- </method>
- <method name="getMissingBlocksCount" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns count of blocks with no good replicas left. Normally should be
- zero.
-
- @throws IOException]]>
- </doc>
- </method>
- <method name="getUnderReplicatedBlocksCount" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns count of blocks with one or more replicas missing.
-
- @throws IOException]]>
- </doc>
- </method>
- <method name="getCorruptBlocksCount" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns count of blocks with at least one replica marked corrupt.
-
- @throws IOException]]>
- </doc>
- </method>
- <method name="getDataNodeStats" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Return statistics for each datanode.]]>
- </doc>
- </method>
- <method name="setSafeMode" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Enter, leave or get safe mode.
-
- @see org.apache.hadoop.hdfs.protocol.ClientProtocol#setSafeMode(
- FSConstants.SafeModeAction)]]>
- </doc>
- </method>
- <method name="saveNamespace"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Save namespace image.
-
- @see org.apache.hadoop.hdfs.protocol.ClientProtocol#saveNamespace()]]>
- </doc>
- </method>
- <method name="restoreFailedStorage" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="arg" type="java.lang.String"/>
- <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
- <doc>
- <![CDATA[Enable/disable/check restoreFailedStorage.
-
- @see org.apache.hadoop.hdfs.protocol.ClientProtocol#restoreFailedStorage(String arg)]]>
- </doc>
- </method>
- <method name="refreshNodes"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Refreshes the list of hosts and excluded hosts from the configured
- files.]]>
- </doc>
- </method>
- <method name="finalizeUpgrade"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Finalize previously upgraded file system state.
- @throws IOException]]>
- </doc>
- </method>
- <method name="distributedUpgradeProgress" return="org.apache.hadoop.hdfs.server.common.UpgradeStatusReport"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="metaSave"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="pathname" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getServerDefaults" return="org.apache.hadoop.fs.FsServerDefaults"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="reportChecksumFailure" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <param name="in" type="org.apache.hadoop.fs.FSDataInputStream"/>
- <param name="inPos" type="long"/>
- <param name="sums" type="org.apache.hadoop.fs.FSDataInputStream"/>
- <param name="sumsPos" type="long"/>
- <doc>
- <![CDATA[We need to find the blocks that didn't match. Likely only one
- is corrupt but we will report both to the namenode. In the future,
- we can consider figuring out exactly which block is corrupt.]]>
- </doc>
- </method>
- <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns the stat information about the file.
- @throws FileNotFoundException if the file does not exist.]]>
- </doc>
- </method>
- <method name="getFileChecksum" return="org.apache.hadoop.fs.MD5MD5CRC32FileChecksum"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="setPermission"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="p" type="org.apache.hadoop.fs.Path"/>
- <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="setOwner"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="p" type="org.apache.hadoop.fs.Path"/>
- <param name="username" type="java.lang.String"/>
- <param name="groupname" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="setTimes"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="p" type="org.apache.hadoop.fs.Path"/>
- <param name="mtime" type="long"/>
- <param name="atime" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="getDefaultPort" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </method>
- <method name="getDelegationToken" return="org.apache.hadoop.security.token.Token"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="renewer" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getDelegationToken" return="org.apache.hadoop.security.token.Token"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="use {@link #getDelegationToken(String)}">
- <param name="renewer" type="org.apache.hadoop.io.Text"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get a valid Delegation Token.
-
- @param renewer Name of the designated renewer for the token
- @return Token<DelegationTokenIdentifier>
- @throws IOException
- @deprecated use {@link #getDelegationToken(String)}]]>
- </doc>
- </method>
- <method name="renewDelegationToken" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="token" type="org.apache.hadoop.security.token.Token"/>
- <exception name="SecretManager.InvalidToken" type="org.apache.hadoop.security.token.SecretManager.InvalidToken"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Renew an existing delegation token.
-
- @param token delegation token obtained earlier
- @return the new expiration time
- @throws IOException]]>
- </doc>
- </method>
- <method name="cancelDelegationToken"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="token" type="org.apache.hadoop.security.token.Token"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Cancel an existing delegation token.
-
- @param token delegation token
- @throws IOException]]>
- </doc>
- </method>
- <doc>
- <![CDATA[Implementation of the abstract FileSystem for the DFS system.
- This object is the way end-user code interacts with a Hadoop
- DistributedFileSystem.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.DistributedFileSystem -->
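- <!-- Editor's note: illustrative sketch (not part of the generated listing) showing how
- end-user code typically reaches the DistributedFileSystem methods above; it assumes
- fs.defaultFS in the configuration points at an HDFS namenode.
- import org.apache.hadoop.conf.Configuration;
- import org.apache.hadoop.fs.FileSystem;
- import org.apache.hadoop.hdfs.DistributedFileSystem;
- import org.apache.hadoop.hdfs.HdfsConfiguration;
- import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
- Configuration conf = new HdfsConfiguration();
- FileSystem fs = FileSystem.get(conf);
- if (fs instanceof DistributedFileSystem) {
-   DistributedFileSystem dfs = (DistributedFileSystem) fs;
-   DatanodeInfo[] datanodes = dfs.getDataNodeStats(); /* per-datanode statistics */
- }
- -->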
- <!-- start class org.apache.hadoop.hdfs.DistributedFileSystem.DiskStatus -->
- <class name="DistributedFileSystem.DiskStatus" extends="org.apache.hadoop.fs.FsStatus"
- abstract="false"
- static="true" final="false" visibility="public"
- deprecated="Use {@link org.apache.hadoop.fs.FsStatus} instead">
- <constructor name="DistributedFileSystem.DiskStatus" type="org.apache.hadoop.fs.FsStatus"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="DistributedFileSystem.DiskStatus" type="long, long, long"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getDfsUsed" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <doc>
- <![CDATA[@deprecated Use {@link org.apache.hadoop.fs.FsStatus} instead]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.DistributedFileSystem.DiskStatus -->
- <!-- start class org.apache.hadoop.hdfs.HdfsConfiguration -->
- <class name="HdfsConfiguration" extends="org.apache.hadoop.conf.Configuration"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="HdfsConfiguration"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="HdfsConfiguration" type="boolean"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="HdfsConfiguration" type="org.apache.hadoop.conf.Configuration"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="init"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[This method is here so that, when invoked, HdfsConfiguration is class-loaded
- if it hasn't already been loaded. Upon loading the class, the static
- initializer block above will be executed to add the deprecated keys and to add
- the default resources. It is safe to call this method multiple times,
- as the static initializer block will only be invoked once.
-
- This replaces the previous, dangerous practice of other classes calling
- Configuration.addDefaultResource("hdfs-default.xml") directly without loading
- the HdfsConfiguration class first, thereby skipping the key deprecation.]]>
- </doc>
- </method>
- <doc>
- <![CDATA[Adds deprecated keys into the configuration.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.HdfsConfiguration -->
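- <!-- Editor's note: illustrative sketch (not part of the generated listing) of the init()
- pattern described above: force HdfsConfiguration to class-load before a plain Configuration
- is used, so the deprecated keys and hdfs-default.xml are registered.
- import org.apache.hadoop.conf.Configuration;
- import org.apache.hadoop.hdfs.HdfsConfiguration;
- HdfsConfiguration.init(); /* safe to call more than once */
- Configuration conf = new Configuration();
- -->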
- <!-- start class org.apache.hadoop.hdfs.HDFSPolicyProvider -->
- <class name="HDFSPolicyProvider" extends="org.apache.hadoop.security.authorize.PolicyProvider"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="HDFSPolicyProvider"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getServices" return="org.apache.hadoop.security.authorize.Service[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <doc>
- <![CDATA[{@link PolicyProvider} for HDFS protocols.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.HDFSPolicyProvider -->
- <!-- start class org.apache.hadoop.hdfs.HftpFileSystem -->
- <class name="HftpFileSystem" extends="org.apache.hadoop.fs.FileSystem"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="HftpFileSystem"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getDateFormat" return="java.text.SimpleDateFormat"
- abstract="false" native="false" synchronized="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getDefaultPort" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </method>
- <method name="getCanonicalServiceName" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="initialize"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="name" type="java.net.URI"/>
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getDelegationToken" return="org.apache.hadoop.security.token.Token"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="renewer" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getUri" return="java.net.URI"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="openConnection" return="java.net.HttpURLConnection"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="path" type="java.lang.String"/>
- <param name="query" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Open an HTTP connection to the namenode to read file data and metadata.
- @param path The path component of the URL
- @param query The query component of the URL]]>
- </doc>
- </method>
- <method name="updateQuery" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="query" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <param name="buffersize" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getFileChecksum" return="org.apache.hadoop.fs.FileChecksum"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="setWorkingDirectory"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- </method>
- <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <param name="bufferSize" type="int"/>
- <param name="progress" type="org.apache.hadoop.util.Progressable"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[This optional operation is not yet supported.]]>
- </doc>
- </method>
- <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <param name="overwrite" type="boolean"/>
- <param name="bufferSize" type="int"/>
- <param name="replication" type="short"/>
- <param name="blockSize" type="long"/>
- <param name="progress" type="org.apache.hadoop.util.Progressable"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="rename" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="org.apache.hadoop.fs.Path"/>
- <param name="dst" type="org.apache.hadoop.fs.Path"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="delete" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <param name="recursive" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="mkdirs" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getContentSummary" return="org.apache.hadoop.fs.ContentSummary"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <field name="nnAddr" type="java.net.InetSocketAddress"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="ugi" type="org.apache.hadoop.security.UserGroupInformation"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="ran" type="java.util.Random"
- transient="false" volatile="false"
- static="false" final="true" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="HFTP_TIMEZONE" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="HFTP_DATE_FORMAT" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="HFTP_SERVICE_NAME_KEY" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="df" type="java.lang.ThreadLocal"
- transient="false" volatile="false"
- static="true" final="true" visibility="protected"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[An implementation of a protocol for accessing filesystems over HTTP.
- The following implementation provides a limited, read-only interface
- to a filesystem over HTTP.
- @see org.apache.hadoop.hdfs.server.namenode.ListPathsServlet
- @see org.apache.hadoop.hdfs.server.namenode.FileDataServlet]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.HftpFileSystem -->
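- <!-- Illustrative note (not part of the generated API description):
-      a minimal sketch of read-only access through HftpFileSystem. The
-      hftp:// URI, the namenode host/port, and the paths are hypothetical
-      placeholders; the filesystem is obtained via the generic
-      FileSystem.get() factory rather than constructed directly.
-
-      import java.io.InputStream;
-      import java.net.URI;
-      import org.apache.hadoop.conf.Configuration;
-      import org.apache.hadoop.fs.FileStatus;
-      import org.apache.hadoop.fs.FileSystem;
-      import org.apache.hadoop.fs.Path;
-
-      public class HftpReadExample {
-        public static void main(String[] args) throws Exception {
-          Configuration conf = new Configuration();
-          // Assumed namenode HTTP address; adjust host and port as needed.
-          FileSystem fs = FileSystem.get(URI.create("hftp://namenode:50070/"), conf);
-          for (FileStatus stat : fs.listStatus(new Path("/"))) {
-            System.out.println(stat.getPath() + " " + stat.getLen());
-          }
-          // Reads are supported; create, append, rename and delete are not.
-          InputStream in = fs.open(new Path("/some/file"));
-          in.close();
-          fs.close();
-        }
-      }
- -->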
- <!-- start class org.apache.hadoop.hdfs.HsftpFileSystem -->
- <class name="HsftpFileSystem" extends="org.apache.hadoop.hdfs.HftpFileSystem"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="HsftpFileSystem"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="initialize"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="name" type="java.net.URI"/>
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="openConnection" return="java.net.HttpURLConnection"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="path" type="java.lang.String"/>
- <param name="query" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getUri" return="java.net.URI"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <doc>
- <![CDATA[An implementation of a protocol for accessing filesystems over HTTPS. The
- following implementation provides a limited, read-only interface to a
- filesystem over HTTPS.
-
- @see org.apache.hadoop.hdfs.server.namenode.ListPathsServlet
- @see org.apache.hadoop.hdfs.server.namenode.FileDataServlet]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.HsftpFileSystem -->
- <!-- start class org.apache.hadoop.hdfs.HsftpFileSystem.DummyHostnameVerifier -->
- <class name="HsftpFileSystem.DummyHostnameVerifier" extends="java.lang.Object"
- abstract="false"
- static="true" final="false" visibility="protected"
- deprecated="not deprecated">
- <implements name="javax.net.ssl.HostnameVerifier"/>
- <constructor name="HsftpFileSystem.DummyHostnameVerifier"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </constructor>
- <method name="verify" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="hostname" type="java.lang.String"/>
- <param name="session" type="javax.net.ssl.SSLSession"/>
- </method>
- <doc>
- <![CDATA[Dummy hostname verifier that is used to bypass hostname checking]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.HsftpFileSystem.DummyHostnameVerifier -->
- <!-- start class org.apache.hadoop.hdfs.HsftpFileSystem.DummyTrustManager -->
- <class name="HsftpFileSystem.DummyTrustManager" extends="java.lang.Object"
- abstract="false"
- static="true" final="false" visibility="protected"
- deprecated="not deprecated">
- <implements name="javax.net.ssl.X509TrustManager"/>
- <constructor name="HsftpFileSystem.DummyTrustManager"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </constructor>
- <method name="checkClientTrusted"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="chain" type="java.security.cert.X509Certificate[]"/>
- <param name="authType" type="java.lang.String"/>
- </method>
- <method name="checkServerTrusted"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="chain" type="java.security.cert.X509Certificate[]"/>
- <param name="authType" type="java.lang.String"/>
- </method>
- <method name="getAcceptedIssuers" return="java.security.cert.X509Certificate[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <doc>
- <![CDATA[Dummy trustmanager that is used to trust all server certificates]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.HsftpFileSystem.DummyTrustManager -->
- <doc>
- <![CDATA[<p>A distributed implementation of {@link
- org.apache.hadoop.fs.FileSystem}. This is loosely modelled after
- Google's <a href="http://labs.google.com/papers/gfs.html">GFS</a>.</p>
- <p>The most important difference is that unlike GFS, Hadoop DFS files
- have strictly one writer at any one time. Bytes are always appended
- to the end of the writer's stream. There is no notion of "record appends"
- or "mutations" that are then checked or reordered. Writers simply emit
- a byte stream. That byte stream is guaranteed to be stored in the
- order written.</p>]]>
- </doc>
- </package>
- <package name="org.apache.hadoop.hdfs.protocol">
- <!-- start class org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException -->
- <class name="AlreadyBeingCreatedException" extends="java.io.IOException"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="AlreadyBeingCreatedException" type="java.lang.String"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <doc>
- <![CDATA[The exception that is thrown when you ask to create a file that is
- already being created, but is not closed yet.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException -->
- <!-- start class org.apache.hadoop.hdfs.protocol.Block -->
- <class name="Block" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.io.Writable"/>
- <implements name="java.lang.Comparable"/>
- <constructor name="Block"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="Block" type="long, long, long"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="Block" type="long"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="Block" type="org.apache.hadoop.hdfs.protocol.Block"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="Block" type="java.io.File, long, long"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Find the blockid from the given filename]]>
- </doc>
- </constructor>
- <method name="isBlockFilename" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="java.io.File"/>
- </method>
- <method name="filename2id" return="long"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="name" type="java.lang.String"/>
- </method>
- <method name="isMetaFilename" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="name" type="java.lang.String"/>
- </method>
- <method name="getGenerationStamp" return="long"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="metaFile" type="java.lang.String"/>
- <doc>
- <![CDATA[Get the generation stamp from the name of the metafile]]>
- </doc>
- </method>
- <method name="getBlockId" return="long"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="metaFile" type="java.lang.String"/>
- <doc>
- <![CDATA[Get the blockId from the name of the metafile]]>
- </doc>
- </method>
- <method name="set"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="blkid" type="long"/>
- <param name="len" type="long"/>
- <param name="genStamp" type="long"/>
- </method>
- <method name="getBlockId" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="setBlockId"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="bid" type="long"/>
- </method>
- <method name="getBlockName" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getNumBytes" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="setNumBytes"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="len" type="long"/>
- </method>
- <method name="getGenerationStamp" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="setGenerationStamp"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="stamp" type="long"/>
- </method>
- <method name="toString" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="writeId"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="readId"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="compareTo" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- </method>
- <method name="equals" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="o" type="java.lang.Object"/>
- </method>
- <method name="hashCode" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <field name="BLOCK_FILE_PREFIX" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="METADATA_EXTENSION" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="blockFilePattern" type="java.util.regex.Pattern"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="metaFilePattern" type="java.util.regex.Pattern"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[A Block is a Hadoop FS primitive, identified by a
- long.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.Block -->
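- <!-- Illustrative note (not part of the generated API description):
-      a small sketch of the Block primitive described above. The numeric
-      values are arbitrary examples.
-
-      import org.apache.hadoop.hdfs.protocol.Block;
-
-      public class BlockExample {
-        public static void main(String[] args) {
-          // blockId, number of bytes, generation stamp
-          Block b = new Block(4242L, 1024L, 7L);
-          System.out.println(b.getBlockName());        // e.g. "blk_4242"
-          System.out.println(b.getNumBytes());         // 1024
-          System.out.println(b.getGenerationStamp());  // 7
-          // Blocks compare, hash and test equality by their identifying
-          // fields, so a copy is equal to the original.
-          Block copy = new Block(b);
-          System.out.println(b.equals(copy));          // true
-        }
-      }
- -->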
- <!-- start class org.apache.hadoop.hdfs.protocol.BlockListAsLongs -->
- <class name="BlockListAsLongs" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="java.lang.Iterable"/>
- <constructor name="BlockListAsLongs" type="java.util.List, java.util.List"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Create block report from finalized and under construction lists of blocks.
-
- @param finalized - list of finalized blocks
- @param uc - list of under construction blocks]]>
- </doc>
- </constructor>
- <constructor name="BlockListAsLongs"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="BlockListAsLongs" type="long[]"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Constructor
- @param iBlockList - the BlockListAsLongs is created from this long[] parameter]]>
- </doc>
- </constructor>
- <method name="getBlockListAsLongs" return="long[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="iterator" return="java.util.Iterator"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Returns an iterator over blocks in the block report.]]>
- </doc>
- </method>
- <method name="getBlockReportIterator" return="org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportIterator"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Returns {@link BlockReportIterator}.]]>
- </doc>
- </method>
- <method name="getNumberOfBlocks" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[The number of blocks
- @return - the number of blocks]]>
- </doc>
- </method>
- <method name="getBlockId" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="index" type="int"/>
- <doc>
- <![CDATA[The block-id of the indexTh block
- @param index - the block whose block-id is desired
- @return the block-id]]>
- </doc>
- </method>
- <method name="getBlockLen" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="index" type="int"/>
- <doc>
- <![CDATA[The block-len of the indexTh block
- @param index - the block whose block-len is desired
- @return - the block-len]]>
- </doc>
- </method>
- <method name="getBlockGenStamp" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="index" type="int"/>
- <doc>
- <![CDATA[The generation stamp of the indexTh block
- @param index - the block whose generation stamp is desired
- @return - the generation stamp]]>
- </doc>
- </method>
- <doc>
- <![CDATA[This class provides an interface for accessing a list of blocks that
- has been implemented as a long[].
- This class is useful for block reports. Rather than sending a block report
- as a Block[], we can send it as a long[].
- The structure of the array is as follows:
- 0: the length of the finalized replica list;
- 1: the length of the under-construction replica list;
- - followed by the finalized replica list, where each replica is represented by
- 3 longs: one for the blockId, one for the block length, and one for
- the generation stamp;
- - followed by an invalid replica represented by three -1s;
- - followed by the under-construction replica list, where each replica is
- represented by 4 longs: three for the block id, length, and generation
- stamp, and the fourth for the replica state.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.BlockListAsLongs -->
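- <!-- Illustrative note (not part of the generated API description):
-      a minimal sketch of decoding a block report through the long[]-backed
-      accessors documented above (getNumberOfBlocks / getBlockId /
-      getBlockLen / getBlockGenStamp). The input long[] is assumed to have
-      been received from a datanode in the layout described in the class
-      documentation.
-
-      import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
-
-      public class BlockReportExample {
-        static void dump(long[] blockReport) {
-          BlockListAsLongs decoded = new BlockListAsLongs(blockReport);
-          for (int i = 0; i < decoded.getNumberOfBlocks(); i++) {
-            System.out.println("id=" + decoded.getBlockId(i)
-                + " len=" + decoded.getBlockLen(i)
-                + " genStamp=" + decoded.getBlockGenStamp(i));
-          }
-        }
-      }
- -->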
- <!-- start class org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportIterator -->
- <class name="BlockListAsLongs.BlockReportIterator" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="java.util.Iterator"/>
- <method name="hasNext" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="next" return="org.apache.hadoop.hdfs.protocol.Block"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="remove"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getCurrentReplicaState" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the state of the current replica.
- The state corresponds to the replica returned
- by the latest {@link #next()}.]]>
- </doc>
- </method>
- <doc>
- <![CDATA[Iterates over blocks in the block report.
- Avoids object allocation on each iteration.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportIterator -->
- <!-- start interface org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol -->
- <interface name="ClientDatanodeProtocol" abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.ipc.VersionedProtocol"/>
- <method name="getReplicaVisibleLength" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Return the visible length of a replica.]]>
- </doc>
- </method>
- <field name="LOG" type="org.apache.commons.logging.Log"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="versionID" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[6: recoverBlock() removed.]]>
- </doc>
- </field>
- <doc>
- <![CDATA[A client-datanode protocol for block recovery]]>
- </doc>
- </interface>
- <!-- end interface org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol -->
- <!-- start interface org.apache.hadoop.hdfs.protocol.ClientProtocol -->
- <interface name="ClientProtocol" abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.ipc.VersionedProtocol"/>
- <method name="getBlockLocations" return="org.apache.hadoop.hdfs.protocol.LocatedBlocks"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="offset" type="long"/>
- <param name="length" type="long"/>
- <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
- <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get locations of the blocks of the specified file within the specified range.
- DataNode locations for each block are sorted by
- the proximity to the client.
- <p>
- Return {@link LocatedBlocks} which contains
- file length, blocks and their locations.
- DataNode locations for each block are sorted by
- the distance to the client's address.
- <p>
- The client will then have to contact
- one of the indicated DataNodes to obtain the actual data.
-
- @param src file name
- @param offset range start offset
- @param length range length
- @return file length and array of blocks with their locations
- @throws AccessControlException If access is denied
- @throws FileNotFoundException If file <code>src</code> does not exist
- @throws UnresolvedLinkException If <code>src</code> contains a symlink
- @throws IOException If an I/O error occurred]]>
- </doc>
- </method>
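- <!-- Illustrative note (not part of the generated API description):
-      a minimal sketch of asking the name-node for block locations. The
-      ClientProtocol proxy is assumed to have been obtained elsewhere
-      (normally through the HDFS client/RPC layer), and the LocatedBlocks
-      accessors used here are assumptions not shown in this file.
-
-      import org.apache.hadoop.hdfs.protocol.ClientProtocol;
-      import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-      import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-
-      public class LocateBlocksExample {
-        static void show(ClientProtocol namenode, String src) throws Exception {
-          // Ask for locations of the first 128 MB of the file.
-          LocatedBlocks located = namenode.getBlockLocations(src, 0L, 128L * 1024 * 1024);
-          System.out.println("file length: " + located.getFileLength());
-          for (LocatedBlock blk : located.getLocatedBlocks()) {
-            // The client would next contact one of blk's datanodes for the data.
-            System.out.println(blk);
-          }
-        }
-      }
- -->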
- <method name="getServerDefaults" return="org.apache.hadoop.fs.FsServerDefaults"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get server default values for a number of configuration params.
- @return a set of server default configuration values
- @throws IOException]]>
- </doc>
- </method>
- <method name="create"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="masked" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <param name="clientName" type="java.lang.String"/>
- <param name="flag" type="org.apache.hadoop.io.EnumSetWritable"/>
- <param name="createParent" type="boolean"/>
- <param name="replication" type="short"/>
- <param name="blockSize" type="long"/>
- <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
- <exception name="AlreadyBeingCreatedException" type="org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException"/>
- <exception name="DSQuotaExceededException" type="org.apache.hadoop.hdfs.protocol.DSQuotaExceededException"/>
- <exception name="FileAlreadyExistsException" type="org.apache.hadoop.fs.FileAlreadyExistsException"/>
- <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
- <exception name="NSQuotaExceededException" type="org.apache.hadoop.hdfs.protocol.NSQuotaExceededException"/>
- <exception name="ParentNotDirectoryException" type="org.apache.hadoop.fs.ParentNotDirectoryException"/>
- <exception name="SafeModeException" type="org.apache.hadoop.hdfs.server.namenode.SafeModeException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Create a new file entry in the namespace.
- <p>
- This will create an empty file specified by the source path.
- The path should reflect a full path originated at the root.
- The name-node does not have a notion of "current" directory for a client.
- <p>
- Once created, the file is visible to other clients and available for reading.
- However, other clients cannot {@link #delete(String, boolean)}, re-create, or
- {@link #rename(String, String)} it until the file is completed
- or abandoned, whether explicitly or as a result of lease expiration.
- <p>
- Blocks have a maximum size. Clients that intend to create
- multi-block files must also use
- {@link #addBlock(String, String, Block, DatanodeInfo[])}
- @param src path of the file being created.
- @param masked masked permission.
- @param clientName name of the current client.
- @param flag indicates whether the file should be
- overwritten if it already exists, created if it does not exist, or appended to.
- @param createParent create missing parent directory if true
- @param replication block replication factor.
- @param blockSize maximum block size.
-
- @throws AccessControlException If access is denied
- @throws AlreadyBeingCreatedException if <code>src</code> is already being created.
- @throws DSQuotaExceededException If file creation violates disk space
- quota restriction
- @throws FileAlreadyExistsException If file <code>src</code> already exists
- @throws FileNotFoundException If parent of <code>src</code> does not exist
- and <code>createParent</code> is false
- @throws ParentNotDirectoryException If parent of <code>src</code> is not a
- directory.
- @throws NSQuotaExceededException If file creation violates name space
- quota restriction
- @throws SafeModeException create not allowed in safemode
- @throws UnresolvedLinkException If <code>src</code> contains a symlink
- @throws IOException If an I/O error occurred
- RuntimeExceptions:
- @throws InvalidPathException Path <code>src</code> is invalid]]>
- </doc>
- </method>
- <method name="append" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="clientName" type="java.lang.String"/>
- <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
- <exception name="DSQuotaExceededException" type="org.apache.hadoop.hdfs.protocol.DSQuotaExceededException"/>
- <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
- <exception name="SafeModeException" type="org.apache.hadoop.hdfs.server.namenode.SafeModeException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Append to the end of the file.
- @param src path of the file being created.
- @param clientName name of the current client.
- @return information about the last partial block if any.
- @throws AccessControlException if permission to append to the file is
- denied by the system. As usual, on the client side the exception will
- be wrapped into {@link org.apache.hadoop.ipc.RemoteException}.
- Appending to an existing file is allowed only if the server is
- configured with the parameter dfs.support.append set to true; otherwise
- an IOException is thrown.
-
- @throws AccessControlException If permission to append to file is denied
- @throws FileNotFoundException If file <code>src</code> is not found
- @throws DSQuotaExceededException If append violates disk space quota
- restriction
- @throws SafeModeException append not allowed in safemode
- @throws UnresolvedLinkException If <code>src</code> contains a symlink
- @throws IOException If an I/O error occurred.
- RuntimeExceptions:
- @throws UnsupportedOperationException if append is not supported]]>
- </doc>
- </method>
- <method name="setReplication" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="replication" type="short"/>
- <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
- <exception name="DSQuotaExceededException" type="org.apache.hadoop.hdfs.protocol.DSQuotaExceededException"/>
- <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
- <exception name="SafeModeException" type="org.apache.hadoop.hdfs.server.namenode.SafeModeException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Set replication for an existing file.
- <p>
- The NameNode sets replication to the new value and returns.
- The actual block replication is not expected to be performed during
- this method call. The blocks will be populated or removed in the
- background as the result of the routine block maintenance procedures.
-
- @param src file name
- @param replication new replication
-
- @return true if successful;
- false if file does not exist or is a directory
- @throws AccessControlException If access is denied
- @throws DSQuotaExceededException If replication violates disk space
- quota restriction
- @throws FileNotFoundException If file <code>src</code> is not found
- @throws SafeModeException not allowed in safemode
- @throws UnresolvedLinkException if <code>src</code> contains a symlink
- @throws IOException If an I/O error occurred]]>
- </doc>
- </method>
- <method name="setPermission"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
- <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
- <exception name="SafeModeException" type="org.apache.hadoop.hdfs.server.namenode.SafeModeException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Set permissions for an existing file/directory.
-
- @throws AccessControlException If access is denied
- @throws FileNotFoundException If file <code>src</code> is not found
- @throws SafeModeException not allowed in safemode
- @throws UnresolvedLinkException If <code>src</code> contains a symlink
- @throws IOException If an I/O error occurred]]>
- </doc>
- </method>
- <method name="setOwner"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="username" type="java.lang.String"/>
- <param name="groupname" type="java.lang.String"/>
- <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
- <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
- <exception name="SafeModeException" type="org.apache.hadoop.hdfs.server.namenode.SafeModeException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Set the owner of a path (i.e. a file or a directory).
- The parameters username and groupname cannot both be null.
- @param src
- @param username If it is null, the original username remains unchanged.
- @param groupname If it is null, the original groupname remains unchanged.
- @throws AccessControlException If access is denied
- @throws FileNotFoundException If file <code>src</code> is not found
- @throws SafeModeException not allowed in safemode
- @throws UnresolvedLinkException If <code>src</code> contains a symlink
- @throws IOException If an I/O error occurred]]>
- </doc>
- </method>
- <method name="abandonBlock"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="src" type="java.lang.String"/>
- <param name="holder" type="java.lang.String"/>
- <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
- <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[The client can give up on a block by calling abandonBlock().
- The client can then
- either obtain a new block, or complete or abandon the file.
- Any partial writes to the block will be discarded.
-
- @throws AccessControlException If access is denied
- @throws FileNotFoundException file <code>src</code> is not found
- @throws UnresolvedLinkException If <code>src</code> contains a symlink
- @throws IOException If an I/O error occurred]]>
- </doc>
- </method>
- <method name="addBlock" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="clientName" type="java.lang.String"/>
- <param name="previous" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="excludeNodes" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"/>
- <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
- <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
- <exception name="NotReplicatedYetException" type="org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException"/>
- <exception name="SafeModeException" type="org.apache.hadoop.hdfs.server.namenode.SafeModeException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[A client that wants to write an additional block to the
- indicated filename (which must currently be open for writing)
- should call addBlock().
- addBlock() allocates a new block, and the datanodes to which the block data
- should be replicated.
-
- addBlock() also commits the previous block by reporting
- to the name-node the actual generation stamp and the length
- of the block that the client has transmitted to data-nodes.
- @param src the file being created
- @param clientName the name of the client that adds the block
- @param previous previous block
- @param excludeNodes a list of nodes that should not be
- allocated for the current block
- @return LocatedBlock allocated block information.
- @throws AccessControlException If access is denied
- @throws FileNotFoundException If file <code>src</code> is not found
- @throws NotReplicatedYetException previous blocks of the file are not
- replicated yet. Blocks cannot be added until replication
- completes.
- @throws SafeModeException create not allowed in safemode
- @throws UnresolvedLinkException If <code>src</code> contains a symlink
- @throws IOException If an I/O error occurred]]>
- </doc>
- </method>
- <method name="complete" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="clientName" type="java.lang.String"/>
- <param name="last" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
- <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
- <exception name="SafeModeException" type="org.apache.hadoop.hdfs.server.namenode.SafeModeException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[The client is done writing data to the given filename, and would
- like to complete it.
- The function returns whether the file has been closed successfully.
- If the function returns false, the caller should try again.
-
- complete() also commits the last block of the file by reporting
- to the name-node the actual generation stamp and the length
- of the block that the client has transmitted to data-nodes.
- A call to complete() will not return true until all the file's
- blocks have been replicated the minimum number of times. Thus,
- DataNode failures may cause a client to call complete() several
- times before succeeding.
- @throws AccessControlException If access is denied
- @throws FileNotFoundException If file <code>src</code> is not found
- @throws SafeModeException create not allowed in safemode
- @throws UnresolvedLinkException If <code>src</code> contains a symlink
- @throws IOException If an I/O error occurred]]>
- </doc>
- </method>
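- <!-- Illustrative note (not part of the generated API description):
-      a minimal sketch of the close step described above. complete() may
-      return false while the last blocks are still reaching their minimum
-      replication, so the caller retries; the retry interval here is an
-      arbitrary choice for illustration.
-
-      import org.apache.hadoop.hdfs.protocol.Block;
-      import org.apache.hadoop.hdfs.protocol.ClientProtocol;
-
-      public class CompleteFileExample {
-        static void completeFile(ClientProtocol namenode, String src,
-                                 String clientName, Block lastBlock) throws Exception {
-          // Keep asking until the name-node agrees the file is closed.
-          while (!namenode.complete(src, clientName, lastBlock)) {
-            Thread.sleep(400); // back off briefly before retrying
-          }
-        }
-      }
- -->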
- <method name="reportBadBlocks"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="blocks" type="org.apache.hadoop.hdfs.protocol.LocatedBlock[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[The client wants to report corrupted blocks (blocks with specified
- locations on datanodes).
- @param blocks Array of located blocks to report]]>
- </doc>
- </method>
- <method name="rename" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="Use {@link #rename(String, String, Options.Rename...)} instead.">
- <param name="src" type="java.lang.String"/>
- <param name="dst" type="java.lang.String"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Rename an item in the file system namespace.
- @param src existing file or directory name.
- @param dst new name.
- @return true if successful, or false if the old name does not exist
- or if the new name already belongs to the namespace.
-
- @throws IOException an I/O error occurred
-
- @deprecated Use {@link #rename(String, String, Options.Rename...)} instead.]]>
- </doc>
- </method>
- <method name="concat"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="trg" type="java.lang.String"/>
- <param name="srcs" type="java.lang.String[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[Moves blocks from srcs to trg and deletes srcs
-
- @param trg existing file
- @param srcs - list of existing files (same block size, same replication)
- @throws IOException if some arguments are invalid
- @throws UnresolvedLinkException if <code>trg</code> or <code>srcs</code>
- contains a symlink]]>
- </doc>
- </method>
- <method name="rename"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="dst" type="java.lang.String"/>
- <param name="options" type="org.apache.hadoop.fs.Options.Rename[]"/>
- <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
- <exception name="DSQuotaExceededException" type="org.apache.hadoop.hdfs.protocol.DSQuotaExceededException"/>
- <exception name="FileAlreadyExistsException" type="org.apache.hadoop.fs.FileAlreadyExistsException"/>
- <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
- <exception name="NSQuotaExceededException" type="org.apache.hadoop.hdfs.protocol.NSQuotaExceededException"/>
- <exception name="ParentNotDirectoryException" type="org.apache.hadoop.fs.ParentNotDirectoryException"/>
- <exception name="SafeModeException" type="org.apache.hadoop.hdfs.server.namenode.SafeModeException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Rename src to dst.
- <ul>
- <li>Fails if src is a file and dst is a directory.
- <li>Fails if src is a directory and dst is a file.
- <li>Fails if the parent of dst does not exist or is a file.
- </ul>
- <p>
- Without OVERWRITE option, rename fails if the dst already exists.
- With OVERWRITE option, rename overwrites the dst, if it is a file
- or an empty directory. Rename fails if dst is a non-empty directory.
- <p>
- This implementation of rename is atomic.
- <p>
- @param src existing file or directory name.
- @param dst new name.
- @param options Rename options
-
- @throws AccessControlException If access is denied
- @throws DSQuotaExceededException If rename violates disk space
- quota restriction
- @throws FileAlreadyExistsException If <code>dst</code> already exists and
- <code>options</code> does not have the {@link Rename#OVERWRITE}
- option set.
- @throws FileNotFoundException If <code>src</code> does not exist
- @throws NSQuotaExceededException If rename violates namespace
- quota restriction
- @throws ParentNotDirectoryException If parent of <code>dst</code>
- is not a directory
- @throws SafeModeException rename not allowed in safemode
- @throws UnresolvedLinkException If <code>src</code> or
- <code>dst</code> contains a symlink
- @throws IOException If an I/O error occurred]]>
- </doc>
- </method>
- <method name="delete" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="use {@link #delete(String, boolean)} istead.">
- <param name="src" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[Delete the given file or directory from the file system.
- <p>
- Any blocks belonging to the deleted files will be garbage-collected.
-
- @param src existing name.
- @return true only if the existing file or directory was actually removed
- from the file system.
- @throws UnresolvedLinkException if <code>src</code> contains a symlink.
- @deprecated use {@link #delete(String, boolean)} instead.]]>
- </doc>
- </method>
- <method name="delete" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="recursive" type="boolean"/>
- <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
- <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
- <exception name="SafeModeException" type="org.apache.hadoop.hdfs.server.namenode.SafeModeException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Delete the given file or directory from the file system.
- <p>
- Same as delete but provides a way to avoid accidentally
- deleting non-empty directories programmatically.
- @param src existing name
- @param recursive if true deletes a non-empty directory recursively,
- else throws an exception.
- @return true only if the existing file or directory was actually removed
- from the file system.
-
- @throws AccessControlException If access is denied
- @throws FileNotFoundException If file <code>src</code> is not found
- @throws SafeModeException create not allowed in safemode
- @throws UnresolvedLinkException If <code>src</code> contains a symlink
- @throws IOException If an I/O error occurred]]>
- </doc>
- </method>
- <method name="mkdirs" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="masked" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <param name="createParent" type="boolean"/>
- <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
- <exception name="FileAlreadyExistsException" type="org.apache.hadoop.fs.FileAlreadyExistsException"/>
- <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
- <exception name="NSQuotaExceededException" type="org.apache.hadoop.hdfs.protocol.NSQuotaExceededException"/>
- <exception name="ParentNotDirectoryException" type="org.apache.hadoop.fs.ParentNotDirectoryException"/>
- <exception name="SafeModeException" type="org.apache.hadoop.hdfs.server.namenode.SafeModeException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Create a directory (or hierarchy of directories) with the given
- name and permission.
- @param src The path of the directory being created
- @param masked The masked permission of the directory being created
- @param createParent create missing parent directory if true
- @return True if the operation succeeds.
- @throws AccessControlException If access is denied
- @throws FileAlreadyExistsException If <code>src</code> already exists
- @throws FileNotFoundException If parent of <code>src</code> does not exist
- and <code>createParent</code> is false
- @throws NSQuotaExceededException If file creation violates quota restriction
- @throws ParentNotDirectoryException If parent of <code>src</code>
- is not a directory
- @throws SafeModeException create not allowed in safemode
- @throws UnresolvedLinkException If <code>src</code> contains a symlink
- @throws IOException If an I/O error occurred.
- RunTimeExceptions:
- @throws InvalidPathException If <code>src</code> is invalid]]>
- </doc>
- </method>
- <method name="getListing" return="org.apache.hadoop.hdfs.protocol.DirectoryListing"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="startAfter" type="byte[]"/>
- <param name="needLocation" type="boolean"/>
- <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
- <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get a partial listing of the indicated directory
- @param src the directory name
- @param startAfter the name to start listing after, encoded in Java UTF-8
- @param needLocation if the FileStatus should contain block locations
- @return a partial listing starting after startAfter
- @throws AccessControlException permission denied
- @throws FileNotFoundException file <code>src</code> is not found
- @throws UnresolvedLinkException If <code>src</code> contains a symlink
- @throws IOException If an I/O error occurred]]>
- </doc>
- </method>
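- <!-- Illustrative note (not part of the generated API description):
-      a minimal sketch of paging through a directory with getListing().
-      The DirectoryListing and HdfsFileStatus accessors used here
-      (getPartialListing(), hasMore(), getLastName(), getLocalName())
-      are assumptions not described in this file.
-
-      import org.apache.hadoop.hdfs.protocol.ClientProtocol;
-      import org.apache.hadoop.hdfs.protocol.DirectoryListing;
-      import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-
-      public class ListDirExample {
-        static void listAll(ClientProtocol namenode, String src) throws Exception {
-          byte[] startAfter = new byte[0]; // start at the beginning
-          while (true) {
-            DirectoryListing page = namenode.getListing(src, startAfter, false);
-            for (HdfsFileStatus stat : page.getPartialListing()) {
-              System.out.println(stat.getLocalName());
-            }
-            if (!page.hasMore()) {
-              break;
-            }
-            startAfter = page.getLastName(); // resume after the last entry seen
-          }
-        }
-      }
- -->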
- <method name="renewLease"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="clientName" type="java.lang.String"/>
- <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Client programs can cause stateful changes in the NameNode
- that affect other clients. A client may obtain a file and
- neither abandon nor complete it. A client might hold a series
- of locks that prevent other clients from proceeding.
- Clearly, it would be bad if a client held a bunch of locks
- that it never gave up. This can happen easily if the client
- dies unexpectedly.
- <p>
- So, the NameNode will revoke the locks and live file-creates
- for clients that it thinks have died. A client tells the
- NameNode that it is still alive by periodically calling
- renewLease(). If a certain amount of time passes since
- the last call to renewLease(), the NameNode assumes the
- client has died.
- @throws AccessControlException permission denied
- @throws IOException If an I/O error occurred]]>
- </doc>
- </method>
- <method name="recoverLease" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="clientName" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Start lease recovery.
- Lightweight NameNode operation to trigger lease recovery
-
- @param src path of the file to start lease recovery
- @param clientName name of the current client
- @return true if the file is already closed
- @throws IOException]]>
- </doc>
- </method>
- <method name="getStats" return="long[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get a set of statistics about the filesystem.
- The returned array currently contains the following values.
- <ul>
- <li> [0] contains the total storage capacity of the system, in bytes.</li>
- <li> [1] contains the total used space of the system, in bytes.</li>
- <li> [2] contains the available storage of the system, in bytes.</li>
- <li> [3] contains number of under replicated blocks in the system.</li>
- <li> [4] contains number of blocks with a corrupt replica. </li>
- <li> [5] contains number of blocks without any good replicas left. </li>
- </ul>
- Use public constants like {@link #GET_STATS_CAPACITY_IDX} in place of
- actual numbers to index into the array.]]>
- </doc>
- </method>
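- <!-- Illustrative note (not part of the generated API description):
-      a minimal sketch of reading the getStats() array using the index
-      layout documented above. Literal indices are used purely for
-      illustration; the documentation recommends the public *_IDX
-      constants instead.
-
-      import org.apache.hadoop.hdfs.protocol.ClientProtocol;
-
-      public class FsStatsExample {
-        static void printStats(ClientProtocol namenode) throws Exception {
-          long[] stats = namenode.getStats();
-          System.out.println("capacity (bytes):        " + stats[0]);
-          System.out.println("used (bytes):            " + stats[1]);
-          System.out.println("remaining (bytes):       " + stats[2]);
-          System.out.println("under-replicated blocks: " + stats[3]);
-          System.out.println("corrupt-replica blocks:  " + stats[4]);
-          System.out.println("missing blocks:          " + stats[5]);
-        }
-      }
- -->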
- <method name="getDatanodeReport" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="type" type="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get a report on the system's current datanodes.
- One DatanodeInfo object is returned for each DataNode.
- Returns live datanodes if type is LIVE, dead datanodes if type is DEAD,
- or all datanodes if type is ALL.]]>
- </doc>
- </method>
- <method name="getPreferredBlockSize" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="filename" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[Get the block size for the given file.
- @param filename The name of the file
- @return The number of bytes in each block
- @throws IOException
- @throws UnresolvedLinkException if the path contains a symlink.]]>
- </doc>
- </method>
- <method name="setSafeMode" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Enter, leave or get safe mode.
- <p>
- Safe mode is a name node state when it
- <ol><li>does not accept changes to name space (read-only), and</li>
- <li>does not replicate or delete blocks.</li></ol>
-
- <p>
- Safe mode is entered automatically at name node startup.
- Safe mode can also be entered manually using
- {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_ENTER)}.
- <p>
- At startup the name node accepts data node reports collecting
- information about block locations.
- In order to leave safe mode it needs to collect a configurable
- percentage, called the threshold, of blocks that satisfy the minimal
- replication condition.
- The minimal replication condition is that each block must have at least
- <tt>dfs.namenode.replication.min</tt> replicas.
- When the threshold is reached the name node extends safe mode
- for a configurable amount of time
- to let the remaining data nodes check in before it
- starts replicating missing blocks.
- Then the name node leaves safe mode.
- <p>
- If safe mode is turned on manually using
- {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_ENTER)}
- then the name node stays in safe mode until it is manually turned off
- using {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_LEAVE)}.
- The current state of the name node can be verified using
- {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_GET)}.
- <h4>Configuration parameters:</h4>
- <tt>dfs.safemode.threshold.pct</tt> is the threshold parameter.<br>
- <tt>dfs.safemode.extension</tt> is the safe mode extension parameter.<br>
- <tt>dfs.namenode.replication.min</tt> is the minimal replication parameter.
-
- <h4>Special cases:</h4>
- The name node does not enter safe mode at startup if the threshold is
- set to 0 or if the name space is empty.<br>
- If the threshold is set to 1 then all blocks need to have at least
- minimal replication.<br>
- If the threshold value is greater than 1 then the name node will not be
- able to turn off safe mode automatically.<br>
- Safe mode can always be turned off manually.
-
- @param action <ul> <li>0 leave safe mode;</li>
- <li>1 enter safe mode;</li>
- <li>2 get safe mode state.</li></ul>
- @return <ul><li>0 if the safe mode is OFF or</li>
- <li>1 if the safe mode is ON.</li></ul>
-
- @throws IOException]]>
- </doc>
- </method>
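- <!-- Usage sketch (illustrative, not part of the generated API record): entering, querying
-      and leaving safe mode with the SafeModeAction values named in the description above.
-      Assumes a ClientProtocol proxy named "namenode"; the class name is hypothetical.
-
-        import java.io.IOException;
-        import org.apache.hadoop.hdfs.protocol.ClientProtocol;
-        import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
-
-        class SafeModeSketch {
-          /** Enter safe mode, verify the state, then leave again. */
-          static void toggleSafeMode(ClientProtocol namenode) throws IOException {
-            namenode.setSafeMode(SafeModeAction.SAFEMODE_ENTER);            // turn safe mode on
-            boolean on = namenode.setSafeMode(SafeModeAction.SAFEMODE_GET); // query current state
-            System.out.println("safe mode is " + (on ? "ON" : "OFF"));
-            namenode.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);            // turn safe mode off
-          }
-        }
- -->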
- <method name="saveNamespace"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Save namespace image.
- <p>
- Saves the current namespace into the storage directories and resets the edits log.
- Requires superuser privilege and safe mode.
-
- @throws AccessControlException if the superuser privilege is violated.
- @throws IOException if image creation failed.]]>
- </doc>
- </method>
- <method name="restoreFailedStorage" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="arg" type="java.lang.String"/>
- <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
- <doc>
- <![CDATA[Enable or disable restoring failed storage.
- <p>
- Sets a flag that enables restoring of failed storage replicas.
-
- @throws AccessControlException if the superuser privilege is violated.]]>
- </doc>
- </method>
- <method name="refreshNodes"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Tells the namenode to reread the hosts and exclude files.
- @throws IOException]]>
- </doc>
- </method>
- <method name="finalizeUpgrade"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Finalize previous upgrade.
- Remove file system state saved during the upgrade.
- The upgrade will become irreversible.
-
- @throws IOException]]>
- </doc>
- </method>
- <method name="distributedUpgradeProgress" return="org.apache.hadoop.hdfs.server.common.UpgradeStatusReport"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Report distributed upgrade progress or force current upgrade to proceed.
-
- @param action {@link FSConstants.UpgradeAction} to perform
- @return upgrade status information or null if no upgrades are in progress
- @throws IOException]]>
- </doc>
- </method>
- <method name="metaSave"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="filename" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Dumps namenode data structures into specified file. If the file
- already exists, then append.
- @throws IOException]]>
- </doc>
- </method>
- <method name="getFileInfo" return="org.apache.hadoop.hdfs.protocol.HdfsFileStatus"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
- <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get the file info for a specific file or directory.
- @param src The string representation of the path to the file
- @return object containing information regarding the file
- or null if file not found
- @throws AccessControlException permission denied
- @throws FileNotFoundException file <code>src</code> is not found
- @throws UnresolvedLinkException if the path contains a symlink.
- @throws IOException If an I/O error occurred]]>
- </doc>
- </method>
- <method name="getFileLinkInfo" return="org.apache.hadoop.hdfs.protocol.HdfsFileStatus"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get the file info for a specific file or directory. If the path
- refers to a symlink then the FileStatus of the symlink is returned.
- @param src The string representation of the path to the file
- @return object containing information regarding the file
- or null if file not found
- @throws AccessControlException permission denied
- @throws UnresolvedLinkException if <code>src</code> contains a symlink
- @throws IOException If an I/O error occurred]]>
- </doc>
- </method>
- <method name="getContentSummary" return="org.apache.hadoop.fs.ContentSummary"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="path" type="java.lang.String"/>
- <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
- <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get {@link ContentSummary} rooted at the specified directory.
- @param path The string representation of the path
- @throws AccessControlException permission denied
- @throws FileNotFoundException file <code>path</code> is not found
- @throws UnresolvedLinkException if <code>path</code> contains a symlink.
- @throws IOException If an I/O error occurred]]>
- </doc>
- </method>
- <method name="setQuota"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="path" type="java.lang.String"/>
- <param name="namespaceQuota" type="long"/>
- <param name="diskspaceQuota" type="long"/>
- <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
- <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Set the quota for a directory.
- @param path The string representation of the path to the directory
- @param namespaceQuota Limit on the number of names in the tree rooted
- at the directory
- @param diskspaceQuota Limit on the disk space occupied by all the files under
- this directory.
- <br><br>
-
- The quota can have three types of values: (1) 0 or more will set
- the quota to that value, (2) {@link FSConstants#QUOTA_DONT_SET} implies
- the quota will not be changed, and (3) {@link FSConstants#QUOTA_RESET}
- implies the quota will be reset. Any other value is a runtime error.
-
- @throws AccessControlException permission denied
- @throws FileNotFoundException file <code>path</code> is not found
- @throws QuotaExceededException if the directory size
- is greater than the given quota
- @throws UnresolvedLinkException if the <code>path</code> contains a symlink.
- @throws IOException If an I/O error occurred]]>
- </doc>
- </method>
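- <!-- Usage sketch (illustrative, not part of the generated API record): the three kinds of
-      quota values described above, using the FSConstants sentinels. Assumes a ClientProtocol
-      proxy named "namenode"; the class name and directory paths are hypothetical.
-
-        import java.io.IOException;
-        import org.apache.hadoop.hdfs.protocol.ClientProtocol;
-        import org.apache.hadoop.hdfs.protocol.FSConstants;
-
-        class QuotaSketch {
-          /** Cap a directory at one million names, leaving its disk-space quota untouched. */
-          static void setNameQuotaOnly(ClientProtocol namenode, String dir) throws IOException {
-            namenode.setQuota(dir, 1000000L, FSConstants.QUOTA_DONT_SET);
-          }
-
-          /** Remove both quotas from a directory. */
-          static void clearQuotas(ClientProtocol namenode, String dir) throws IOException {
-            namenode.setQuota(dir, FSConstants.QUOTA_RESET, FSConstants.QUOTA_RESET);
-          }
-        }
- -->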
- <method name="fsync"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="client" type="java.lang.String"/>
- <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
- <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Write all metadata for this file into persistent storage.
- The file must be currently open for writing.
- @param src The string representation of the path
- @param client The string representation of the client
-
- @throws AccessControlException permission denied
- @throws FileNotFoundException file <code>src</code> is not found
- @throws UnresolvedLinkException if <code>src</code> contains a symlink.
- @throws IOException If an I/O error occurred]]>
- </doc>
- </method>
- <method name="setTimes"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="mtime" type="long"/>
- <param name="atime" type="long"/>
- <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
- <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Sets the modification and access time of the file to the specified time.
- @param src The string representation of the path
- @param mtime The number of milliseconds since Jan 1, 1970.
- Setting mtime to -1 means that modification time should not be set
- by this call.
- @param atime The number of milliseconds since Jan 1, 1970.
- Setting atime to -1 means that access time should not be set
- by this call.
-
- @throws AccessControlException permission denied
- @throws FileNotFoundException file <code>src</code> is not found
- @throws UnresolvedLinkException if <code>src</code> contains a symlink.
- @throws IOException If an I/O error occurred]]>
- </doc>
- </method>
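- <!-- Usage sketch (illustrative, not part of the generated API record): using the -1
-      sentinel so that only the access time is updated. Assumes a ClientProtocol proxy
-      named "namenode"; the class name is hypothetical.
-
-        import java.io.IOException;
-        import org.apache.hadoop.hdfs.protocol.ClientProtocol;
-
-        class SetTimesSketch {
-          /** Update only the access time of a file; mtime of -1 leaves the
-              modification time unchanged. */
-          static void touchAccessTime(ClientProtocol namenode, String path) throws IOException {
-            long now = System.currentTimeMillis();
-            namenode.setTimes(path, -1L, now);
-          }
-        }
- -->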
- <method name="createSymlink"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="target" type="java.lang.String"/>
- <param name="link" type="java.lang.String"/>
- <param name="dirPerm" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <param name="createParent" type="boolean"/>
- <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
- <exception name="FileAlreadyExistsException" type="org.apache.hadoop.fs.FileAlreadyExistsException"/>
- <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
- <exception name="ParentNotDirectoryException" type="org.apache.hadoop.fs.ParentNotDirectoryException"/>
- <exception name="SafeModeException" type="org.apache.hadoop.hdfs.server.namenode.SafeModeException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Create symlink to a file or directory.
- @param target The path of the destination that the
- link points to.
- @param link The path of the link being created.
- @param dirPerm permissions to use when creating parent directories
- @param createParent if true then missing parent dirs are created;
- if false then the parent must exist
- @throws AccessControlException permission denied
- @throws FileAlreadyExistsException If file <code>link</code> already exists
- @throws FileNotFoundException If parent of <code>link</code> does not exist
- and <code>createParent</code> is false
- @throws ParentNotDirectoryException If parent of <code>link</code> is not a
- directory.
- @throws UnresolvedLinkException if <code>link</code> contains a symlink.
- @throws IOException If an I/O error occurred]]>
- </doc>
- </method>
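- <!-- Usage sketch (illustrative, not part of the generated API record): creating a symlink
-      with missing parents and resolving it back via getLinkTarget. Assumes a ClientProtocol
-      proxy named "namenode"; the class name and the path "/links/data" are hypothetical,
-      and FsPermission.getDefault() is assumed as a reasonable default permission.
-
-        import java.io.IOException;
-        import org.apache.hadoop.fs.permission.FsPermission;
-        import org.apache.hadoop.hdfs.protocol.ClientProtocol;
-
-        class SymlinkSketch {
-          /** Create /links/data pointing at an existing target, creating missing parents. */
-          static void linkToData(ClientProtocol namenode, String target) throws IOException {
-            FsPermission dirPerm = FsPermission.getDefault();
-            namenode.createSymlink(target, "/links/data", dirPerm, true /* createParent */);
-            // getLinkTarget resolves the link we just created back to its target
-            System.out.println(namenode.getLinkTarget("/links/data"));
-          }
-        }
- -->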
- <method name="getLinkTarget" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="path" type="java.lang.String"/>
- <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
- <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Return the target of the given symlink. If there is an intermediate
- symlink in the path (i.e., a symlink leading up to the final path component)
- then the given path is returned with this symlink resolved.
- @param path The path with a link that needs resolution.
- @return The path after resolving the first symbolic link in the path.
- @throws AccessControlException permission denied
- @throws FileNotFoundException If <code>path</code> does not exist
- @throws IOException If the given path does not refer to a symlink
- or an I/O error occurred]]>
- </doc>
- </method>
- <method name="updateBlockForPipeline" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="clientName" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get a new generation stamp together with an access token for
- a block under construction
-
- This method is called only when a client needs to recover a failed
- pipeline or set up a pipeline for appending to a block.
-
- @param block a block
- @param clientName the name of the client
- @return a located block with a new generation stamp and an access token
- @throws IOException if any error occurs]]>
- </doc>
- </method>
- <method name="updatePipeline"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="clientName" type="java.lang.String"/>
- <param name="oldBlock" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="newBlock" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="newNodes" type="org.apache.hadoop.hdfs.protocol.DatanodeID[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Update a pipeline for a block under construction
-
- @param clientName the name of the client
- @param oldBlock the old block
- @param newBlock the new block containing new generation stamp and length
- @param newNodes datanodes in the pipeline
- @throws IOException if any error occurs]]>
- </doc>
- </method>
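- <!-- Usage sketch (illustrative, not part of the generated API record): the pipeline
-      recovery sequence implied by the two methods above. Assumes a ClientProtocol proxy
-      named "namenode", that the caller already has the failed block and the surviving
-      datanodes, and that LocatedBlock exposes the updated block via getBlock(), which is
-      not described in this record. The class name is hypothetical.
-
-        import java.io.IOException;
-        import org.apache.hadoop.hdfs.protocol.Block;
-        import org.apache.hadoop.hdfs.protocol.ClientProtocol;
-        import org.apache.hadoop.hdfs.protocol.DatanodeID;
-        import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-
-        class PipelineRecoverySketch {
-          /** Obtain a new generation stamp for a block under construction and
-              register the rebuilt pipeline with the NameNode. */
-          static void recoverPipeline(ClientProtocol namenode, String clientName,
-                                      Block failedBlock, DatanodeID[] survivingNodes)
-              throws IOException {
-            LocatedBlock updated = namenode.updateBlockForPipeline(failedBlock, clientName);
-            Block newBlock = updated.getBlock();   // carries the new generation stamp
-            namenode.updatePipeline(clientName, failedBlock, newBlock, survivingNodes);
-          }
-        }
- -->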
- <method name="getDelegationToken" return="org.apache.hadoop.security.token.Token"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="renewer" type="org.apache.hadoop.io.Text"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get a valid Delegation Token.
-
- @param renewer the designated renewer for the token
- @return Token<DelegationTokenIdentifier>
- @throws IOException]]>
- </doc>
- </method>
- <method name="renewDelegationToken" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="token" type="org.apache.hadoop.security.token.Token"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Renew an existing delegation token.
-
- @param token delegation token obtained earlier
- @return the new expiration time
- @throws IOException]]>
- </doc>
- </method>
- <method name="cancelDelegationToken"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="token" type="org.apache.hadoop.security.token.Token"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Cancel an existing delegation token.
-
- @param token delegation token
- @throws IOException]]>
- </doc>
- </method>
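- <!-- Usage sketch (illustrative, not part of the generated API record): the delegation
-      token lifecycle built from the three methods above. Assumes a ClientProtocol proxy
-      named "namenode"; the class name and renewer value are hypothetical.
-
-        import java.io.IOException;
-        import org.apache.hadoop.hdfs.protocol.ClientProtocol;
-        import org.apache.hadoop.io.Text;
-        import org.apache.hadoop.security.token.Token;
-
-        class DelegationTokenSketch {
-          /** Obtain a delegation token, renew it once, then cancel it. */
-          static void tokenLifecycle(ClientProtocol namenode, String renewer) throws IOException {
-            Token token = namenode.getDelegationToken(new Text(renewer));
-            long newExpiry = namenode.renewDelegationToken(token);   // extends the lifetime
-            System.out.println("token now expires at " + newExpiry);
-            namenode.cancelDelegationToken(token);                   // invalidate when done
-          }
-        }
- -->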
- <field name="versionID" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Compared to the previous version the following changes have been introduced:
- (Only the latest change is reflected.
- The log of historical changes can be retrieved from svn.)
- 65: recoverLease now returns whether the file is closed]]>
- </doc>
- </field>
- <field name="GET_STATS_CAPACITY_IDX" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="GET_STATS_USED_IDX" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="GET_STATS_REMAINING_IDX" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="GET_STATS_UNDER_REPLICATED_IDX" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="GET_STATS_CORRUPT_BLOCKS_IDX" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="GET_STATS_MISSING_BLOCKS_IDX" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[ClientProtocol is used by user code via
- {@link org.apache.hadoop.hdfs.DistributedFileSystem} class to communicate
- with the NameNode. User code can manipulate the directory namespace,
- as well as open/close file streams, etc.]]>
- </doc>
- </interface>
- <!-- end interface org.apache.hadoop.hdfs.protocol.ClientProtocol -->
- <!-- start class org.apache.hadoop.hdfs.protocol.DatanodeID -->
- <class name="DatanodeID" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.io.WritableComparable"/>
- <constructor name="DatanodeID"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Equivalent to DatanodeID("").]]>
- </doc>
- </constructor>
- <constructor name="DatanodeID" type="java.lang.String"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Equivalent to DatanodeID(nodeName, "", -1, -1).]]>
- </doc>
- </constructor>
- <constructor name="DatanodeID" type="org.apache.hadoop.hdfs.protocol.DatanodeID"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[DatanodeID copy constructor
-
- @param from]]>
- </doc>
- </constructor>
- <constructor name="DatanodeID" type="java.lang.String, java.lang.String, int, int"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Create DatanodeID
- @param nodeName (hostname:portNumber)
- @param storageID data storage ID
- @param infoPort info server port
- @param ipcPort ipc server port]]>
- </doc>
- </constructor>
- <method name="getName" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[@return hostname:portNumber.]]>
- </doc>
- </method>
- <method name="getStorageID" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[@return data storage ID.]]>
- </doc>
- </method>
- <method name="getInfoPort" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[@return infoPort (the port to which the HTTP server is bound)]]>
- </doc>
- </method>
- <method name="getIpcPort" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[@return ipcPort (the port to which the IPC server is bound)]]>
- </doc>
- </method>
- <method name="setStorageID"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="storageID" type="java.lang.String"/>
- <doc>
- <![CDATA[sets the data storage ID.]]>
- </doc>
- </method>
- <method name="getHost" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[@return the hostname only, without the :portNumber suffix.]]>
- </doc>
- </method>
- <method name="getPort" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="equals" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="to" type="java.lang.Object"/>
- </method>
- <method name="hashCode" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="toString" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="updateRegInfo"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="nodeReg" type="org.apache.hadoop.hdfs.protocol.DatanodeID"/>
- <doc>
- <![CDATA[Update fields when a new registration request comes in.
- Note that this does not update storageID.]]>
- </doc>
- </method>
- <method name="compareTo" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="that" type="org.apache.hadoop.hdfs.protocol.DatanodeID"/>
- <doc>
- <![CDATA[Comparable.
- The comparison is based on the String name (host:portNumber) only.
- @param that
- @return as specified by Comparable.]]>
- </doc>
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <field name="EMPTY_ARRAY" type="org.apache.hadoop.hdfs.protocol.DatanodeID[]"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="name" type="java.lang.String"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="storageID" type="java.lang.String"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="infoPort" type="int"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="ipcPort" type="int"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[DatanodeID is composed of the data node
- name (hostname:portNumber) and the data storage ID,
- which it currently represents.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.DatanodeID -->
- <!-- start class org.apache.hadoop.hdfs.protocol.DatanodeInfo -->
- <class name="DatanodeInfo" extends="org.apache.hadoop.hdfs.protocol.DatanodeID"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.net.Node"/>
- <constructor name="DatanodeInfo"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="DatanodeInfo" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="DatanodeInfo" type="org.apache.hadoop.hdfs.protocol.DatanodeID"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="DatanodeInfo" type="org.apache.hadoop.hdfs.protocol.DatanodeID, java.lang.String, java.lang.String"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </constructor>
- <method name="getCapacity" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[The raw capacity.]]>
- </doc>
- </method>
- <method name="getDfsUsed" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[The DFS space used by the data node.]]>
- </doc>
- </method>
- <method name="getNonDfsUsed" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[The non-DFS space used by the data node.]]>
- </doc>
- </method>
- <method name="getDfsUsedPercent" return="float"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[The space used by the data node, as a percentage of its present capacity.]]>
- </doc>
- </method>
- <method name="getRemaining" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[The raw free space.]]>
- </doc>
- </method>
- <method name="getRemainingPercent" return="float"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[The remaining space as percentage of configured capacity.]]>
- </doc>
- </method>
- <method name="getLastUpdate" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[The time when this information was accurate.]]>
- </doc>
- </method>
- <method name="getXceiverCount" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[number of active connections]]>
- </doc>
- </method>
- <method name="setCapacity"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="capacity" type="long"/>
- <doc>
- <![CDATA[Sets raw capacity.]]>
- </doc>
- </method>
- <method name="setRemaining"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="remaining" type="long"/>
- <doc>
- <![CDATA[Sets raw free space.]]>
- </doc>
- </method>
- <method name="setLastUpdate"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="lastUpdate" type="long"/>
- <doc>
- <![CDATA[Sets time when this information was accurate.]]>
- </doc>
- </method>
- <method name="setXceiverCount"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="xceiverCount" type="int"/>
- <doc>
- <![CDATA[Sets number of active connections]]>
- </doc>
- </method>
- <method name="getNetworkLocation" return="java.lang.String"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[rack name]]>
- </doc>
- </method>
- <method name="setNetworkLocation"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="location" type="java.lang.String"/>
- <doc>
- <![CDATA[Sets the rack name]]>
- </doc>
- </method>
- <method name="getHostName" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="setHostName"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="host" type="java.lang.String"/>
- </method>
- <method name="getDatanodeReport" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[A formatted string for reporting the status of the DataNode.]]>
- </doc>
- </method>
- <method name="dumpDatanode" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[A formatted string for printing the status of the DataNode.]]>
- </doc>
- </method>
- <method name="startDecommission"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Start decommissioning a node.]]>
- </doc>
- </method>
- <method name="stopDecommission"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Stop decommissioning a node.]]>
- </doc>
- </method>
- <method name="isDecommissionInProgress" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Returns true if the node is in the process of being decommissioned]]>
- </doc>
- </method>
- <method name="isDecommissioned" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Returns true if the node has been decommissioned.]]>
- </doc>
- </method>
- <method name="setDecommissioned"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Sets the admin state to indicate that decommission is complete.]]>
- </doc>
- </method>
- <method name="setAdminState"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="newState" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates"/>
- <doc>
- <![CDATA[Sets the admin state of this node.]]>
- </doc>
- </method>
- <method name="getParent" return="org.apache.hadoop.net.Node"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Return this node's parent]]>
- </doc>
- </method>
- <method name="setParent"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="parent" type="org.apache.hadoop.net.Node"/>
- </method>
- <method name="getLevel" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Return this node's level in the tree.
- E.g. the root of a tree returns 0 and its children return 1]]>
- </doc>
- </method>
- <method name="setLevel"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="level" type="int"/>
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="read" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Read a DatanodeInfo]]>
- </doc>
- </method>
- <method name="hashCode" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="equals" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="obj" type="java.lang.Object"/>
- </method>
- <field name="capacity" type="long"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="dfsUsed" type="long"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="remaining" type="long"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="lastUpdate" type="long"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="xceiverCount" type="int"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="location" type="java.lang.String"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="hostName" type="java.lang.String"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <doc>
- <![CDATA[The host name, as supplied by the datanode during registration.
- The namenode uses the datanode's IP address as its name.]]>
- </doc>
- </field>
- <field name="adminState" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[DatanodeInfo represents the status of a DataNode.
- This object is used for communication in the
- Datanode Protocol and the Client Protocol.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.DatanodeInfo -->
- <!-- start class org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates -->
- <class name="DatanodeInfo.AdminStates" extends="java.lang.Enum"
- abstract="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <method name="values" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates[]"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="valueOf" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="name" type="java.lang.String"/>
- </method>
- <field name="NORMAL" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DECOMMISSION_INPROGRESS" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DECOMMISSIONED" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates -->
- <!-- start interface org.apache.hadoop.hdfs.protocol.DataTransferProtocol -->
- <interface name="DataTransferProtocol" abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <field name="DATA_TRANSFER_VERSION" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Version for data transfers between clients and datanodes.
- This should change whenever the serialization of DatanodeInfo changes,
- not just when the protocol changes; this is easy to overlook.]]>
- </doc>
- </field>
- <field name="OP_WRITE_BLOCK" type="byte"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="Deprecated at 0.21. Use Op.WRITE_BLOCK instead.">
- <doc>
- <![CDATA[@deprecated Deprecated at 0.21. Use Op.WRITE_BLOCK instead.]]>
- </doc>
- </field>
- <field name="OP_READ_BLOCK" type="byte"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="Deprecated at 0.21. Use Op.READ_BLOCK instead.">
- <doc>
- <![CDATA[@deprecated Deprecated at 0.21. Use Op.READ_BLOCK instead.]]>
- </doc>
- </field>
- <field name="OP_READ_METADATA" type="byte"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="As of version 15, OP_READ_METADATA is no longer supported.">
- <doc>
- <![CDATA[@deprecated As of version 15, OP_READ_METADATA is no longer supported.]]>
- </doc>
- </field>
- <field name="OP_REPLACE_BLOCK" type="byte"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="Deprecated at 0.21. Use Op.REPLACE_BLOCK instead.">
- <doc>
- <![CDATA[@deprecated Deprecated at 0.21. Use Op.REPLACE_BLOCK instead.]]>
- </doc>
- </field>
- <field name="OP_COPY_BLOCK" type="byte"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="Deprecated at 0.21. Use Op.COPY_BLOCK instead.">
- <doc>
- <![CDATA[@deprecated Deprecated at 0.21. Use Op.COPY_BLOCK instead.]]>
- </doc>
- </field>
- <field name="OP_BLOCK_CHECKSUM" type="byte"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="Deprecated at 0.21. Use Op.BLOCK_CHECKSUM instead.">
- <doc>
- <![CDATA[@deprecated Deprecated at 0.21. Use Op.BLOCK_CHECKSUM instead.]]>
- </doc>
- </field>
- <field name="OP_STATUS_SUCCESS" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="Deprecated at 0.21. Use Status.SUCCESS instead.">
- <doc>
- <![CDATA[@deprecated Deprecated at 0.21. Use Status.SUCCESS instead.]]>
- </doc>
- </field>
- <field name="OP_STATUS_ERROR" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="Deprecated at 0.21. Use Status.ERROR instead.">
- <doc>
- <![CDATA[@deprecated Deprecated at 0.21. Use Status.ERROR instead.]]>
- </doc>
- </field>
- <field name="OP_STATUS_ERROR_CHECKSUM" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="Deprecated at 0.21. Use Status.ERROR_CHECKSUM instead.">
- <doc>
- <![CDATA[@deprecated Deprecated at 0.21. Use Status.ERROR_CHECKSUM instead.]]>
- </doc>
- </field>
- <field name="OP_STATUS_ERROR_INVALID" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="Deprecated at 0.21. Use Status.ERROR_INVALID instead.">
- <doc>
- <![CDATA[@deprecated Deprecated at 0.21. Use Status.ERROR_INVALID instead.]]>
- </doc>
- </field>
- <field name="OP_STATUS_ERROR_EXISTS" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="Deprecated at 0.21. Use Status.ERROR_EXISTS instead.">
- <doc>
- <![CDATA[@deprecated Deprecated at 0.21. Use Status.ERROR_EXISTS instead.]]>
- </doc>
- </field>
- <field name="OP_STATUS_ERROR_ACCESS_TOKEN" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="Deprecated at 0.21. Use Status.ERROR_ACCESS_TOKEN instead.">
- <doc>
- <![CDATA[@deprecated Deprecated at 0.21. Use Status.ERROR_ACCESS_TOKEN instead.]]>
- </doc>
- </field>
- <field name="OP_STATUS_CHECKSUM_OK" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="Deprecated at 0.21. Use Status.CHECKSUM_OK instead.">
- <doc>
- <![CDATA[@deprecated Deprecated at 0.21. Use Status.CHECKSUM_OK instead.]]>
- </doc>
- </field>
- <doc>
- <![CDATA[Transfer data to/from datanode using a streaming protocol.]]>
- </doc>
- </interface>
- <!-- end interface org.apache.hadoop.hdfs.protocol.DataTransferProtocol -->
- <!-- start class org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage -->
- <class name="DataTransferProtocol.BlockConstructionStage" extends="java.lang.Enum"
- abstract="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <method name="values" return="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage[]"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="valueOf" return="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="name" type="java.lang.String"/>
- </method>
- <method name="getRecoveryStage" return="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the recovery stage corresponding to this stage.]]>
- </doc>
- </method>
- <field name="PIPELINE_SETUP_APPEND" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[The enum constants are always listed as a regular stage followed by its
- recovery stage.
- Changing this order will break getRecoveryStage.]]>
- </doc>
- </field>
- <field name="PIPELINE_SETUP_APPEND_RECOVERY" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DATA_STREAMING" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="PIPELINE_SETUP_STREAMING_RECOVERY" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="PIPELINE_CLOSE" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="PIPELINE_CLOSE_RECOVERY" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="PIPELINE_SETUP_CREATE" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage -->
- <!-- start class org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op -->
- <class name="DataTransferProtocol.Op" extends="java.lang.Enum"
- abstract="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <method name="values" return="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op[]"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="valueOf" return="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="name" type="java.lang.String"/>
- </method>
- <method name="read" return="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Read from in]]>
- </doc>
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Write to out]]>
- </doc>
- </method>
- <field name="WRITE_BLOCK" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="READ_BLOCK" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="READ_METADATA" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="REPLACE_BLOCK" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="COPY_BLOCK" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="BLOCK_CHECKSUM" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="code" type="byte"
- transient="false" volatile="false"
- static="false" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[The code for this operation.]]>
- </doc>
- </field>
- <doc>
- <![CDATA[Operation]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op -->
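- <!-- Usage sketch (illustrative, not part of the generated API record): round-tripping an
-      Op code through a byte stream using the read/write methods documented above. The
-      class name is hypothetical.
-
-        import java.io.ByteArrayInputStream;
-        import java.io.ByteArrayOutputStream;
-        import java.io.DataInputStream;
-        import java.io.DataOutputStream;
-        import java.io.IOException;
-        import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op;
-
-        class OpRoundTripSketch {
-          /** Serialize an Op code to a byte stream and read it back. */
-          static void roundTrip() throws IOException {
-            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
-            Op.READ_BLOCK.write(new DataOutputStream(bytes));
-            Op decoded = Op.read(
-                new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
-            System.out.println(decoded);   // READ_BLOCK
-          }
-        }
- -->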
- <!-- start class org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PacketHeader -->
- <class name="DataTransferProtocol.PacketHeader" extends="java.lang.Object"
- abstract="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.io.Writable"/>
- <constructor name="DataTransferProtocol.PacketHeader"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="DataTransferProtocol.PacketHeader" type="int, long, long, boolean, int"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getDataLen" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="isLastPacketInBlock" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getSeqno" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getOffsetInBlock" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getPacketLen" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="toString" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="buf" type="java.nio.ByteBuffer"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="putInBuffer"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="buf" type="java.nio.ByteBuffer"/>
- <doc>
- <![CDATA[Write the header into the buffer.
- This requires that PKT_HEADER_LEN bytes are available.]]>
- </doc>
- </method>
- <method name="sanityCheck" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="lastSeqNo" type="long"/>
- <doc>
- <![CDATA[Perform a sanity check on the packet, returning true if it is sane.
- @param lastSeqNo the previous sequence number received - we expect the current
- sequence number to be larger by 1.]]>
- </doc>
- </method>
- <method name="equals" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="o" type="java.lang.Object"/>
- </method>
- <method name="hashCode" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <field name="PKT_HEADER_LEN" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Header size for a packet]]>
- </doc>
- </field>
- <doc>
- <![CDATA[Header data for each packet that goes through the read/write pipelines.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PacketHeader -->
- <!-- start class org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PipelineAck -->
- <class name="DataTransferProtocol.PipelineAck" extends="java.lang.Object"
- abstract="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.io.Writable"/>
- <constructor name="DataTransferProtocol.PipelineAck"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[default constructor]]>
- </doc>
- </constructor>
- <constructor name="DataTransferProtocol.PipelineAck" type="long, org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status[]"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Constructor
- @param seqno sequence number
- @param replies an array of replies]]>
- </doc>
- </constructor>
- <method name="getSeqno" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the sequence number
- @return the sequence number]]>
- </doc>
- </method>
- <method name="getNumOfReplies" return="short"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the number of replies
- @return the number of replies]]>
- </doc>
- </method>
- <method name="getReply" return="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="i" type="int"/>
- <doc>
- <![CDATA[Get the ith reply
- @return the ith reply]]>
- </doc>
- </method>
- <method name="isSuccess" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Check whether this ack contains any error status.
- @return true if all statuses are SUCCESS]]>
- </doc>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Writable interface]]>
- </doc>
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="toString" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <field name="UNKOWN_SEQNO" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[reply]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PipelineAck -->
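- <!-- A minimal sketch of building and inspecting a pipeline ack with the constructor
- and accessors listed above; the sequence number and the three-reply pipeline are
- illustrative values.
-
- import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PipelineAck;
- import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status;
-
- class PipelineAckExample {
-   static void demo() {
-     Status[] replies = { Status.SUCCESS, Status.SUCCESS, Status.ERROR_CHECKSUM };
-     PipelineAck ack = new PipelineAck(42L, replies);
-     System.out.println("seqno=" + ack.getSeqno()
-         + " replies=" + ack.getNumOfReplies()
-         + " allOk=" + ack.isSuccess());        // false: one reply is not SUCCESS
-     for (int i = 0; i < ack.getNumOfReplies(); i++) {
-       System.out.println("reply " + i + ": " + ack.getReply(i));
-     }
-   }
- }
- -->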
- <!-- start class org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Receiver -->
- <class name="DataTransferProtocol.Receiver" extends="java.lang.Object"
- abstract="true"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="DataTransferProtocol.Receiver"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="readOp" return="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op"
- abstract="false" native="false" synchronized="false"
- static="false" final="true" visibility="protected"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInputStream"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Read an Op. It also checks protocol version.]]>
- </doc>
- </method>
- <method name="processOp"
- abstract="false" native="false" synchronized="false"
- static="false" final="true" visibility="protected"
- deprecated="not deprecated">
- <param name="op" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op"/>
- <param name="in" type="java.io.DataInputStream"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Process the op by calling the corresponding method.]]>
- </doc>
- </method>
- <method name="opReadBlock"
- abstract="true" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInputStream"/>
- <param name="blk" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="offset" type="long"/>
- <param name="length" type="long"/>
- <param name="client" type="java.lang.String"/>
- <param name="blockToken" type="org.apache.hadoop.security.token.Token"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Abstract OP_READ_BLOCK method. Read a block.]]>
- </doc>
- </method>
- <method name="opWriteBlock"
- abstract="true" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInputStream"/>
- <param name="blk" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="pipelineSize" type="int"/>
- <param name="stage" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage"/>
- <param name="newGs" type="long"/>
- <param name="minBytesRcvd" type="long"/>
- <param name="maxBytesRcvd" type="long"/>
- <param name="client" type="java.lang.String"/>
- <param name="src" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo"/>
- <param name="targets" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"/>
- <param name="blockToken" type="org.apache.hadoop.security.token.Token"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Abstract OP_WRITE_BLOCK method.
- Write a block.]]>
- </doc>
- </method>
- <method name="opReplaceBlock"
- abstract="true" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInputStream"/>
- <param name="blk" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="sourceId" type="java.lang.String"/>
- <param name="src" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo"/>
- <param name="blockToken" type="org.apache.hadoop.security.token.Token"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Abstract OP_REPLACE_BLOCK method.
- It is used for balancing purposes; the request is sent to the destination.]]>
- </doc>
- </method>
- <method name="opCopyBlock"
- abstract="true" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInputStream"/>
- <param name="blk" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="blockToken" type="org.apache.hadoop.security.token.Token"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Abstract OP_COPY_BLOCK method. It is used for balancing purposes;
- the request is sent to a proxy source.]]>
- </doc>
- </method>
- <method name="opBlockChecksum"
- abstract="true" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInputStream"/>
- <param name="blk" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="blockToken" type="org.apache.hadoop.security.token.Token"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Abstract OP_BLOCK_CHECKSUM method.
- Get the checksum of a block]]>
- </doc>
- </method>
- <doc>
- <![CDATA[Receiver]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Receiver -->
- <!-- start class org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Sender -->
- <class name="DataTransferProtocol.Sender" extends="java.lang.Object"
- abstract="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="DataTransferProtocol.Sender"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="op"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutputStream"/>
- <param name="op" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Initialize an operation.]]>
- </doc>
- </method>
- <method name="opReadBlock"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutputStream"/>
- <param name="blk" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="blockOffset" type="long"/>
- <param name="blockLen" type="long"/>
- <param name="clientName" type="java.lang.String"/>
- <param name="blockToken" type="org.apache.hadoop.security.token.Token"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Send OP_READ_BLOCK]]>
- </doc>
- </method>
- <method name="opWriteBlock"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutputStream"/>
- <param name="blk" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="pipelineSize" type="int"/>
- <param name="stage" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage"/>
- <param name="newGs" type="long"/>
- <param name="minBytesRcvd" type="long"/>
- <param name="maxBytesRcvd" type="long"/>
- <param name="client" type="java.lang.String"/>
- <param name="src" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo"/>
- <param name="targets" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"/>
- <param name="blockToken" type="org.apache.hadoop.security.token.Token"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Send OP_WRITE_BLOCK]]>
- </doc>
- </method>
- <method name="opReplaceBlock"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutputStream"/>
- <param name="blk" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="storageId" type="java.lang.String"/>
- <param name="src" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo"/>
- <param name="blockToken" type="org.apache.hadoop.security.token.Token"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Send OP_REPLACE_BLOCK]]>
- </doc>
- </method>
- <method name="opCopyBlock"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutputStream"/>
- <param name="blk" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="blockToken" type="org.apache.hadoop.security.token.Token"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Send OP_COPY_BLOCK]]>
- </doc>
- </method>
- <method name="opBlockChecksum"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutputStream"/>
- <param name="blk" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="blockToken" type="org.apache.hadoop.security.token.Token"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Send OP_BLOCK_CHECKSUM]]>
- </doc>
- </method>
- <doc>
- <![CDATA[Sender]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Sender -->
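- <!-- A minimal client-side sketch that writes an OP_READ_BLOCK request with the static
- Sender helper above. The socket wiring, buffering, and the raw Token type are
- assumptions for illustration; the opReadBlock parameter order follows the listing.
-
- import java.io.BufferedOutputStream;
- import java.io.DataOutputStream;
- import java.io.IOException;
- import java.net.Socket;
- import org.apache.hadoop.hdfs.protocol.Block;
- import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Sender;
- import org.apache.hadoop.security.token.Token;
-
- class ReadBlockRequest {
-   static void send(Socket s, Block blk, long offset, long len,
-                    String clientName, Token blockToken) throws IOException {
-     DataOutputStream out =
-         new DataOutputStream(new BufferedOutputStream(s.getOutputStream()));
-     // Writes the OP_READ_BLOCK request for the given byte range of the block.
-     Sender.opReadBlock(out, blk, offset, len, clientName, blockToken);
-     out.flush();
-   }
- }
- -->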
- <!-- start class org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status -->
- <class name="DataTransferProtocol.Status" extends="java.lang.Enum"
- abstract="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <method name="values" return="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status[]"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="valueOf" return="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="name" type="java.lang.String"/>
- </method>
- <method name="read" return="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Read from in]]>
- </doc>
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Write to out]]>
- </doc>
- </method>
- <method name="writeOutputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.OutputStream"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Write to out]]>
- </doc>
- </method>
- <field name="SUCCESS" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="ERROR" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="ERROR_CHECKSUM" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="ERROR_INVALID" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="ERROR_EXISTS" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="ERROR_ACCESS_TOKEN" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="CHECKSUM_OK" type="org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[Status]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status -->
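- <!-- A minimal sketch that round-trips a Status value through the DataOutput/DataInput
- methods listed above, using in-memory byte streams for illustration.
-
- import java.io.ByteArrayInputStream;
- import java.io.ByteArrayOutputStream;
- import java.io.DataInputStream;
- import java.io.DataOutputStream;
- import java.io.IOException;
- import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status;
-
- class StatusRoundTrip {
-   static void demo() throws IOException {
-     ByteArrayOutputStream bytes = new ByteArrayOutputStream();
-     Status.SUCCESS.write(new DataOutputStream(bytes));     // "Write to out"
-     Status echoed = Status.read(
-         new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
-     assert echoed == Status.SUCCESS;                       // "Read from in"
-   }
- }
- -->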
- <!-- start class org.apache.hadoop.hdfs.protocol.DirectoryListing -->
- <class name="DirectoryListing" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.io.Writable"/>
- <constructor name="DirectoryListing"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[default constructor]]>
- </doc>
- </constructor>
- <constructor name="DirectoryListing" type="org.apache.hadoop.hdfs.protocol.HdfsFileStatus[], int"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[constructor
- @param partialListing a partial listing of a directory
- @param remainingEntries number of entries that are left to be listed]]>
- </doc>
- </constructor>
- <method name="getPartialListing" return="org.apache.hadoop.hdfs.protocol.HdfsFileStatus[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the partial listing of file status
- @return the partial listing of file status]]>
- </doc>
- </method>
- <method name="getRemainingEntries" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the number of remaining entries that are left to be listed
- @return the number of remaining entries that are left to be listed]]>
- </doc>
- </method>
- <method name="hasMore" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Check if there are more entries that are left to be listed
- @return true if there are more entries that are left to be listed;
- return false otherwise.]]>
- </doc>
- </method>
- <method name="getLastName" return="byte[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the last name in this list
- @return the last name in the list if it is not empty; otherwise return null]]>
- </doc>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <doc>
- <![CDATA[This class defines a partial listing of a directory to support
- iterative directory listing.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.DirectoryListing -->
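- <!-- A minimal sketch of the iterative listing pattern this class supports. The Fetcher
- callback that asks the namenode for the next batch is hypothetical; the
- DirectoryListing and HdfsFileStatus calls follow the listing above, and EMPTY_NAME is
- assumed to mean "start at the beginning".
-
- import java.io.IOException;
- import org.apache.hadoop.hdfs.protocol.DirectoryListing;
- import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-
- class ListAll {
-   interface Fetcher {  // hypothetical source of partial listings
-     DirectoryListing next(byte[] startAfter) throws IOException;
-   }
-
-   static void listAll(Fetcher fetcher) throws IOException {
-     byte[] startAfter = HdfsFileStatus.EMPTY_NAME;   // assumed: list from the beginning
-     DirectoryListing batch;
-     do {
-       batch = fetcher.next(startAfter);
-       for (HdfsFileStatus st : batch.getPartialListing()) {
-         System.out.println(st.getLocalName());
-       }
-       startAfter = batch.getLastName();              // cookie for the next batch
-     } while (batch.hasMore());
-   }
- }
- -->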
- <!-- start class org.apache.hadoop.hdfs.protocol.DSQuotaExceededException -->
- <class name="DSQuotaExceededException" extends="org.apache.hadoop.hdfs.protocol.QuotaExceededException"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="DSQuotaExceededException"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="DSQuotaExceededException" type="java.lang.String"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="DSQuotaExceededException" type="long, long"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getMessage" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <field name="serialVersionUID" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="protected"
- deprecated="not deprecated">
- </field>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.DSQuotaExceededException -->
- <!-- start interface org.apache.hadoop.hdfs.protocol.FSConstants -->
- <interface name="FSConstants" abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <field name="MIN_BLOCKS_FOR_WRITE" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="BLOCK_INVALIDATE_CHUNK" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="QUOTA_DONT_SET" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="QUOTA_RESET" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="HEARTBEAT_INTERVAL" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="BLOCKREPORT_INTERVAL" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="BLOCKREPORT_INITIAL_DELAY" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="LEASE_SOFTLIMIT_PERIOD" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="LEASE_HARDLIMIT_PERIOD" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="LEASE_RECOVER_PERIOD" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="MAX_PATH_LENGTH" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="MAX_PATH_DEPTH" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="BUFFER_SIZE" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="SMALL_BUFFER_SIZE" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DEFAULT_BLOCK_SIZE" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DEFAULT_BYTES_PER_CHECKSUM" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DEFAULT_WRITE_PACKET_SIZE" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DEFAULT_REPLICATION_FACTOR" type="short"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DEFAULT_FILE_BUFFER_SIZE" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DEFAULT_DATA_SOCKET_SIZE" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="SIZE_OF_INTEGER" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="HDFS_URI_SCHEME" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[URI Scheme for hdfs://namenode/ URIs.]]>
- </doc>
- </field>
- <field name="LAYOUT_VERSION" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Please see {@link LayoutVersion} on adding new layout version.]]>
- </doc>
- </field>
- <doc>
- <![CDATA[Some handy constants]]>
- </doc>
- </interface>
- <!-- end interface org.apache.hadoop.hdfs.protocol.FSConstants -->
- <!-- start class org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType -->
- <class name="FSConstants.DatanodeReportType" extends="java.lang.Enum"
- abstract="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <method name="values" return="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType[]"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="valueOf" return="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="name" type="java.lang.String"/>
- </method>
- <field name="ALL" type="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="LIVE" type="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DEAD" type="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType -->
- <!-- start class org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction -->
- <class name="FSConstants.SafeModeAction" extends="java.lang.Enum"
- abstract="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <method name="values" return="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction[]"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="valueOf" return="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="name" type="java.lang.String"/>
- </method>
- <field name="SAFEMODE_LEAVE" type="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="SAFEMODE_ENTER" type="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="SAFEMODE_GET" type="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction -->
- <!-- start class org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction -->
- <class name="FSConstants.UpgradeAction" extends="java.lang.Enum"
- abstract="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <method name="values" return="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction[]"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="valueOf" return="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="name" type="java.lang.String"/>
- </method>
- <field name="GET_STATUS" type="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DETAILED_STATUS" type="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="FORCE_PROCEED" type="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[Distributed upgrade actions:
-
- 1. Get upgrade status.
- 2. Get detailed upgrade status.
- 3. Proceed with the upgrade if it is stuck, no matter what the status is.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction -->
- <!-- start class org.apache.hadoop.hdfs.protocol.HdfsFileStatus -->
- <class name="HdfsFileStatus" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.io.Writable"/>
- <constructor name="HdfsFileStatus"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[default constructor]]>
- </doc>
- </constructor>
- <constructor name="HdfsFileStatus" type="long, boolean, int, long, long, long, org.apache.hadoop.fs.permission.FsPermission, java.lang.String, java.lang.String, byte[], byte[]"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Constructor
- @param length the number of bytes the file has
- @param isdir if the path is a directory
- @param block_replication the replication factor
- @param blocksize the block size
- @param modification_time modification time
- @param access_time access time
- @param permission permission
- @param owner the owner of the path
- @param group the group of the path
- @param symlink symbolic link
- @param path the local name in Java UTF-8 encoding, the same as that in memory]]>
- </doc>
- </constructor>
- <method name="getLen" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the length of this file, in bytes.
- @return the length of this file, in bytes.]]>
- </doc>
- </method>
- <method name="isDir" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Is this a directory?
- @return true if this is a directory]]>
- </doc>
- </method>
- <method name="isSymlink" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Is this a symbolic link?
- @return true if this is a symbolic link]]>
- </doc>
- </method>
- <method name="getBlockSize" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the block size of the file.
- @return the number of bytes]]>
- </doc>
- </method>
- <method name="getReplication" return="short"
- abstract="false" native="false" synchronized="false"
- static="false" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the replication factor of a file.
- @return the replication factor of a file.]]>
- </doc>
- </method>
- <method name="getModificationTime" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the modification time of the file.
- @return the modification time of the file, in milliseconds since January 1, 1970 UTC.]]>
- </doc>
- </method>
- <method name="getAccessTime" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the access time of the file.
- @return the access time of the file, in milliseconds since January 1, 1970 UTC.]]>
- </doc>
- </method>
- <method name="getPermission" return="org.apache.hadoop.fs.permission.FsPermission"
- abstract="false" native="false" synchronized="false"
- static="false" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get FsPermission associated with the file.
- @return permission]]>
- </doc>
- </method>
- <method name="getOwner" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the owner of the file.
- @return owner of the file]]>
- </doc>
- </method>
- <method name="getGroup" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the group associated with the file.
- @return group for the file.]]>
- </doc>
- </method>
- <method name="isEmptyLocalName" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Check if the local name is empty
- @return true if the name is empty]]>
- </doc>
- </method>
- <method name="getLocalName" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the string representation of the local name
- @return the local name as a string]]>
- </doc>
- </method>
- <method name="getLocalNameInBytes" return="byte[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the Java UTF-8 representation of the local name
- @return the local name in Java UTF-8]]>
- </doc>
- </method>
- <method name="getFullName" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="true" visibility="public"
- deprecated="not deprecated">
- <param name="parent" type="java.lang.String"/>
- <doc>
- <![CDATA[Get the string representation of the full path name
- @param parent the parent path
- @return the full path as a string]]>
- </doc>
- </method>
- <method name="getFullPath" return="org.apache.hadoop.fs.Path"
- abstract="false" native="false" synchronized="false"
- static="false" final="true" visibility="public"
- deprecated="not deprecated">
- <param name="parent" type="org.apache.hadoop.fs.Path"/>
- <doc>
- <![CDATA[Get the full path
- @param parent the parent path
- @return the full path]]>
- </doc>
- </method>
- <method name="getSymlink" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the string representation of the symlink.
- @return the symlink as a string.]]>
- </doc>
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <field name="EMPTY_NAME" type="byte[]"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[Class that represents the over-the-wire information for a file.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.HdfsFileStatus -->
- <!-- start class org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus -->
- <class name="HdfsLocatedFileStatus" extends="org.apache.hadoop.hdfs.protocol.HdfsFileStatus"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="HdfsLocatedFileStatus"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Default constructor]]>
- </doc>
- </constructor>
- <constructor name="HdfsLocatedFileStatus" type="long, boolean, int, long, long, long, org.apache.hadoop.fs.permission.FsPermission, java.lang.String, java.lang.String, byte[], byte[], org.apache.hadoop.hdfs.protocol.LocatedBlocks"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Constructor
-
- @param length size
- @param isdir if this is directory
- @param block_replication the file's replication factor
- @param blocksize the file's block size
- @param modification_time most recent modification time
- @param access_time most recent access time
- @param permission permission
- @param owner owner
- @param group group
- @param symlink symbolic link
- @param path local path name in java UTF8 format
- @param locations block locations]]>
- </doc>
- </constructor>
- <method name="getBlockLocations" return="org.apache.hadoop.hdfs.protocol.LocatedBlocks"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <doc>
- <![CDATA[Class that represents the over-the-wire information,
- including block locations, for a file.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus -->
- <!-- start class org.apache.hadoop.hdfs.protocol.LayoutVersion -->
- <class name="LayoutVersion" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="LayoutVersion"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getString" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Gets a formatted string that describes {@link LayoutVersion} information.]]>
- </doc>
- </method>
- <method name="supports" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature"/>
- <param name="lv" type="int"/>
- <doc>
- <![CDATA[Returns true if a given feature is supported in the given layout version.
- @param f feature
- @param lv layout version
- @return true if {@code f} is supported in layout version {@code lv}]]>
- </doc>
- </method>
- <method name="getCurrentLayoutVersion" return="int"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the current layout version]]>
- </doc>
- </method>
- <doc>
- <![CDATA[This class tracks changes in the layout version of HDFS.
-
- The layout version is changed for the following reasons:
- <ol>
- <li>The layout of how namenode or datanode stores information
- on disk changes.</li>
- <li>A new operation code is added to the editlog.</li>
- <li>The format or content of a record in the editlog or fsimage changes.</li>
- </ol>
- <br>
- <b>How to update layout version:<br></b>
- When a change requires a new layout version, please add an entry to
- {@link Feature} with a short enum name, the new layout version, and a description
- of the change. Please see {@link Feature} for further details.
- <br>]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.LayoutVersion -->
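- <!-- A minimal sketch of probing feature support for a stored layout version with the
- static helpers listed above; the choice of DELEGATION_TOKEN as the probed feature is
- illustrative.
-
- import org.apache.hadoop.hdfs.protocol.LayoutVersion;
- import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
-
- class LayoutProbe {
-   static void demo(int storedLayoutVersion) {   // stored HDFS layout versions are negative integers
-     System.out.println(LayoutVersion.getString());
-     System.out.println("current = " + LayoutVersion.getCurrentLayoutVersion());
-     boolean hasDelegationTokens =
-         LayoutVersion.supports(Feature.DELEGATION_TOKEN, storedLayoutVersion);
-     System.out.println("delegation tokens supported: " + hasDelegationTokens);
-   }
- }
- -->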
- <!-- start class org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature -->
- <class name="LayoutVersion.Feature" extends="java.lang.Enum"
- abstract="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <method name="values" return="org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature[]"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="valueOf" return="org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="name" type="java.lang.String"/>
- </method>
- <field name="NAMESPACE_QUOTA" type="org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="FILE_ACCESS_TIME" type="org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DISKSPACE_QUOTA" type="org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="STICKY_BIT" type="org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="APPEND_RBW_DIR" type="org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="ATOMIC_RENAME" type="org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="CONCAT" type="org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="SYMLINKS" type="org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DELEGATION_TOKEN" type="org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="FSIMAGE_COMPRESSION" type="org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="FSIMAGE_CHECKSUM" type="org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="REMOVE_REL13_DISK_LAYOUT_SUPPORT" type="org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="UNUSED_28" type="org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="UNUSED_29" type="org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="UNUSED_30" type="org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="RESERVED_REL20_203" type="org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="RESERVED_REL20_204" type="org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="RESERVED_REL22" type="org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[Enums for features that change the layout version.
- <br><br>
- To add a new layout version:
- <ul>
- <li>Define a new enum constant with a short enum name, the new layout version
- and description of the added feature.</li>
- <li>When adding a layout version with an ancestor that is not the same as
- its immediate predecessor, use the constructor where a specific ancestor
- can be passed.
- </li>
- </ul>]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature -->
- <!-- start class org.apache.hadoop.hdfs.protocol.LocatedBlock -->
- <class name="LocatedBlock" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.io.Writable"/>
- <constructor name="LocatedBlock"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="LocatedBlock" type="org.apache.hadoop.hdfs.protocol.Block, org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="LocatedBlock" type="org.apache.hadoop.hdfs.protocol.Block, org.apache.hadoop.hdfs.protocol.DatanodeInfo[], long"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="LocatedBlock" type="org.apache.hadoop.hdfs.protocol.Block, org.apache.hadoop.hdfs.protocol.DatanodeInfo[], long, boolean"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getBlockToken" return="org.apache.hadoop.security.token.Token"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="setBlockToken"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="token" type="org.apache.hadoop.security.token.Token"/>
- </method>
- <method name="getBlock" return="org.apache.hadoop.hdfs.protocol.Block"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getLocations" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getStartOffset" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getBlockSize" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="isCorrupt" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="read" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Read LocatedBlock from in.]]>
- </doc>
- </method>
- <method name="toString" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <doc>
- <![CDATA[A LocatedBlock is a pair of Block, DatanodeInfo[]
- objects. It tells where to find a Block.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.LocatedBlock -->
- <!-- start class org.apache.hadoop.hdfs.protocol.LocatedBlocks -->
- <class name="LocatedBlocks" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.io.Writable"/>
- <constructor name="LocatedBlocks"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="LocatedBlocks" type="long, boolean, java.util.List, org.apache.hadoop.hdfs.protocol.LocatedBlock, boolean"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[public Constructor]]>
- </doc>
- </constructor>
- <method name="getLocatedBlocks" return="java.util.List"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get located blocks.]]>
- </doc>
- </method>
- <method name="getLastLocatedBlock" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the last located block.]]>
- </doc>
- </method>
- <method name="isLastBlockComplete" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Is the last block completed?]]>
- </doc>
- </method>
- <method name="get" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="index" type="int"/>
- <doc>
- <![CDATA[Get located block.]]>
- </doc>
- </method>
- <method name="locatedBlockCount" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get number of located blocks.]]>
- </doc>
- </method>
- <method name="getFileLength" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="isUnderConstruction" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Return true if the file was under construction when
- this LocatedBlocks was constructed, false otherwise.]]>
- </doc>
- </method>
- <method name="findBlock" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="offset" type="long"/>
- <doc>
- <![CDATA[Find the block containing the specified offset.
- 
- @return the index of the block if found; otherwise a negative value.]]>
- </doc>
- </method>
- <method name="insertRange"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="blockIdx" type="int"/>
- <param name="newBlocks" type="java.util.List"/>
- </method>
- <method name="getInsertIndex" return="int"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="binSearchResult" type="int"/>
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="toString" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <doc>
- <![CDATA[Collection of blocks with their locations and the file length.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.LocatedBlocks -->
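- <!-- A minimal sketch that locates the block covering a byte offset with findBlock/get.
- Interpreting a negative result via getInsertIndex as the insertion position is an
- assumption based on the binary-search style of the API.
-
- import org.apache.hadoop.hdfs.protocol.LocatedBlock;
- import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-
- class BlockAtOffset {
-   static LocatedBlock find(LocatedBlocks blocks, long offset) {
-     int idx = blocks.findBlock(offset);
-     if (idx < 0) {
-       int insertAt = LocatedBlocks.getInsertIndex(idx);  // assumed: where such a block would go
-       System.out.println("offset " + offset + " not covered; would insert at " + insertAt);
-       return null;
-     }
-     return blocks.get(idx);
-   }
- }
- -->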
- <!-- start class org.apache.hadoop.hdfs.protocol.NSQuotaExceededException -->
- <class name="NSQuotaExceededException" extends="org.apache.hadoop.hdfs.protocol.QuotaExceededException"
- abstract="false"
- static="false" final="true" visibility="public"
- deprecated="not deprecated">
- <constructor name="NSQuotaExceededException"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="NSQuotaExceededException" type="java.lang.String"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="NSQuotaExceededException" type="long, long"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getMessage" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <field name="serialVersionUID" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="protected"
- deprecated="not deprecated">
- </field>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.NSQuotaExceededException -->
- <!-- start class org.apache.hadoop.hdfs.protocol.QuotaExceededException -->
- <class name="QuotaExceededException" extends="java.io.IOException"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="QuotaExceededException"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </constructor>
- <constructor name="QuotaExceededException" type="java.lang.String"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </constructor>
- <constructor name="QuotaExceededException" type="long, long"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </constructor>
- <method name="setPathName"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="path" type="java.lang.String"/>
- </method>
- <method name="getMessage" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <field name="serialVersionUID" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="pathName" type="java.lang.String"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="quota" type="long"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="count" type="long"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[This exception is thrown when a modification to HDFS results in a violation
- of a directory quota. A directory quota may be a namespace quota (a limit
- on the number of files and directories) or a diskspace quota (a limit on the
- space taken by all the files under the directory tree). <br> <br>
-
- The exception message specifies the directory where the quota
- was violated and the actual quota. The specific message is generated in the
- corresponding exception class:
- DSQuotaExceededException or
- NSQuotaExceededException]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.QuotaExceededException -->
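- <!-- Illustrative sketch (not part of the generated API record): how client code
-      might surface the quota exceptions described above when writing under a
-      directory with quotas set. The path and the message handling are assumptions.
-
-   import java.io.IOException;
-   import org.apache.hadoop.conf.Configuration;
-   import org.apache.hadoop.fs.FSDataOutputStream;
-   import org.apache.hadoop.fs.FileSystem;
-   import org.apache.hadoop.fs.Path;
-   import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
-   import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
-
-   public class QuotaAwareWrite {
-     public static void main(String[] args) throws IOException {
-       FileSystem fs = FileSystem.get(new Configuration());
-       try {
-         FSDataOutputStream out = fs.create(new Path("/quota-limited/dir/file"));
-         out.writeBytes("payload");
-         out.close();
-       } catch (NSQuotaExceededException e) {
-         // Namespace quota: too many files/directories under the directory.
-         System.err.println(e.getMessage());
-       } catch (DSQuotaExceededException e) {
-         // Diskspace quota: files under the directory tree take too much space.
-         System.err.println(e.getMessage());
-       }
-     }
-   }
- -->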
- <!-- start class org.apache.hadoop.hdfs.protocol.RecoveryInProgressException -->
- <class name="RecoveryInProgressException" extends="java.io.IOException"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="RecoveryInProgressException" type="java.lang.String"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <doc>
- <![CDATA[Exception indicating that a replica is already being recovered.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.RecoveryInProgressException -->
- <!-- start class org.apache.hadoop.hdfs.protocol.UnregisteredNodeException -->
- <class name="UnregisteredNodeException" extends="java.io.IOException"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="UnregisteredNodeException" type="org.apache.hadoop.hdfs.server.protocol.NodeRegistration"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="UnregisteredNodeException" type="org.apache.hadoop.hdfs.protocol.DatanodeID, org.apache.hadoop.hdfs.protocol.DatanodeInfo"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[The exception is thrown if a different data-node claims the same
- storage id as the existing one.
-
- @param nodeID unregistered data-node
- @param storedNode data-node stored in the system with this storage id]]>
- </doc>
- </constructor>
- <doc>
- <![CDATA[This exception is thrown when a node that has not previously
- registered is trying to access the name node.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.UnregisteredNodeException -->
- <!-- start class org.apache.hadoop.hdfs.protocol.UnresolvedPathException -->
- <class name="UnresolvedPathException" extends="org.apache.hadoop.fs.UnresolvedLinkException"
- abstract="false"
- static="false" final="true" visibility="public"
- deprecated="not deprecated">
- <constructor name="UnresolvedPathException" type="java.lang.String"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Used by RemoteException to instantiate an UnresolvedPathException.]]>
- </doc>
- </constructor>
- <constructor name="UnresolvedPathException" type="java.lang.String, java.lang.String, java.lang.String, java.lang.String"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getResolvedPath" return="org.apache.hadoop.fs.Path"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Return a path with the link resolved with the target.]]>
- </doc>
- </method>
- <method name="getMessage" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <doc>
- <![CDATA[Thrown when a symbolic link is encountered in a path.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.UnresolvedPathException -->
- </package>
- <package name="org.apache.hadoop.hdfs.security.token.block">
- <!-- start class org.apache.hadoop.hdfs.security.token.block.BlockKey -->
- <class name="BlockKey" extends="org.apache.hadoop.security.token.delegation.DelegationKey"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="BlockKey"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="BlockKey" type="int, long, javax.crypto.SecretKey"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <doc>
- <![CDATA[Key used for generating and verifying block tokens]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.security.token.block.BlockKey -->
- <!-- start class org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier -->
- <class name="BlockTokenIdentifier" extends="org.apache.hadoop.security.token.TokenIdentifier"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="BlockTokenIdentifier"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="BlockTokenIdentifier" type="java.lang.String, long, java.util.EnumSet"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getKind" return="org.apache.hadoop.io.Text"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getUser" return="org.apache.hadoop.security.UserGroupInformation"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getExpiryDate" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="setExpiryDate"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="expiryDate" type="long"/>
- </method>
- <method name="getKeyId" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="setKeyId"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="keyId" type="int"/>
- </method>
- <method name="getUserId" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getBlockId" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getAccessModes" return="java.util.EnumSet"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="toString" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="equals" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="obj" type="java.lang.Object"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="hashCode" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getBytes" return="byte[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- </class>
- <!-- end class org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier -->
- <!-- start class org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager -->
- <class name="BlockTokenSecretManager" extends="org.apache.hadoop.security.token.SecretManager"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="BlockTokenSecretManager" type="boolean, long, long"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Constructor
-
- @param isMaster
- @param keyUpdateInterval
- @param tokenLifetime
- @throws IOException]]>
- </doc>
- </constructor>
- <method name="exportKeys" return="org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Export block keys, only to be used in master mode]]>
- </doc>
- </method>
- <method name="setKeys"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="exportedKeys" type="org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Set block keys, only to be used in slave mode]]>
- </doc>
- </method>
- <method name="updateKeys"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Update block keys, only to be used in master mode]]>
- </doc>
- </method>
- <method name="generateToken" return="org.apache.hadoop.security.token.Token"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="modes" type="java.util.EnumSet"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Generate a block token for the current user]]>
- </doc>
- </method>
- <method name="generateToken" return="org.apache.hadoop.security.token.Token"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="userId" type="java.lang.String"/>
- <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="modes" type="java.util.EnumSet"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Generate a block token for a specified user]]>
- </doc>
- </method>
- <method name="checkAccess"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="id" type="org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier"/>
- <param name="userId" type="java.lang.String"/>
- <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="mode" type="org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode"/>
- <exception name="SecretManager.InvalidToken" type="org.apache.hadoop.security.token.SecretManager.InvalidToken"/>
- <doc>
- <![CDATA[Check if access should be allowed. userID is not checked if it is null. This
- method does not check whether the token password is correct; it should be used only
- when the token password has already been verified (e.g., in the RPC layer).]]>
- </doc>
- </method>
- <method name="checkAccess"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="token" type="org.apache.hadoop.security.token.Token"/>
- <param name="userId" type="java.lang.String"/>
- <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="mode" type="org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode"/>
- <exception name="SecretManager.InvalidToken" type="org.apache.hadoop.security.token.SecretManager.InvalidToken"/>
- <doc>
- <![CDATA[Check if access should be allowed. userID is not checked if it is null.]]>
- </doc>
- </method>
- <method name="setTokenLifetime"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="tokenLifetime" type="long"/>
- <doc>
- <![CDATA[Set the token lifetime.]]>
- </doc>
- </method>
- <method name="createIdentifier" return="org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Create an empty block token identifier
-
- @return a newly created empty block token identifier]]>
- </doc>
- </method>
- <method name="createPassword" return="byte[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="identifier" type="org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier"/>
- <doc>
- <![CDATA[Create a new password/secret for the given block token identifier.
-
- @param identifier
- the block token identifier
- @return token password/secret]]>
- </doc>
- </method>
- <method name="retrievePassword" return="byte[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="identifier" type="org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier"/>
- <exception name="SecretManager.InvalidToken" type="org.apache.hadoop.security.token.SecretManager.InvalidToken"/>
- <doc>
- <![CDATA[Look up the token password/secret for the given block token identifier.
-
- @param identifier
- the block token identifier to look up
- @return token password/secret as byte[]
- @throws InvalidToken]]>
- </doc>
- </method>
- <field name="LOG" type="org.apache.commons.logging.Log"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DUMMY_TOKEN" type="org.apache.hadoop.security.token.Token"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[BlockTokenSecretManager can be instantiated in two modes: master mode and slave
- mode. A master can generate new block keys and export them to slaves,
- while slaves can only import and use block keys received from the master. Both
- master and slave can generate and verify block tokens. Typically, master mode
- is used by the NameNode and slave mode is used by DataNodes.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager -->
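- <!-- Illustrative sketch (not part of the generated API record): the master/slave
-      key flow described in the class doc above, using only the constructor and
-      methods listed. The interval/lifetime values and the block id are placeholders.
-
-   import java.io.IOException;
-   import java.util.EnumSet;
-   import org.apache.hadoop.hdfs.protocol.Block;
-   import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
-   import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
-   import org.apache.hadoop.security.token.Token;
-
-   public class BlockTokenFlow {
-     public static void main(String[] args) throws IOException {
-       long keyUpdateInterval = 10 * 60 * 1000L; // placeholder: 10 minutes
-       long tokenLifetime     = 60 * 60 * 1000L; // placeholder: 1 hour
-       // Master (typically the NameNode) generates and exports block keys.
-       BlockTokenSecretManager master =
-           new BlockTokenSecretManager(true, keyUpdateInterval, tokenLifetime);
-       // Slave (typically a DataNode) can only import keys from the master.
-       BlockTokenSecretManager slave =
-           new BlockTokenSecretManager(false, keyUpdateInterval, tokenLifetime);
-       slave.setKeys(master.exportKeys());
-       // Both sides can now generate and verify block tokens.
-       Block block = new Block(12345L); // placeholder block id
-       Token<BlockTokenIdentifier> token =
-           master.generateToken(block, EnumSet.of(BlockTokenSecretManager.AccessMode.READ));
-       // Throws SecretManager.InvalidToken if verification fails.
-       slave.checkAccess(token, null, block, BlockTokenSecretManager.AccessMode.READ);
-     }
-   }
- -->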
- <!-- start class org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode -->
- <class name="BlockTokenSecretManager.AccessMode" extends="java.lang.Enum"
- abstract="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <method name="values" return="org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode[]"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="valueOf" return="org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="name" type="java.lang.String"/>
- </method>
- <field name="READ" type="org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="WRITE" type="org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="COPY" type="org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="REPLACE" type="org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- </class>
- <!-- end class org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode -->
- <!-- start class org.apache.hadoop.hdfs.security.token.block.BlockTokenSelector -->
- <class name="BlockTokenSelector" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.security.token.TokenSelector"/>
- <constructor name="BlockTokenSelector"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="selectToken" return="org.apache.hadoop.security.token.Token"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="service" type="org.apache.hadoop.io.Text"/>
- <param name="tokens" type="java.util.Collection"/>
- </method>
- <doc>
- <![CDATA[A block token selector for HDFS]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.security.token.block.BlockTokenSelector -->
- <!-- start class org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys -->
- <class name="ExportedBlockKeys" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.io.Writable"/>
- <constructor name="ExportedBlockKeys"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="isBlockTokenEnabled" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getKeyUpdateInterval" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getTokenLifetime" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getCurrentKey" return="org.apache.hadoop.hdfs.security.token.block.BlockKey"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getAllKeys" return="org.apache.hadoop.hdfs.security.token.block.BlockKey[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <field name="DUMMY_KEYS" type="org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[Object for passing block keys]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys -->
- <!-- start class org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException -->
- <class name="InvalidBlockTokenException" extends="java.io.IOException"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="InvalidBlockTokenException"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="InvalidBlockTokenException" type="java.lang.String"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <doc>
- <![CDATA[Access token verification failed.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException -->
- </package>
- <package name="org.apache.hadoop.hdfs.security.token.delegation">
- <!-- start class org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier -->
- <class name="DelegationTokenIdentifier" extends="org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="DelegationTokenIdentifier"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Create an empty delegation token identifier for reading into.]]>
- </doc>
- </constructor>
- <constructor name="DelegationTokenIdentifier" type="org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Create a new delegation token identifier
- @param owner the effective username of the token owner
- @param renewer the username of the renewer
- @param realUser the real username of the token owner]]>
- </doc>
- </constructor>
- <method name="getKind" return="org.apache.hadoop.io.Text"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <field name="HDFS_DELEGATION_KIND" type="org.apache.hadoop.io.Text"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[A delegation token identifier that is specific to HDFS.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier -->
- <!-- start class org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager -->
- <class name="DelegationTokenSecretManager" extends="org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="DelegationTokenSecretManager" type="long, long, long, long, org.apache.hadoop.hdfs.server.namenode.FSNamesystem"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Create a secret manager
- @param delegationKeyUpdateInterval the number of seconds for rolling new
- secret keys.
- @param delegationTokenMaxLifetime the maximum lifetime of the delegation
- tokens
- @param delegationTokenRenewInterval how often the tokens must be renewed
- @param delegationTokenRemoverScanInterval how often the tokens are scanned
- for expired tokens]]>
- </doc>
- </constructor>
- <method name="createIdentifier" return="org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getTokenExpiryTime" return="long"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="dtId" type="org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns expiry time of a token given its identifier.
-
- @param dtId DelegationTokenIdentifier of a token
- @return Expiry time of the token
- @throws IOException]]>
- </doc>
- </method>
- <method name="loadSecretManagerState"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInputStream"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Load SecretManager state from fsimage.
-
- @param in input stream to read fsimage
- @throws IOException]]>
- </doc>
- </method>
- <method name="saveSecretManagerState"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutputStream"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Store the current state of the SecretManager for persistence
-
- @param out Output stream for writing into fsimage.
- @throws IOException]]>
- </doc>
- </method>
- <method name="addPersistedDelegationToken"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="identifier" type="org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier"/>
- <param name="expiryTime" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[This method is intended to be used only while reading edit logs.
-
- @param identifier DelegationTokenIdentifier read from the edit logs or
- fsimage
-
- @param expiryTime token expiry time
- @throws IOException]]>
- </doc>
- </method>
- <method name="updatePersistedMasterKey"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="key" type="org.apache.hadoop.security.token.delegation.DelegationKey"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Add a MasterKey to the list of keys.
-
- @param key DelegationKey
- @throws IOException]]>
- </doc>
- </method>
- <method name="updatePersistedTokenRenewal"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="identifier" type="org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier"/>
- <param name="expiryTime" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Update the token cache with renewal record in edit logs.
-
- @param identifier DelegationTokenIdentifier of the renewed token
- @param expiryTime
- @throws IOException]]>
- </doc>
- </method>
- <method name="updatePersistedTokenCancellation"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="identifier" type="org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Update the token cache with the cancel record in edit logs
-
- @param identifier DelegationTokenIdentifier of the canceled token
- @throws IOException]]>
- </doc>
- </method>
- <method name="getNumberOfKeys" return="int"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Returns the number of delegation keys currently stored.
- @return number of delegation keys]]>
- </doc>
- </method>
- <method name="logUpdateMasterKey"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="key" type="org.apache.hadoop.security.token.delegation.DelegationKey"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Call the namesystem to update the edit logs with a new master key.]]>
- </doc>
- </method>
- <doc>
- <![CDATA[An HDFS-specific delegation token secret manager.
- The secret manager is responsible for generating and accepting the password
- for each token.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager -->
- <!-- start class org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector -->
- <class name="DelegationTokenSelector" extends="org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSelector"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="DelegationTokenSelector"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <doc>
- <![CDATA[A delegation token selector that is specialized for HDFS]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector -->
- </package>
- <package name="org.apache.hadoop.hdfs.server.balancer">
- <!-- start class org.apache.hadoop.hdfs.server.balancer.Balancer -->
- <class name="Balancer" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.util.Tool"/>
- <method name="main"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="args" type="java.lang.String[]"/>
- <doc>
- <![CDATA[Run a balancer
- @param args]]>
- </doc>
- </method>
- <method name="run" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="args" type="java.lang.String[]"/>
- <exception name="Exception" type="java.lang.Exception"/>
- <doc>
- <![CDATA[Main method of the Balancer.
- @param args arguments to a Balancer
- @throws Exception exception that occurred during datanode balancing]]>
- </doc>
- </method>
- <method name="getConf" return="org.apache.hadoop.conf.Configuration"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Return this balancer's configuration.]]>
- </doc>
- </method>
- <method name="setConf"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <doc>
- <![CDATA[Set this balancer's configuration.]]>
- </doc>
- </method>
- <field name="MAX_NUM_CONCURRENT_MOVES" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[The maximum number of concurrent block moves at a datanode
- for balancing purposes]]>
- </doc>
- </field>
- <field name="SUCCESS" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="ALREADY_RUNNING" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="NO_MOVE_BLOCK" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="NO_MOVE_PROGRESS" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="IO_EXCEPTION" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="ILLEGAL_ARGS" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[<p>The balancer is a tool that balances disk space usage on an HDFS cluster
- when some datanodes become full or when new empty nodes join the cluster.
- The tool is deployed as an application program that can be run by the
- cluster administrator on a live HDFS cluster while applications
- are adding and deleting files.
-
- <p>SYNOPSIS
- <pre>
- To start:
- bin/start-balancer.sh [-threshold <threshold>]
- Example: bin/start-balancer.sh
- start the balancer with a default threshold of 10%
- bin/start-balancer.sh -threshold 5
- start the balancer with a threshold of 5%
- To stop:
- bin/stop-balancer.sh
- </pre>
-
- <p>DESCRIPTION
- <p>The threshold parameter is a fraction in the range of (0%, 100%) with a
- default value of 10%. The threshold sets a target for whether the cluster
- is balanced. A cluster is balanced if for each datanode, the utilization
- of the node (ratio of used space at the node to total capacity of the node)
- differs from the utilization of the cluster (ratio of used space in the cluster
- to total capacity of the cluster) by no more than the threshold value.
- The smaller the threshold, the more balanced a cluster will become.
- It takes more time to run the balancer for small threshold values.
- Also for a very small threshold the cluster may not be able to reach the
- balanced state when applications write and delete files concurrently.
-
- <p>The tool moves blocks from highly utilized datanodes to poorly
- utilized datanodes iteratively. In each iteration a datanode moves or
- receives no more than the lesser of 10G bytes or the threshold fraction
- of its capacity. Each iteration runs no more than 20 minutes.
- At the end of each iteration, the balancer obtains updated datanodes
- information from the namenode.
-
- <p>A system property that limits the balancer's use of bandwidth is
- defined in the default configuration file:
- <pre>
- <property>
- <name>dfs.balance.bandwidthPerSec</name>
- <value>1048576</value>
- <description> Specifies the maximum bandwidth that each datanode
- can utilize for the balancing purpose, in terms of the number of bytes
- per second. </description>
- </property>
- </pre>
-
- <p>This property determines the maximum speed at which a block will be
- moved from one datanode to another. The default value is 1MB/s. The higher
- the bandwidth, the faster a cluster can reach the balanced state,
- but with greater competition with application processes. If an
- administrator changes the value of this property in the configuration
- file, the change is observed when HDFS is next restarted.
-
- <p>MONITORING BALANCER PROGRESS
- <p>After the balancer is started, an output file name where the balancer
- progress will be recorded is printed on the screen. The administrator
- can monitor the running of the balancer by reading the output file.
- The output shows the balancer's status iteration by iteration. In each
- iteration it prints the starting time, the iteration number, the total
- number of bytes that have been moved in the previous iterations,
- the total number of bytes that are left to move in order for the cluster
- to be balanced, and the number of bytes that are being moved in this
- iteration. Normally "Bytes Already Moved" is increasing while "Bytes Left
- To Move" is decreasing.
-
- <p>Running multiple instances of the balancer in an HDFS cluster is
- prohibited by the tool.
-
- <p>The balancer automatically exits when any of the following five
- conditions is satisfied:
- <ol>
- <li>The cluster is balanced;
- <li>No block can be moved;
- <li>No block has been moved for five consecutive iterations;
- <li>An IOException occurs while communicating with the namenode;
- <li>Another balancer is running.
- </ol>
-
- <p>Upon exit, a balancer returns an exit code and prints one of the
- following messages to the output file, corresponding to the above exit
- reasons:
- <ol>
- <li>The cluster is balanced. Exiting
- <li>No block can be moved. Exiting...
- <li>No block has been moved for 3 iterations. Exiting...
- <li>Received an IO exception: failure reason. Exiting...
- <li>Another balancer is running. Exiting...
- </ol>
-
- <p>The administrator can interrupt the execution of the balancer at any
- time by running the command "stop-balancer.sh" on the machine where the
- balancer is running.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.balancer.Balancer -->
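- <!-- Illustrative sketch (not part of the generated API record): launching the
-      balancer from Java via its main entry point instead of the shell scripts;
-      the 5% threshold is just an example value.
-
-   public class RunBalancer {
-     public static void main(String[] args) {
-       // Equivalent to: bin/start-balancer.sh -threshold 5
-       org.apache.hadoop.hdfs.server.balancer.Balancer.main(new String[] {"-threshold", "5"});
-     }
-   }
- -->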
- </package>
- <package name="org.apache.hadoop.hdfs.server.common">
- <!-- start class org.apache.hadoop.hdfs.server.common.GenerationStamp -->
- <class name="GenerationStamp" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="java.lang.Comparable"/>
- <constructor name="GenerationStamp"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Create a new instance, initialized to FIRST_VALID_STAMP.]]>
- </doc>
- </constructor>
- <method name="getStamp" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Returns the current generation stamp]]>
- </doc>
- </method>
- <method name="setStamp"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="stamp" type="long"/>
- <doc>
- <![CDATA[Sets the current generation stamp]]>
- </doc>
- </method>
- <method name="nextStamp" return="long"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[First increments the counter and then returns the stamp]]>
- </doc>
- </method>
- <method name="compareTo" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="that" type="org.apache.hadoop.hdfs.server.common.GenerationStamp"/>
- </method>
- <method name="equals" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="o" type="java.lang.Object"/>
- </method>
- <method name="hashCode" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <field name="FIRST_VALID_STAMP" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[The first valid generation stamp.]]>
- </doc>
- </field>
- <field name="GRANDFATHER_GENERATION_STAMP" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Generation stamp of blocks that pre-date the introduction
- of a generation stamp.]]>
- </doc>
- </field>
- <doc>
- <![CDATA[A GenerationStamp is a Hadoop FS primitive, identified by a long.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.common.GenerationStamp -->
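- <!-- Illustrative sketch (not part of the generated API record): basic use of the
-      GenerationStamp counter listed above.
-
-   import org.apache.hadoop.hdfs.server.common.GenerationStamp;
-
-   public class StampDemo {
-     public static void main(String[] args) {
-       GenerationStamp gs = new GenerationStamp(); // starts at FIRST_VALID_STAMP
-       long current = gs.getStamp();
-       long next = gs.nextStamp(); // increments the counter, then returns the stamp
-       assert next == current + 1;
-     }
-   }
- -->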
- <!-- start interface org.apache.hadoop.hdfs.server.common.HdfsConstants -->
- <interface name="HdfsConstants" abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <field name="READ_TIMEOUT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="READ_TIMEOUT_EXTENSION" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="WRITE_TIMEOUT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="WRITE_TIMEOUT_EXTENSION" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DN_KEEPALIVE_TIMEOUT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[Some handy internal HDFS constants]]>
- </doc>
- </interface>
- <!-- end interface org.apache.hadoop.hdfs.server.common.HdfsConstants -->
- <!-- start class org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState -->
- <class name="HdfsConstants.BlockUCState" extends="java.lang.Enum"
- abstract="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <method name="values" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState[]"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="valueOf" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="name" type="java.lang.String"/>
- </method>
- <field name="COMPLETE" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Block construction completed.<br>
- The block has at least one {@link ReplicaState#FINALIZED} replica,
- and is not going to be modified.]]>
- </doc>
- </field>
- <field name="UNDER_CONSTRUCTION" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[The block is under construction.<br>
- It has been recently allocated for write or append.]]>
- </doc>
- </field>
- <field name="UNDER_RECOVERY" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[The block is under recovery.<br>
- When a file lease expires its last block may not be {@link #COMPLETE}
- and needs to go through a recovery procedure,
- which synchronizes the contents of the existing replicas.]]>
- </doc>
- </field>
- <field name="COMMITTED" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[The block is committed.<br>
- The client reported that all bytes are written to data-nodes
- with the given generation stamp and block length, but no
- {@link ReplicaState#FINALIZED}
- replicas have yet been reported by the data-nodes themselves.]]>
- </doc>
- </field>
- <doc>
- <![CDATA[States that a block can go through while it is under construction.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState -->
- <!-- start class org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole -->
- <class name="HdfsConstants.NamenodeRole" extends="java.lang.Enum"
- abstract="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <method name="values" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole[]"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="valueOf" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="name" type="java.lang.String"/>
- </method>
- <method name="toString" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <field name="ACTIVE" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="BACKUP" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="CHECKPOINT" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="STANDBY" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[Defines the NameNode role.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole -->
- <!-- start class org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType -->
- <class name="HdfsConstants.NodeType" extends="java.lang.Enum"
- abstract="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <method name="values" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType[]"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="valueOf" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="name" type="java.lang.String"/>
- </method>
- <field name="NAME_NODE" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DATA_NODE" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[Type of the node]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType -->
- <!-- start class org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState -->
- <class name="HdfsConstants.ReplicaState" extends="java.lang.Enum"
- abstract="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <method name="values" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState[]"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="valueOf" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="name" type="java.lang.String"/>
- </method>
- <method name="getValue" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getState" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="v" type="int"/>
- </method>
- <method name="read" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Read from in]]>
- </doc>
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Write to out]]>
- </doc>
- </method>
- <field name="FINALIZED" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Replica is finalized. The state when replica is not modified.]]>
- </doc>
- </field>
- <field name="RBW" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Replica is being written to.]]>
- </doc>
- </field>
- <field name="RWR" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Replica is waiting to be recovered.]]>
- </doc>
- </field>
- <field name="RUR" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Replica is under recovery.]]>
- </doc>
- </field>
- <field name="TEMPORARY" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Temporary replica: created for replication and relocation only.]]>
- </doc>
- </field>
- <doc>
- <![CDATA[Block replica states that a replica can go through while being constructed.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState -->
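- <!-- Illustrative sketch (not part of the generated API record): a serialization
-      round trip using the read/write methods listed above; the Hadoop buffer
-      classes are used here only for convenience.
-
-   import java.io.IOException;
-   import org.apache.hadoop.hdfs.server.common.HdfsConstants;
-   import org.apache.hadoop.io.DataInputBuffer;
-   import org.apache.hadoop.io.DataOutputBuffer;
-
-   public class ReplicaStateRoundTrip {
-     public static void main(String[] args) throws IOException {
-       DataOutputBuffer out = new DataOutputBuffer();
-       HdfsConstants.ReplicaState.RBW.write(out); // serialize the enum value
-       DataInputBuffer in = new DataInputBuffer();
-       in.reset(out.getData(), out.getLength());
-       HdfsConstants.ReplicaState state = HdfsConstants.ReplicaState.read(in);
-       assert state == HdfsConstants.ReplicaState.RBW;
-     }
-   }
- -->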
- <!-- start class org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption -->
- <class name="HdfsConstants.StartupOption" extends="java.lang.Enum"
- abstract="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <method name="values" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption[]"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="valueOf" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="name" type="java.lang.String"/>
- </method>
- <method name="getName" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="toNodeRole" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <field name="FORMAT" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="REGULAR" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="BACKUP" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="CHECKPOINT" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="UPGRADE" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="ROLLBACK" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="FINALIZE" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="IMPORT" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[Startup options]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption -->
- <!-- start class org.apache.hadoop.hdfs.server.common.InconsistentFSStateException -->
- <class name="InconsistentFSStateException" extends="java.io.IOException"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="InconsistentFSStateException" type="java.io.File, java.lang.String"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="InconsistentFSStateException" type="java.io.File, java.lang.String, java.lang.Throwable"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <doc>
- <![CDATA[The exception is thrown when the file system state is inconsistent
- and not recoverable.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.common.InconsistentFSStateException -->
- <!-- start class org.apache.hadoop.hdfs.server.common.IncorrectVersionException -->
- <class name="IncorrectVersionException" extends="java.io.IOException"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="IncorrectVersionException" type="int, java.lang.String"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="IncorrectVersionException" type="int, java.lang.String, int"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <doc>
- <![CDATA[The exception is thrown when an external version does not match
- the current version of the application.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.common.IncorrectVersionException -->
- <!-- start class org.apache.hadoop.hdfs.server.common.JspHelper -->
- <class name="JspHelper" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <method name="bestNode" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="blks" type="org.apache.hadoop.hdfs.protocol.LocatedBlocks"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="bestNode" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="blk" type="org.apache.hadoop.hdfs.protocol.LocatedBlock"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="bestNode" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="nodes" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"/>
- <param name="doRandom" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="streamBlockInAscii"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="addr" type="java.net.InetSocketAddress"/>
- <param name="blockId" type="long"/>
- <param name="blockToken" type="org.apache.hadoop.security.token.Token"/>
- <param name="genStamp" type="long"/>
- <param name="blockSize" type="long"/>
- <param name="offsetIntoBlock" type="long"/>
- <param name="chunkSizeToView" type="long"/>
- <param name="out" type="javax.servlet.jsp.JspWriter"/>
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="addTableHeader"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="javax.servlet.jsp.JspWriter"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="addTableRow"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="javax.servlet.jsp.JspWriter"/>
- <param name="columns" type="java.lang.String[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="addTableRow"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="javax.servlet.jsp.JspWriter"/>
- <param name="columns" type="java.lang.String[]"/>
- <param name="row" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="addTableFooter"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="javax.servlet.jsp.JspWriter"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="sortNodeList"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="nodes" type="java.util.ArrayList"/>
- <param name="field" type="java.lang.String"/>
- <param name="order" type="java.lang.String"/>
- </method>
- <method name="printPathWithLinks"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="dir" type="java.lang.String"/>
- <param name="out" type="javax.servlet.jsp.JspWriter"/>
- <param name="namenodeInfoPort" type="int"/>
- <param name="tokenString" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="printGotoForm"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="javax.servlet.jsp.JspWriter"/>
- <param name="namenodeInfoPort" type="int"/>
- <param name="tokenString" type="java.lang.String"/>
- <param name="file" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="createTitle"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="javax.servlet.jsp.JspWriter"/>
- <param name="req" type="javax.servlet.http.HttpServletRequest"/>
- <param name="file" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="string2ChunkSizeToView" return="int"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="s" type="java.lang.String"/>
- <param name="defaultValue" type="int"/>
- <doc>
- <![CDATA[Convert a String to chunk-size-to-view.]]>
- </doc>
- </method>
- <method name="getVersionTable" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Return a table containing version information.]]>
- </doc>
- </method>
- <method name="validatePath" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="p" type="java.lang.String"/>
- <doc>
- <![CDATA[Validate filename.
- @return null if the filename is invalid.
- Otherwise, return the validated filename.]]>
- </doc>
- </method>
- <method name="validateLong" return="java.lang.Long"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="value" type="java.lang.String"/>
- <doc>
- <![CDATA[Validate a long value.
- @return null if the value is invalid.
- Otherwise, return the validated Long object.]]>
- </doc>
- </method>
- <method name="validateURL" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="value" type="java.lang.String"/>
- <doc>
- <![CDATA[Validate a URL.
- @return null if the value is invalid.
- Otherwise, return the validated URL String.]]>
- </doc>
- </method>
- <method name="getDefaultWebUser" return="org.apache.hadoop.security.UserGroupInformation"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[If security is turned off, what is the default web user?
- @param conf the configuration to look in
- @return the remote user that was configured]]>
- </doc>
- </method>
- <method name="getUGI" return="org.apache.hadoop.security.UserGroupInformation"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="request" type="javax.servlet.http.HttpServletRequest"/>
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get {@link UserGroupInformation} and possibly the delegation token out of
- the request.
- @param request the http request
- @return a new user from the request
- @throws AccessControlException if the request has no token]]>
- </doc>
- </method>
- <method name="getDelegationTokenUrlParam" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="tokenString" type="java.lang.String"/>
- <doc>
- <![CDATA[Returns the url parameter for the given token string.
- @param tokenString
- @return url parameter]]>
- </doc>
- </method>
- <method name="getUrlParam" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="name" type="java.lang.String"/>
- <param name="val" type="java.lang.String"/>
- <param name="paramSeparator" type="java.lang.String"/>
- <doc>
- <![CDATA[Returns the url parameter for the given string, prefixed with
- paramSeparator.
-
- @param name parameter name
- @param val parameter value
- @param paramSeparator URL parameter prefix, i.e. either '?' or '&'
- @return url parameter]]>
- </doc>
- </method>
- <method name="getUrlParam" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="name" type="java.lang.String"/>
- <param name="val" type="java.lang.String"/>
- <param name="firstParam" type="boolean"/>
- <doc>
- <![CDATA[Returns the url parameter for the given string, prefixed with '?' if
- firstParam is true, prefixed with '&' if firstParam is false.
-
- @param name parameter name
- @param val parameter value
- @param firstParam true if this is the first parameter in the list, false otherwise
- @return url parameter]]>
- </doc>
- </method>
- <method name="getUrlParam" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="name" type="java.lang.String"/>
- <param name="val" type="java.lang.String"/>
- <doc>
- <![CDATA[Returns the url parameter for the given string, prefixed with '&'.
- @param name parameter name
- @param val parameter value
- @return url parameter]]>
- </doc>
- </method>
- <field name="CURRENT_CONF" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="WEB_UGI_PROPERTY_NAME" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DELEGATION_PARAMETER_NAME" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.common.JspHelper -->
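- <!-- A minimal usage sketch of the JspHelper validation and URL-parameter helpers above,
- assuming a servlet/JSP context: the variables requestedPath, tokenString and request, and the
- page name "browseDirectory.jsp", are hypothetical placeholders rather than values defined here.
-
-   String path = JspHelper.validatePath(requestedPath);        // null if the file name is invalid
-   Long offset = JspHelper.validateLong(request.getParameter("offset"));
-   if (path != null) {
-     String url = "browseDirectory.jsp"
-         + JspHelper.getUrlParam("dir", path, true)             // first parameter, prefixed with '?'
-         + JspHelper.getDelegationTokenUrlParam(tokenString);   // later parameter, prefixed with '&'
-   }
- -->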
- <!-- start class org.apache.hadoop.hdfs.server.common.Storage -->
- <class name="Storage" extends="org.apache.hadoop.hdfs.server.common.StorageInfo"
- abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="Storage" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Create empty storage info of the specified type]]>
- </doc>
- </constructor>
- <constructor name="Storage" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType, int, long"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </constructor>
- <constructor name="Storage" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType, org.apache.hadoop.hdfs.server.common.StorageInfo"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </constructor>
- <method name="dirIterator" return="java.util.Iterator"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Return the default iterator.
- This iterator returns all entries in storageDirs.]]>
- </doc>
- </method>
- <method name="dirIterator" return="java.util.Iterator"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="dirType" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirType"/>
- <doc>
- <![CDATA[Return an iterator based on the storage directory type.
- This iterator selects entries in storageDirs of type dirType and returns
- them via the iterator.]]>
- </doc>
- </method>
- <method name="listStorageDirectories" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Generate the list of storage directories (one debug line).]]>
- </doc>
- </method>
- <method name="getNumStorageDirs" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getStorageDir" return="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="idx" type="int"/>
- </method>
- <method name="addStorageDir"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
- </method>
- <method name="isPreUpgradableLayout" return="boolean"
- abstract="true" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Return true if the layout of the given storage directory is from a version
- of Hadoop prior to the introduction of the "current" and "previous"
- directories which allow upgrade and rollback.]]>
- </doc>
- </method>
- <method name="checkVersionUpgradable"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="oldVersion" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Checks if the upgrade from the given old version is supported. If
- no upgrade is supported, it throws IncorrectVersionException.
-
- @param oldVersion]]>
- </doc>
- </method>
- <method name="getFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="props" type="java.util.Properties"/>
- <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get common storage fields.
- Should be overridden if additional fields need to be read.
-
- @param props
- @throws IOException]]>
- </doc>
- </method>
- <method name="setFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="props" type="java.util.Properties"/>
- <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Set common storage fields.
- Should be overridden if additional fields need to be set.
-
- @param props
- @throws IOException]]>
- </doc>
- </method>
- <method name="rename"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="from" type="java.io.File"/>
- <param name="to" type="java.io.File"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="deleteDir"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="dir" type="java.io.File"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="writeAll"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Write all data storage files.
- @throws IOException]]>
- </doc>
- </method>
- <method name="unlockAll"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Unlock all storage directories.
- @throws IOException]]>
- </doc>
- </method>
- <method name="isLockSupported" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="idx" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Check whether underlying file system supports file locking.
-
- @return <code>true</code> if exclusive locks are supported or
- <code>false</code> otherwise.
- @throws IOException
- @see StorageDirectory#lock()]]>
- </doc>
- </method>
- <method name="getBuildVersion" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getRegistrationID" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="storage" type="org.apache.hadoop.hdfs.server.common.StorageInfo"/>
- </method>
- <method name="is203LayoutVersion" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="layoutVersion" type="int"/>
- </method>
- <field name="LOG" type="org.apache.commons.logging.Log"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="LAST_PRE_UPGRADE_LAYOUT_VERSION" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="LAST_UPGRADABLE_LAYOUT_VERSION" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="LAST_UPGRADABLE_HADOOP_VERSION" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="PRE_GENERATIONSTAMP_LAYOUT_VERSION" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="LAYOUT_VERSIONS_203" type="int[]"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Layout versions of 0.20.203 release]]>
- </doc>
- </field>
- <field name="STORAGE_FILE_VERSION" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="STORAGE_DIR_CURRENT" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="storageDirs" type="java.util.List"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[Storage information file.
- <p>
- Local storage information is stored in a separate file VERSION.
- It contains the type of the node,
- the storage layout version, the namespace id, and
- the fs state creation time.
- <p>
- Local storage can reside in multiple directories.
- Each directory should contain the same VERSION file as the others.
- During startup Hadoop servers (name-node and data-nodes) read their local
- storage information from them.
- <p>
- The servers hold a lock for each storage directory while they run so that
- other nodes cannot start up sharing the same storage.
- The locks are released when the servers stop (normally or abnormally).]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.common.Storage -->
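- <!-- A hedged sketch of walking the storage directories of an already-initialized Storage
- subclass instance (Storage itself is abstract); the method name dumpStorage is hypothetical.
-
-   static void dumpStorage(Storage storage) throws IOException {
-     for (int i = 0; i < storage.getNumStorageDirs(); i++) {
-       Storage.StorageDirectory sd = storage.getStorageDir(i);
-       System.out.println("root=" + sd.getRoot() + " type=" + sd.getStorageDirType());
-     }
-     System.out.println(storage.listStorageDirectories());      // one debug line for all directories
-     storage.unlockAll();                                        // release the per-directory locks
-   }
- -->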
- <!-- start class org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory -->
- <class name="Storage.StorageDirectory" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="Storage.StorageDirectory" type="java.io.File"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="Storage.StorageDirectory" type="java.io.File, org.apache.hadoop.hdfs.server.common.Storage.StorageDirType"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getRoot" return="java.io.File"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get root directory of this storage]]>
- </doc>
- </method>
- <method name="getStorageDirType" return="org.apache.hadoop.hdfs.server.common.Storage.StorageDirType"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get storage directory type]]>
- </doc>
- </method>
- <method name="read"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Read version file.
-
- @throws IOException if file cannot be read or contains inconsistent data]]>
- </doc>
- </method>
- <method name="read"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="from" type="java.io.File"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Write version file.
-
- @throws IOException]]>
- </doc>
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="to" type="java.io.File"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="clearDirectory"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Clear and re-create storage directory.
- <p>
- Removes contents of the current directory and creates an empty directory.
-
- This does not fully format the storage directory.
- It cannot write the version file since it should be written last after
- all other storage type dependent files are written.
- Derived storage is responsible for setting specific storage values and
- writing the version file to disk.
-
- @throws IOException]]>
- </doc>
- </method>
- <method name="getCurrentDir" return="java.io.File"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Directory {@code current} contains latest files defining
- the file system meta-data.
-
- @return the directory path]]>
- </doc>
- </method>
- <method name="getVersionFile" return="java.io.File"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[File {@code VERSION} contains the following fields:
- <ol>
- <li>node type</li>
- <li>layout version</li>
- <li>namespaceID</li>
- <li>fs state creation time</li>
- <li>other fields specific for this node type</li>
- </ol>
- The version file is always written last during storage directory updates.
- The existence of the version file indicates that all other files have
- been successfully written in the storage directory, the storage is valid
- and does not need to be recovered.
-
- @return the version file path]]>
- </doc>
- </method>
- <method name="getPreviousVersionFile" return="java.io.File"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[File {@code VERSION} from the {@code previous} directory.
-
- @return the previous version file path]]>
- </doc>
- </method>
- <method name="getPreviousDir" return="java.io.File"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Directory {@code previous} contains the previous file system state,
- which the system can be rolled back to.
-
- @return the directory path]]>
- </doc>
- </method>
- <method name="getPreviousTmp" return="java.io.File"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[{@code previous.tmp} is a transient directory, which holds
- current file system state while the new state is saved into the new
- {@code current} during upgrade.
- If the saving succeeds {@code previous.tmp} will be moved to
- {@code previous}, otherwise it will be renamed back to
- {@code current} by the recovery procedure during startup.
-
- @return the directory path]]>
- </doc>
- </method>
- <method name="getRemovedTmp" return="java.io.File"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[{@code removed.tmp} is a transient directory, which holds
- current file system state while the previous state is moved into
- {@code current} during rollback.
- If the moving succeeds {@code removed.tmp} will be removed,
- otherwise it will be renamed back to
- {@code current} by the recovery procedure during startup.
-
- @return the directory path]]>
- </doc>
- </method>
- <method name="getFinalizedTmp" return="java.io.File"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[{@code finalized.tmp} is a transient directory, which holds
- the {@code previous} file system state while it is being removed
- in response to the finalize request.
- Finalize operation will remove {@code finalized.tmp} when completed,
- otherwise the removal will resume upon the system startup.
-
- @return the directory path]]>
- </doc>
- </method>
- <method name="getLastCheckpointTmp" return="java.io.File"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[{@code lastcheckpoint.tmp} is a transient directory, which holds
- current file system state while the new state is saved into the new
- {@code current} during regular namespace updates.
- If the saving succeeds {@code lastcheckpoint.tmp} will be moved to
- {@code previous.checkpoint}, otherwise it will be renamed back to
- {@code current} by the recovery procedure during startup.
-
- @return the directory path]]>
- </doc>
- </method>
- <method name="getPreviousCheckpoint" return="java.io.File"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[{@code previous.checkpoint} is a directory, which holds the previous
- (before the last save) state of the storage directory.
- The directory is created as a reference only; it does not play a role
- in state recovery procedures, and is recycled automatically,
- but it may be useful for manual recovery of a stale state of the system.
-
- @return the directory path]]>
- </doc>
- </method>
- <method name="analyzeStorage" return="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="startOpt" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Check consistency of the storage directory
-
- @param startOpt a startup option.
-
- @return state {@link StorageState} of the storage directory
- @throws InconsistentFSStateException if directory state is not
- consistent and cannot be recovered.
- @throws IOException]]>
- </doc>
- </method>
- <method name="doRecover"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="curState" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Complete or recover storage state from previously failed transition.
-
- @param curState specifies what/how the state should be recovered
- @throws IOException]]>
- </doc>
- </method>
- <method name="lock"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Lock storage to provide exclusive access.
-
- <p> Locking is not supported by all file systems.
- E.g., NFS does not consistently support exclusive locks.
-
- <p> If locking is supported, we guarantee exclusive access to the
- storage directory. Otherwise, no guarantee is given.
-
- @throws IOException if locking fails]]>
- </doc>
- </method>
- <method name="unlock"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Unlock storage.
-
- @throws IOException]]>
- </doc>
- </method>
- <doc>
- <![CDATA[One of the storage directories.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory -->
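- <!-- A hedged sketch of the consistency-check sequence described above for a single
- StorageDirectory obtained from an existing Storage instance. StartupOption.REGULAR, the
- surrounding imports, and the method name checkAndLoad are assumptions; real callers also
- format NOT_FORMATTED directories.
-
-   static void checkAndLoad(Storage.StorageDirectory sd) throws IOException {
-     Storage.StorageState state = sd.analyzeStorage(HdfsConstants.StartupOption.REGULAR);
-     switch (state) {
-       case NORMAL:
-         break;                                  // nothing to recover
-       case NON_EXISTENT:
-       case NOT_FORMATTED:
-         return;                                 // the caller decides whether to create or format
-       default:
-         sd.doRecover(state);                    // complete or roll back an interrupted transition
-     }
-     sd.read();                                  // load the VERSION file
-   }
- -->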
- <!-- start interface org.apache.hadoop.hdfs.server.common.Storage.StorageDirType -->
- <interface name="Storage.StorageDirType" abstract="true"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <method name="getStorageDirType" return="org.apache.hadoop.hdfs.server.common.Storage.StorageDirType"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="isOfType" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="type" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirType"/>
- </method>
- <doc>
- <![CDATA[An interface to denote the storage directory type.
- Implementations can define a type for a storage directory by implementing
- this interface.]]>
- </doc>
- </interface>
- <!-- end interface org.apache.hadoop.hdfs.server.common.Storage.StorageDirType -->
- <!-- start class org.apache.hadoop.hdfs.server.common.Storage.StorageState -->
- <class name="Storage.StorageState" extends="java.lang.Enum"
- abstract="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <method name="values" return="org.apache.hadoop.hdfs.server.common.Storage.StorageState[]"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="valueOf" return="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="name" type="java.lang.String"/>
- </method>
- <field name="NON_EXISTENT" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="NOT_FORMATTED" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="COMPLETE_UPGRADE" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="RECOVER_UPGRADE" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="COMPLETE_FINALIZE" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="COMPLETE_ROLLBACK" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="RECOVER_ROLLBACK" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="COMPLETE_CHECKPOINT" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="RECOVER_CHECKPOINT" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="NORMAL" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.common.Storage.StorageState -->
- <!-- start class org.apache.hadoop.hdfs.server.common.StorageInfo -->
- <class name="StorageInfo" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.io.Writable"/>
- <constructor name="StorageInfo"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="StorageInfo" type="int, int, long"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="StorageInfo" type="org.apache.hadoop.hdfs.server.common.StorageInfo"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getLayoutVersion" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Layout version of the storage data.]]>
- </doc>
- </method>
- <method name="getNamespaceID" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Namespace id of the file system.<p>
- Assigned to the file system at formatting and never changes after that.
- Shared by all file system components.]]>
- </doc>
- </method>
- <method name="getCTime" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Creation time of the file system state.<p>
- Modified during upgrades.]]>
- </doc>
- </method>
- <method name="setStorageInfo"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="from" type="org.apache.hadoop.hdfs.server.common.StorageInfo"/>
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <field name="layoutVersion" type="int"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="namespaceID" type="int"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="cTime" type="long"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[Common class for storage information.
-
- TODO namespaceID should be long and computed as hash(address + port)]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.common.StorageInfo -->
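- <!-- StorageInfo implements Writable, so it can round-trip through Hadoop's in-memory buffers.
- A minimal sketch (inside a method that may throw IOException); the constructor arguments
- (layout version, namespaceID, cTime) are invented sample values.
-
-   StorageInfo original = new StorageInfo(-18, 12345, System.currentTimeMillis());
-   DataOutputBuffer out = new DataOutputBuffer();          // org.apache.hadoop.io
-   original.write(out);
-   DataInputBuffer in = new DataInputBuffer();
-   in.reset(out.getData(), out.getLength());
-   StorageInfo copy = new StorageInfo();
-   copy.readFields(in);
-   assert copy.getLayoutVersion() == original.getLayoutVersion();
-   assert copy.getNamespaceID() == original.getNamespaceID()
-       && copy.getCTime() == original.getCTime();
- -->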
- <!-- start interface org.apache.hadoop.hdfs.server.common.Upgradeable -->
- <interface name="Upgradeable" abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="java.lang.Comparable"/>
- <method name="getVersion" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the layout version of the upgrade object.
- @return layout version]]>
- </doc>
- </method>
- <method name="getType" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the type of the software component that this object is upgrading.
- @return type]]>
- </doc>
- </method>
- <method name="getDescription" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Description of the upgrade object for displaying.
- @return description]]>
- </doc>
- </method>
- <method name="getUpgradeStatus" return="short"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Upgrade status determines a percentage of the work done out of the total
- amount required by the upgrade.
-
- 100% means that the upgrade is completed.
- Any value < 100 means it is not complete.
-
- Implementations should report at least two distinct values, e.g. 0 and 100.
- @return integer value in the range [0, 100].]]>
- </doc>
- </method>
- <method name="startUpgrade" return="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Prepare for the upgrade.
- E.g. initialize upgrade data structures and set status to 0.
-
- Returns an upgrade command that is used for broadcasting to other cluster
- components.
- E.g. name-node informs data-nodes that they must perform a distributed upgrade.
-
- @return an UpgradeCommand for broadcasting.
- @throws IOException]]>
- </doc>
- </method>
- <method name="completeUpgrade" return="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Complete upgrade.
- E.g. cleanup upgrade data structures or write metadata to disk.
-
- Returns an upgrade command that is used for broadcasting to other cluster
- components.
- E.g. data-nodes inform the name-node that they completed the upgrade
- while other data-nodes are still upgrading.
-
- @throws IOException]]>
- </doc>
- </method>
- <method name="getUpgradeStatusReport" return="org.apache.hadoop.hdfs.server.common.UpgradeStatusReport"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="details" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get status report for the upgrade.
-
- @param details true if upgradeStatus details need to be included,
- false otherwise
- @return {@link UpgradeStatusReport}
- @throws IOException]]>
- </doc>
- </method>
- <doc>
- <![CDATA[Common interface for distributed upgrade objects.
-
- Each upgrade object corresponds to a layout version,
- which is the latest version that should be upgraded using this object.
- That is, all components whose layout version is greater than or equal to the
- one returned by {@link #getVersion()} must be upgraded with this object.]]>
- </doc>
- </interface>
- <!-- end interface org.apache.hadoop.hdfs.server.common.Upgradeable -->
- <!-- start class org.apache.hadoop.hdfs.server.common.UpgradeManager -->
- <class name="UpgradeManager" extends="java.lang.Object"
- abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="UpgradeManager"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getBroadcastCommand" return="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getUpgradeState" return="boolean"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getUpgradeVersion" return="int"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="setUpgradeState"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="uState" type="boolean"/>
- <param name="uVersion" type="int"/>
- </method>
- <method name="getDistributedUpgrades" return="java.util.SortedSet"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getUpgradeStatus" return="short"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="initializeUpgrade" return="boolean"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="isUpgradeCompleted" return="boolean"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getType" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"
- abstract="true" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="startUpgrade" return="boolean"
- abstract="true" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="completeUpgrade"
- abstract="true" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <field name="currentUpgrades" type="java.util.SortedSet"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="upgradeState" type="boolean"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="upgradeVersion" type="int"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="broadcastCommand" type="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[Generic upgrade manager.
-
- {@link #broadcastCommand} is the command that should be broadcast to the other cluster components.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.common.UpgradeManager -->
- <!-- start class org.apache.hadoop.hdfs.server.common.UpgradeObject -->
- <class name="UpgradeObject" extends="java.lang.Object"
- abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.hdfs.server.common.Upgradeable"/>
- <constructor name="UpgradeObject"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getUpgradeStatus" return="short"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getDescription" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getUpgradeStatusReport" return="org.apache.hadoop.hdfs.server.common.UpgradeStatusReport"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="details" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="compareTo" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="o" type="org.apache.hadoop.hdfs.server.common.Upgradeable"/>
- </method>
- <method name="equals" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="o" type="java.lang.Object"/>
- </method>
- <method name="hashCode" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <field name="status" type="short"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[Abstract upgrade object.
-
- Contains default implementation of common methods of {@link Upgradeable}
- interface.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.common.UpgradeObject -->
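- <!-- A hedged sketch of a concrete upgrade object built on the UpgradeObject base class above.
- The class name and layout version (-19) are invented, NodeType.NAME_NODE is assumed to be one
- of the NodeType values, and returning null from startUpgrade/completeUpgrade simply means no
- command is broadcast in this placeholder.
-
-   class ExampleNameNodeUpgrade extends UpgradeObject {
-     public int getVersion() { return -19; }                 // latest layout version handled by this object
-     public HdfsConstants.NodeType getType() { return HdfsConstants.NodeType.NAME_NODE; }
-     public UpgradeCommand startUpgrade() throws IOException {
-       status = 0;                                           // "status" is the protected field of UpgradeObject
-       return null;
-     }
-     public UpgradeCommand completeUpgrade() throws IOException {
-       status = 100;                                         // 100 means the upgrade is done
-       return null;
-     }
-   }
- -->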
- <!-- start class org.apache.hadoop.hdfs.server.common.UpgradeObjectCollection -->
- <class name="UpgradeObjectCollection" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="UpgradeObjectCollection"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getDistributedUpgrades" return="java.util.SortedSet"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="versionFrom" type="int"/>
- <param name="type" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <doc>
- <![CDATA[Collection of upgrade objects.
- Upgrade objects should be registered here before they can be used.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.common.UpgradeObjectCollection -->
- <!-- start class org.apache.hadoop.hdfs.server.common.UpgradeStatusReport -->
- <class name="UpgradeStatusReport" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.io.Writable"/>
- <constructor name="UpgradeStatusReport"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="UpgradeStatusReport" type="int, short, boolean"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getVersion" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the layout version of the currently running upgrade.
- @return layout version]]>
- </doc>
- </method>
- <method name="getUpgradeStatus" return="short"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the upgrade status as a percentage of the total upgrade done.
-
- @see Upgradeable#getUpgradeStatus()]]>
- </doc>
- </method>
- <method name="isFinalized" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Is current upgrade finalized.
- @return true if finalized or false otherwise.]]>
- </doc>
- </method>
- <method name="getStatusText" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="details" type="boolean"/>
- <doc>
- <![CDATA[Get upgradeStatus data as text for reporting.
- Should be overridden to report upgrade-specific upgradeStatus data.
-
- @param details true if upgradeStatus details need to be included,
- false otherwise
- @return text]]>
- </doc>
- </method>
- <method name="toString" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Print basic upgradeStatus details.]]>
- </doc>
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <field name="version" type="int"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="upgradeStatus" type="short"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="finalized" type="boolean"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[Base upgrade status class.
- Extend this class if specific status fields need to be reported.
-
- Describes status of current upgrade.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.common.UpgradeStatusReport -->
- <!-- start class org.apache.hadoop.hdfs.server.common.Util -->
- <class name="Util" extends="java.lang.Object"
- abstract="false"
- static="false" final="true" visibility="public"
- deprecated="not deprecated">
- <constructor name="Util"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="now" return="long"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Current system time.
- @return current time in msec.]]>
- </doc>
- </method>
- <method name="stringAsURI" return="java.net.URI"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="s" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Interprets the passed string as a URI. In case of error it
- assumes the specified string is a file.
- @param s the string to interpret
- @return the resulting URI
- @throws IOException]]>
- </doc>
- </method>
- <method name="fileAsURI" return="java.net.URI"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="java.io.File"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Converts the passed File to a URI.
- @param f the file to convert
- @return the resulting URI
- @throws IOException]]>
- </doc>
- </method>
- <method name="stringCollectionAsURIs" return="java.util.Collection"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="names" type="java.util.Collection"/>
- <doc>
- <![CDATA[Converts a collection of strings into a collection of URIs.
- @param names collection of strings to convert to URIs
- @return collection of URIs]]>
- </doc>
- </method>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.common.Util -->
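- <!-- A small sketch of the Util helpers above; the paths are invented examples. As its javadoc
- notes, stringAsURI falls back to treating a string it cannot parse as a local file path.
-
-   long start = Util.now();                                   // current time in msec
-   URI a = Util.stringAsURI("file:///tmp/hadoop/name");       // parsed as a URI
-   URI b = Util.fileAsURI(new File("/tmp/hadoop/name2"));     // converted from a File
-   Collection<URI> dirs = Util.stringCollectionAsURIs(Arrays.asList("/tmp/d1", "/tmp/d2"));
- -->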
- </package>
- <package name="org.apache.hadoop.hdfs.server.datanode">
- <!-- start class org.apache.hadoop.hdfs.server.datanode.DataNode -->
- <class name="DataNode" extends="org.apache.hadoop.conf.Configured"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol"/>
- <implements name="org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol"/>
- <implements name="org.apache.hadoop.hdfs.protocol.FSConstants"/>
- <implements name="java.lang.Runnable"/>
- <implements name="org.apache.hadoop.hdfs.server.datanode.DataNodeMXBean"/>
- <method name="createSocketAddr" return="java.net.InetSocketAddress"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="target" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Use {@link NetUtils#createSocketAddr(String)} instead.]]>
- </doc>
- </method>
- <method name="getInfoAddr" return="java.net.InetSocketAddress"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <doc>
- <![CDATA[Determine the HTTP server's effective address.]]>
- </doc>
- </method>
- <method name="newSocket" return="java.net.Socket"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Creates either an NIO or a regular socket depending on socketWriteTimeout.]]>
- </doc>
- </method>
- <method name="getDataNode" return="org.apache.hadoop.hdfs.server.datanode.DataNode"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Return the DataNode object]]>
- </doc>
- </method>
- <method name="createInterDataNodeProtocolProxy" return="org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="datanodeid" type="org.apache.hadoop.hdfs.protocol.DatanodeID"/>
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <param name="socketTimeout" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getNameNodeAddr" return="java.net.InetSocketAddress"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getNameNodeAddrForClient" return="java.net.InetSocketAddress"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getSelfAddr" return="java.net.InetSocketAddress"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getDatanodeRegistration" return="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Return DatanodeRegistration]]>
- </doc>
- </method>
- <method name="setNewStorageID"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="dnReg" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
- </method>
- <method name="shutdown"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Shut down this instance of the datanode.
- Returns only after shutdown is complete.
- This method can only be called by the offerService thread.
- Otherwise, deadlock might occur.]]>
- </doc>
- </method>
- <method name="checkDiskError"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="e" type="java.lang.Exception"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Check whether the disk has run out of space.
- @param e the exception that caused this checkDiskError call]]>
- </doc>
- </method>
- <method name="checkDiskError"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Check if there is a disk failure and if so, handle the error]]>
- </doc>
- </method>
- <method name="offerService"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="Exception" type="java.lang.Exception"/>
- <doc>
- <![CDATA[Main loop for the DataNode. Runs until shutdown,
- forever calling remote NameNode functions.]]>
- </doc>
- </method>
- <method name="notifyNamenodeReceivedBlock"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="delHint" type="java.lang.String"/>
- </method>
- <method name="run"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[No matter what kind of exception we get, keep retrying offerService().
- That's the loop that connects to the NameNode and provides basic DataNode
- functionality.
- Only stop when "shouldRun" is turned off (which can only happen at shutdown).]]>
- </doc>
- </method>
- <method name="runDatanodeDaemon"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="dn" type="org.apache.hadoop.hdfs.server.datanode.DataNode"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Start a single datanode daemon and wait for it to finish.
- If this thread is specifically interrupted, it will stop waiting.]]>
- </doc>
- </method>
- <method name="instantiateDataNode" return="org.apache.hadoop.hdfs.server.datanode.DataNode"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="args" type="java.lang.String[]"/>
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Instantiate a single datanode object. This must be run by invoking
- {@link DataNode#runDatanodeDaemon(DataNode)} subsequently.]]>
- </doc>
- </method>
- <method name="instantiateDataNode" return="org.apache.hadoop.hdfs.server.datanode.DataNode"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="args" type="java.lang.String[]"/>
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <param name="resources" type="org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Instantiate a single datanode object, along with its secure resources.
- This must be run by invoking {@link DataNode#runDatanodeDaemon(DataNode)}
- subsequently.]]>
- </doc>
- </method>
- <method name="createDataNode" return="org.apache.hadoop.hdfs.server.datanode.DataNode"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="args" type="java.lang.String[]"/>
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Instantiate and start a single datanode daemon and wait for it to finish.
- If this thread is specifically interrupted, it will stop waiting.]]>
- </doc>
- </method>
- <method name="createDataNode" return="org.apache.hadoop.hdfs.server.datanode.DataNode"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="args" type="java.lang.String[]"/>
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <param name="resources" type="org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Instantiate and start a single datanode daemon and wait for it to finish.
- If this thread is specifically interrupted, it will stop waiting.]]>
- </doc>
- </method>
- <method name="toString" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="scheduleBlockReport"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="delay" type="long"/>
- <doc>
- <![CDATA[This method arranges for the data node to send the block report at the next heartbeat.]]>
- </doc>
- </method>
- <method name="getFSDataset" return="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[This method is used for testing.
- Examples are adding and deleting blocks directly.
- The most common usage will be when the data node's storage is simulated.
-
- @return the fsdataset that stores the blocks]]>
- </doc>
- </method>
- <method name="secureMain"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="args" type="java.lang.String[]"/>
- <param name="resources" type="org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources"/>
- </method>
- <method name="main"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="args" type="java.lang.String[]"/>
- </method>
- <method name="recoverBlocks" return="org.apache.hadoop.util.Daemon"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="blocks" type="java.util.Collection"/>
- </method>
- <method name="initReplicaRecovery" return="org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="rBlock" type="org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="updateReplicaUnderRecovery" return="org.apache.hadoop.hdfs.protocol.Block"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="oldBlock" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="recoveryId" type="long"/>
- <param name="newLength" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Update replica with the new generation stamp and length.]]>
- </doc>
- </method>
- <method name="getProtocolVersion" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="protocol" type="java.lang.String"/>
- <param name="clientVersion" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="getReplicaVisibleLength" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="getStreamingAddr" return="java.net.InetSocketAddress"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- </method>
- <method name="getVersion" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getRpcPort" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getHttpPort" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getInfoPort" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getNamenodeAddress" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getVolumeInfo" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Returned information is a JSON representation of a map whose
- keys are volume names and whose values are maps of volume attribute
- names to their values.]]>
- </doc>
- </method>
- <field name="LOG" type="org.apache.commons.logging.Log"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DN_CLIENTTRACE_FORMAT" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="namenode" type="org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="data" type="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="dnRegistration" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="EMPTY_DEL_HINT" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="blockScanner" type="org.apache.hadoop.hdfs.server.datanode.DataBlockScanner"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="blockScannerThread" type="org.apache.hadoop.util.Daemon"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="ipcServer" type="org.apache.hadoop.ipc.Server"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[DataNode is a class (and program) that stores a set of
- blocks for a DFS deployment. A single deployment can
- have one or many DataNodes. Each DataNode communicates
- regularly with a single NameNode. It also communicates
- with client code and other DataNodes from time to time.
- DataNodes store a series of named blocks. The DataNode
- allows client code to read these blocks, or to write new
- block data. The DataNode may also, in response to instructions
- from its NameNode, delete blocks or copy blocks to/from other
- DataNodes.
- The DataNode maintains just one critical table:
- block-> stream of bytes (of BLOCK_SIZE or less)
- This info is stored on a local disk. The DataNode
- reports the table's contents to the NameNode upon startup
- and every so often afterwards.
- DataNodes spend their lives in an endless loop of asking
- the NameNode for something to do. A NameNode cannot connect
- to a DataNode directly; a NameNode simply returns values from
- functions invoked by a DataNode.
- DataNodes maintain an open server socket so that client code
- or other DataNodes can read/write data. The host/port for
- this server is reported to the NameNode, which then sends that
- information to clients or other DataNodes that might be interested.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.datanode.DataNode -->
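- <!-- A hedged sketch of the launch sequence described in the instantiateDataNode/runDatanodeDaemon
-      javadoc above; the method names and signatures come from this listing, the surrounding code is
-      an assumption for illustration only.
-        import org.apache.hadoop.conf.Configuration;
-        import org.apache.hadoop.hdfs.server.datanode.DataNode;
-
-        static void launch(String[] args) throws java.io.IOException {
-          Configuration conf = new Configuration();
-          DataNode dn = DataNode.instantiateDataNode(args, conf); // may return null (assumption)
-          if (dn != null) {
-            DataNode.runDatanodeDaemon(dn); // waits for the daemon to finish, per the doc above
-          }
-        }
- -->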
- <!-- start class org.apache.hadoop.hdfs.server.datanode.DatanodeJspHelper -->
- <class name="DatanodeJspHelper" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="DatanodeJspHelper"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.datanode.DatanodeJspHelper -->
- <!-- start interface org.apache.hadoop.hdfs.server.datanode.DataNodeMXBean -->
- <interface name="DataNodeMXBean" abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <method name="getVersion" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Gets the version of Hadoop.
-
- @return the version of Hadoop]]>
- </doc>
- </method>
- <method name="getRpcPort" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Gets the rpc port.
-
- @return the rpc port]]>
- </doc>
- </method>
- <method name="getHttpPort" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Gets the http port.
-
- @return the http port]]>
- </doc>
- </method>
- <method name="getNamenodeAddress" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Gets the namenode IP address.
-
- @return the namenode IP address]]>
- </doc>
- </method>
- <method name="getVolumeInfo" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Gets the information of each volume on the Datanode. Please
- see the implementation for the format of returned information.
-
- @return the volume info]]>
- </doc>
- </method>
- <doc>
- <![CDATA[This is the JMX management interface for data node information]]>
- </doc>
- </interface>
- <!-- end interface org.apache.hadoop.hdfs.server.datanode.DataNodeMXBean -->
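- <!-- Since DataNodeMXBean is a JMX management interface, its attributes can in principle be read
-      through an MBeanServer. A hedged sketch follows; the ObjectName string is an assumption for
-      illustration, and only the interface and its getters come from this listing.
-        import java.lang.management.ManagementFactory;
-        import javax.management.JMX;
-        import javax.management.MalformedObjectNameException;
-        import javax.management.ObjectName;
-        import org.apache.hadoop.hdfs.server.datanode.DataNodeMXBean;
-
-        static void printInfo() throws MalformedObjectNameException {
-          ObjectName name = new ObjectName("Hadoop:service=DataNode,name=DataNodeInfo"); // assumed name
-          DataNodeMXBean dnBean = JMX.newMXBeanProxy(
-              ManagementFactory.getPlatformMBeanServer(), name, DataNodeMXBean.class);
-          System.out.println(dnBean.getVersion() + " rpc=" + dnBean.getRpcPort());
-        }
- -->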
- <!-- start class org.apache.hadoop.hdfs.server.datanode.DataStorage -->
- <class name="DataStorage" extends="org.apache.hadoop.hdfs.server.common.Storage"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="DataStorage" type="org.apache.hadoop.hdfs.server.common.StorageInfo, java.lang.String"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getStorageID" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="setFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="props" type="java.util.Properties"/>
- <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="props" type="java.util.Properties"/>
- <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="isPreUpgradableLayout" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <doc>
- <![CDATA[Data storage information file.
- <p>
- @see Storage]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.datanode.DataStorage -->
- <!-- start class org.apache.hadoop.hdfs.server.datanode.DirectoryScanner -->
- <class name="DirectoryScanner" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Periodically scans the data directories for block and block metadata files.
- Reconciles the differences with block information maintained in
- {@link FSDataset}]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.datanode.DirectoryScanner -->
- <!-- start class org.apache.hadoop.hdfs.server.datanode.FSDataset -->
- <class name="FSDataset" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.hdfs.protocol.FSConstants"/>
- <implements name="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface"/>
- <constructor name="FSDataset" type="org.apache.hadoop.hdfs.server.datanode.DataStorage, org.apache.hadoop.conf.Configuration"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[An FSDataset has a directory where it loads its data files.]]>
- </doc>
- </constructor>
- <method name="getMetaFile" return="java.io.File"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="findBlockFile" return="java.io.File"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="blockId" type="long"/>
- <doc>
- <![CDATA[Return the block file for the given ID]]>
- </doc>
- </method>
- <method name="getStoredBlock" return="org.apache.hadoop.hdfs.protocol.Block"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="blkid" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="metaFileExists" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getMetaDataLength" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getMetaDataInputStream" return="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.MetaDataInputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getDfsUsed" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Return the total space used by dfs datanode]]>
- </doc>
- </method>
- <method name="hasEnoughResource" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Return true if there are still valid volumes on the DataNode.]]>
- </doc>
- </method>
- <method name="getCapacity" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Return total capacity, used and unused]]>
- </doc>
- </method>
- <method name="getRemaining" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Return how many bytes can still be stored in the FSDataset]]>
- </doc>
- </method>
- <method name="getNumFailedVolumes" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Return the number of failed volumes in the FSDataset.]]>
- </doc>
- </method>
- <method name="getLength" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Find the block's on-disk length]]>
- </doc>
- </method>
- <method name="getBlockFile" return="java.io.File"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get File name for a given block.]]>
- </doc>
- </method>
- <method name="getBlockInputStream" return="java.io.InputStream"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getBlockInputStream" return="java.io.InputStream"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="seekOffset" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getTmpInputStreams" return="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockInputStreams"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="blkOffset" type="long"/>
- <param name="ckoff" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns handles to the block file and its metadata file]]>
- </doc>
- </method>
- <method name="unlinkBlock" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="numLinks" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Make a copy of the block if this block is linked to an existing
- snapshot. This ensures that modifying this block does not modify
- data in any existing snapshots.
- @param block Block
- @param numLinks Unlink if the number of links exceed this value
- @throws IOException
- @return - true if the specified block was unlinked or the block
- is not in any snapshot.]]>
- </doc>
- </method>
- <method name="append" return="org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="newGS" type="long"/>
- <param name="expectedBlockLen" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="recoverAppend" return="org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="newGS" type="long"/>
- <param name="expectedBlockLen" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="recoverClose"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="newGS" type="long"/>
- <param name="expectedBlockLen" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="createRbw" return="org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="recoverRbw" return="org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="newGS" type="long"/>
- <param name="minBytesRcvd" type="long"/>
- <param name="maxBytesRcvd" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="createTemporary" return="org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="adjustCrcChannelPosition"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="streams" type="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams"/>
- <param name="checksumSize" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Sets the offset in the meta file so that the
- last checksum will be overwritten.]]>
- </doc>
- </method>
- <method name="finalizeBlock"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Complete the block write!]]>
- </doc>
- </method>
- <method name="unfinalizeBlock"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Remove the temporary block file (if any)]]>
- </doc>
- </method>
- <method name="getBlockReport" return="org.apache.hadoop.hdfs.protocol.BlockListAsLongs"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Generates a block report from the in-memory block map.]]>
- </doc>
- </method>
- <method name="isValidBlock" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <doc>
- <![CDATA[Check whether the given block is a valid one;
- here, valid means finalized.]]>
- </doc>
- </method>
- <method name="invalidate"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="invalidBlks" type="org.apache.hadoop.hdfs.protocol.Block[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[We're informed that a block is no longer valid. We
- could lazily garbage-collect the block, but why bother?
- Just get rid of it.]]>
- </doc>
- </method>
- <method name="getFile" return="java.io.File"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <doc>
- <![CDATA[Turn the block identifier into a filename; ignore generation stamp!!!]]>
- </doc>
- </method>
- <method name="checkDataDir"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="DiskChecker.DiskErrorException" type="org.apache.hadoop.util.DiskChecker.DiskErrorException"/>
- <doc>
- <![CDATA[Check whether a data directory is healthy.
- If some volumes have failed, make sure to remove all the blocks that belong
- to those volumes.
- @throws DiskErrorException]]>
- </doc>
- </method>
- <method name="toString" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="shutdown"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getStorageInfo" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="checkAndUpdate"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="blockId" type="long"/>
- <param name="diskFile" type="java.io.File"/>
- <param name="diskMetaFile" type="java.io.File"/>
- <param name="vol" type="org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume"/>
- <doc>
- <![CDATA[Reconcile the difference between blocks on the disk and blocks in
- volumeMap.
- Check the given block for inconsistencies. Look at the
- current state of the block and reconcile the differences as follows:
- <ul>
- <li>If the block file is missing, delete the block from volumeMap</li>
- <li>If the block file exists and the block is missing in volumeMap,
- add the block to volumeMap</li>
- <li>If the generation stamp does not match, update the block with the right
- generation stamp</li>
- <li>If the block length in memory does not match the actual block file length,
- mark the block as corrupt and update the block length in memory</li>
- <li>If the file in {@link ReplicaInfo} does not match the file on
- the disk, update {@link ReplicaInfo} with the correct file</li>
- </ul>
- @param blockId Block that differs
- @param diskFile Block file on the disk
- @param diskMetaFile Metadata file on the disk
- @param vol Volume of the block file]]>
- </doc>
- </method>
- <method name="getReplica" return="org.apache.hadoop.hdfs.server.datanode.ReplicaInfo"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="use {@link #fetchReplicaInfo(long)} instead.">
- <param name="blockId" type="long"/>
- <doc>
- <![CDATA[@deprecated use {@link #fetchReplicaInfo(long)} instead.]]>
- </doc>
- </method>
- <method name="initReplicaRecovery" return="org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="rBlock" type="org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="updateReplicaUnderRecovery" return="org.apache.hadoop.hdfs.server.datanode.ReplicaInfo"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="oldBlock" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="recoveryId" type="long"/>
- <param name="newlength" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getReplicaVisibleLength" return="long"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <field name="METADATA_EXTENSION" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="METADATA_VERSION" type="short"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[FSDataset manages a set of data blocks. Each block
- has a unique name and an extent on disk.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.datanode.FSDataset -->
- <!-- start interface org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface -->
- <interface name="FSDatasetInterface" abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean"/>
- <method name="getMetaDataLength" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns the length of the metadata file of the specified block
- @param b - the block for which the metadata length is desired
- @return the length of the metadata file for the specified block.
- @throws IOException]]>
- </doc>
- </method>
- <method name="getMetaDataInputStream" return="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.MetaDataInputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns metaData of block b as an input stream (and its length)
- @param b - the block
- @return the metadata input stream;
- @throws IOException]]>
- </doc>
- </method>
- <method name="metaFileExists" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Does the meta file exist for this block?
- @param b - the block
- @return true if the metafile for the specified block exists
- @throws IOException]]>
- </doc>
- </method>
- <method name="getLength" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns the specified block's on-disk length (excluding metadata)
- @param b
- @return the specified block's on-disk length (excluding metadata)
- @throws IOException]]>
- </doc>
- </method>
- <method name="getReplica" return="org.apache.hadoop.hdfs.server.datanode.Replica"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="blockId" type="long"/>
- <doc>
- <![CDATA[Get reference to the replica meta info in the replicasMap.
- To be called from methods that are synchronized on {@link FSDataset}
- @param blockId
- @return replica from the replicas map]]>
- </doc>
- </method>
- <method name="getStoredBlock" return="org.apache.hadoop.hdfs.protocol.Block"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="blkid" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[@return the generation stamp stored with the block.]]>
- </doc>
- </method>
- <method name="getBlockInputStream" return="java.io.InputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns an input stream to read the contents of the specified block
- @param b
- @return an input stream to read the contents of the specified block
- @throws IOException]]>
- </doc>
- </method>
- <method name="getBlockInputStream" return="java.io.InputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="seekOffset" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns an input stream at specified offset of the specified block
- @param b
- @param seekOffset
- @return an input stream to read the contents of the specified block,
- starting at the offset
- @throws IOException]]>
- </doc>
- </method>
- <method name="getTmpInputStreams" return="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockInputStreams"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="blkoff" type="long"/>
- <param name="ckoff" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns an input stream at specified offset of the specified block
- The block is still in the tmp directory and is not finalized
- @param b
- @param blkoff
- @param ckoff
- @return an input stream to read the contents of the specified block,
- starting at the offset
- @throws IOException]]>
- </doc>
- </method>
- <method name="createTemporary" return="org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Creates a temporary replica and returns the meta information of the replica
-
- @param b block
- @return the meta info of the replica which is being written to
- @throws IOException if an error occurs]]>
- </doc>
- </method>
- <method name="createRbw" return="org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Creates an RBW replica and returns the meta info of the replica
-
- @param b block
- @return the meta info of the replica which is being written to
- @throws IOException if an error occurs]]>
- </doc>
- </method>
- <method name="recoverRbw" return="org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="newGS" type="long"/>
- <param name="minBytesRcvd" type="long"/>
- <param name="maxBytesRcvd" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Recovers an RBW replica and returns the meta info of the replica
-
- @param b block
- @param newGS the new generation stamp for the replica
- @param minBytesRcvd the minimum number of bytes that the replica could have
- @param maxBytesRcvd the maximum number of bytes that the replica could have
- @return the meta info of the replica which is being written to
- @throws IOException if an error occurs]]>
- </doc>
- </method>
- <method name="append" return="org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="newGS" type="long"/>
- <param name="expectedBlockLen" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Appends to a finalized replica and returns the meta info of the replica
-
- @param b block
- @param newGS the new generation stamp for the replica
- @param expectedBlockLen the number of bytes the replica is expected to have
- @return the meta info of the replica which is being written to
- @throws IOException]]>
- </doc>
- </method>
- <method name="recoverAppend" return="org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="newGS" type="long"/>
- <param name="expectedBlockLen" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Recovers a failed append to a finalized replica
- and returns the meta info of the replica
-
- @param b block
- @param newGS the new generation stamp for the replica
- @param expectedBlockLen the number of bytes the replica is expected to have
- @return the meta info of the replica which is being written to
- @throws IOException]]>
- </doc>
- </method>
- <method name="recoverClose"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="newGS" type="long"/>
- <param name="expectedBlockLen" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Recover a failed pipeline close.
- It bumps the replica's generation stamp and finalizes it if it is an RBW replica.
-
- @param b block
- @param newGS the new generation stamp for the replica
- @param expectedBlockLen the number of bytes the replica is expected to have
- @throws IOException]]>
- </doc>
- </method>
- <method name="finalizeBlock"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Finalizes the block previously opened for writing using writeToBlock.
- The block size is what is in the parameter b and it must match the amount
- of data written
- @param b
- @throws IOException]]>
- </doc>
- </method>
- <method name="unfinalizeBlock"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Unfinalizes the block previously opened for writing using writeToBlock.
- The temporary file associated with this block is deleted.
- @param b
- @throws IOException]]>
- </doc>
- </method>
- <method name="getBlockReport" return="org.apache.hadoop.hdfs.protocol.BlockListAsLongs"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Returns the block report - the full list of blocks stored
- @return - the block report - the full list of blocks stored]]>
- </doc>
- </method>
- <method name="isValidBlock" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <doc>
- <![CDATA[Is the block valid?
- @param b
- @return - true if the specified block is valid]]>
- </doc>
- </method>
- <method name="invalidate"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="invalidBlks" type="org.apache.hadoop.hdfs.protocol.Block[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Invalidates the specified blocks
- @param invalidBlks - the blocks to be invalidated
- @throws IOException]]>
- </doc>
- </method>
- <method name="checkDataDir"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="DiskChecker.DiskErrorException" type="org.apache.hadoop.util.DiskChecker.DiskErrorException"/>
- <doc>
- <![CDATA[Check if all the data directories are healthy
- @throws DiskErrorException]]>
- </doc>
- </method>
- <method name="toString" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Stringifies the name of the storage]]>
- </doc>
- </method>
- <method name="shutdown"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Shutdown the FSDataset]]>
- </doc>
- </method>
- <method name="adjustCrcChannelPosition"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="stream" type="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams"/>
- <param name="checksumSize" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Sets the file pointer of the checksum stream so that the last checksum
- will be overwritten
- @param b block
- @param stream The stream for the data file and checksum file
- @param checksumSize number of bytes each checksum has
- @throws IOException]]>
- </doc>
- </method>
- <method name="hasEnoughResource" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Checks how many valid storage volumes there are in the DataNode.
- @return true if more than the minimum number of valid volumes are left
- in the FSDataSet.]]>
- </doc>
- </method>
- <method name="getReplicaVisibleLength" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get visible length of the specified replica.]]>
- </doc>
- </method>
- <method name="initReplicaRecovery" return="org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="rBlock" type="org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Initialize a replica recovery.
-
- @return actual state of the replica on this data-node or
- null if data-node does not have the replica.]]>
- </doc>
- </method>
- <method name="updateReplicaUnderRecovery" return="org.apache.hadoop.hdfs.server.datanode.ReplicaInfo"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="oldBlock" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="recoveryId" type="long"/>
- <param name="newLength" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Update replica's generation stamp and length and finalize it.]]>
- </doc>
- </method>
- <doc>
- <![CDATA[This is an interface for the underlying storage that stores blocks for
- a data node.
- Examples are the FSDataset (which stores blocks on dirs) and
- SimulatedFSDataset (which simulates data).]]>
- </doc>
- </interface>
- <!-- end interface org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface -->
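- <!-- A hedged sketch of the write path implied by the FSDatasetInterface javadoc above
-      (createRbw followed by finalizeBlock); only the interface methods come from this listing,
-      while the Block constructor shape and everything else is an assumption.
-        import org.apache.hadoop.hdfs.protocol.Block;
-        import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface;
-        import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface;
-
-        static void writeEmptyBlock(FSDatasetInterface dataset, long blockId, long genStamp)
-            throws java.io.IOException {
-          Block b = new Block(blockId, 0L, genStamp);             // assumed constructor shape
-          ReplicaInPipelineInterface rbw = dataset.createRbw(b);  // replica being written
-          // ... stream data and checksums into the replica via rbw ...
-          dataset.finalizeBlock(b);                               // complete the block write
-        }
- -->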
- <!-- start class org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockInputStreams -->
- <class name="FSDatasetInterface.BlockInputStreams" extends="java.lang.Object"
- abstract="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="java.io.Closeable"/>
- <method name="close"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <doc>
- <![CDATA[This class contains the input streams for the data and checksum
- of a block]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockInputStreams -->
- <!-- start class org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams -->
- <class name="FSDatasetInterface.BlockWriteStreams" extends="java.lang.Object"
- abstract="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[This class contains the output streams for the data and checksum
- of a block]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams -->
- <!-- start class org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.MetaDataInputStream -->
- <class name="FSDatasetInterface.MetaDataInputStream" extends="java.io.FilterInputStream"
- abstract="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <method name="getLength" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <doc>
- <![CDATA[This class provides the input stream and length of the metadata
- of a block]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.MetaDataInputStream -->
- <!-- start interface org.apache.hadoop.hdfs.server.datanode.Replica -->
- <interface name="Replica" abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <method name="getBlockId" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the block ID]]>
- </doc>
- </method>
- <method name="getGenerationStamp" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the generation stamp]]>
- </doc>
- </method>
- <method name="getState" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the replica state
- @return the replica state]]>
- </doc>
- </method>
- <method name="getNumBytes" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the number of bytes received
- @return the number of bytes that have been received]]>
- </doc>
- </method>
- <method name="getBytesOnDisk" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the number of bytes that have been written to disk
- @return the number of bytes that have been written to disk]]>
- </doc>
- </method>
- <method name="getVisibleLength" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the number of bytes that are visible to readers
- @return the number of bytes that are visible to readers]]>
- </doc>
- </method>
- <doc>
- <![CDATA[This represents block replicas that are stored in a DataNode.]]>
- </doc>
- </interface>
- <!-- end interface org.apache.hadoop.hdfs.server.datanode.Replica -->
- <!-- start class org.apache.hadoop.hdfs.server.datanode.ReplicaInfo -->
- <class name="ReplicaInfo" extends="org.apache.hadoop.hdfs.protocol.Block"
- abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.hdfs.server.datanode.Replica"/>
- <method name="toString" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <doc>
- <![CDATA[This class is used by datanodes to maintain meta data of its replicas.
- It provides a general interface for meta information of a replica.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.datanode.ReplicaInfo -->
- <!-- start class org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException -->
- <class name="ReplicaNotFoundException" extends="java.io.IOException"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="ReplicaNotFoundException"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="ReplicaNotFoundException" type="java.lang.String"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <doc>
- <![CDATA[Exception indicating that DataNode does not have a replica
- that matches the target block.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException -->
- <!-- start class org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter -->
- <class name="SecureDataNodeStarter" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.commons.daemon.Daemon"/>
- <constructor name="SecureDataNodeStarter"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="init"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="context" type="org.apache.commons.daemon.DaemonContext"/>
- <exception name="Exception" type="java.lang.Exception"/>
- </method>
- <method name="start"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="Exception" type="java.lang.Exception"/>
- </method>
- <method name="destroy"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="stop"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="Exception" type="java.lang.Exception"/>
- </method>
- <doc>
- <![CDATA[Utility class to start a datanode in a secure cluster, first obtaining
- privileged resources before main startup and handing them to the datanode.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter -->
- <!-- start class org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources -->
- <class name="SecureDataNodeStarter.SecureResources" extends="java.lang.Object"
- abstract="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="SecureDataNodeStarter.SecureResources" type="java.net.ServerSocket, org.mortbay.jetty.nio.SelectChannelConnector"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getStreamingSocket" return="java.net.ServerSocket"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getListener" return="org.mortbay.jetty.nio.SelectChannelConnector"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <doc>
- <![CDATA[Stash necessary resources needed for datanode operation in a secure env.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources -->
- <!-- start class org.apache.hadoop.hdfs.server.datanode.UpgradeObjectDatanode -->
- <class name="UpgradeObjectDatanode" extends="org.apache.hadoop.hdfs.server.common.UpgradeObject"
- abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="java.lang.Runnable"/>
- <constructor name="UpgradeObjectDatanode"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getType" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getDatanode" return="org.apache.hadoop.hdfs.server.datanode.DataNode"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </method>
- <method name="doUpgrade"
- abstract="true" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Specifies how the upgrade is performed.
- @throws IOException]]>
- </doc>
- </method>
- <method name="run"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="completeUpgrade" return="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Complete upgrade and return a status complete command for broadcasting.
-
- Data-nodes finish upgrade at different times.
- The data-node needs to re-confirm with the name-node that the upgrade
- is complete while other nodes are still upgrading.]]>
- </doc>
- </method>
- <doc>
- <![CDATA[Base class for data-node upgrade objects.
- Data-node upgrades are run in separate threads.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.datanode.UpgradeObjectDatanode -->
- </package>
- <package name="org.apache.hadoop.hdfs.server.datanode.metrics">
- <!-- start class org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeActivityMBean -->
- <class name="DataNodeActivityMBean" extends="org.apache.hadoop.metrics.util.MetricsDynamicMBeanBase"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="DataNodeActivityMBean" type="org.apache.hadoop.metrics.util.MetricsRegistry, java.lang.String"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="shutdown"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <doc>
- <![CDATA[This is the JMX MBean for reporting the DataNode Activity.
- The MBean is registered using the name
- "hadoop:service=DataNode,name=DataNodeActivity-<hostname>-<portNumber>"
-
- Many of the activity metrics are sampled and averaged on an interval
- which can be specified in the metrics config file.
- <p>
- For the metrics that are sampled and averaged, one must specify
- a metrics context that does periodic update calls. Most metrics contexts do.
- The default Null metrics context however does NOT. So if you aren't
- using any other metrics context then you can turn on the viewing and averaging
- of sampled metrics by specifying the following two lines
- in the hadoop-metrics.properties file:
- <pre>
- dfs.class=org.apache.hadoop.metrics.spi.NullContextWithUpdateThread
- dfs.period=10
- </pre>
- <p>
- Note that the metrics are collected regardless of the context used.
- The context with the update thread is used to average the data periodically.
- Implementation detail: we use a dynamic MBean that gets the list of metrics
- from the metrics registry passed as an argument to the constructor.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeActivityMBean -->
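- <!--
- The javadoc above gives the JMX object name under which the activity MBean is
- registered. A minimal, hypothetical Java sketch of reading that MBean in-process
- through the standard JMX API; the concrete hostname and port in the object name
- are placeholders, and the program assumes it runs in the same JVM as the datanode.
-
- import java.lang.management.ManagementFactory;
- import javax.management.MBeanAttributeInfo;
- import javax.management.MBeanServer;
- import javax.management.ObjectName;
-
- public class DataNodeActivityReader {
-   public static void main(String[] args) throws Exception {
-     MBeanServer server = ManagementFactory.getPlatformMBeanServer();
-     // Object name pattern from the javadoc; host and port here are illustrative.
-     ObjectName name = new ObjectName(
-         "hadoop:service=DataNode,name=DataNodeActivity-dn1.example.com-50010");
-     // List every exposed attribute and its current value.
-     for (MBeanAttributeInfo attr : server.getMBeanInfo(name).getAttributes()) {
-       System.out.println(attr.getName() + " = " + server.getAttribute(name, attr.getName()));
-     }
-   }
- }
- -->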
- <!-- start class org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics -->
- <class name="DataNodeMetrics" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.metrics.Updater"/>
- <constructor name="DataNodeMetrics" type="org.apache.hadoop.conf.Configuration, java.lang.String"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="shutdown"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="doUpdates"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="unused" type="org.apache.hadoop.metrics.MetricsContext"/>
- <doc>
- <![CDATA[Since this object is a registered updater, this method will be called
- periodically, e.g. every 5 seconds.]]>
- </doc>
- </method>
- <method name="resetAllMinMax"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <field name="registry" type="org.apache.hadoop.metrics.util.MetricsRegistry"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="bytesWritten" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingLong"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="bytesRead" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingLong"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="blocksWritten" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="blocksRead" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="blocksReplicated" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="blocksRemoved" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="blocksVerified" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="blockVerificationFailures" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="readsFromLocalClient" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="readsFromRemoteClient" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="writesFromLocalClient" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="writesFromRemoteClient" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="volumeFailures" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="readBlockOp" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="writeBlockOp" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="blockChecksumOp" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="copyBlockOp" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="replaceBlockOp" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="heartbeats" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="blockReports" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[This class is for maintaining the various DataNode statistics
- and publishing them through the metrics interfaces.
- This also registers the JMX MBean for RPC.
- <p>
- This class has a number of metrics variables that are publicly accessible;
- these variables (objects) have methods to update their values;
- for example:
- <p> {@link #blocksRead}.inc()]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics -->
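- <!--
- The class javadoc above notes that the metric fields are public and are updated
- through their own methods, e.g. {@link #blocksRead}.inc(). A minimal, hypothetical
- Java sketch of that usage pattern; the storage id string and byte count are
- illustrative values, not part of the published API.
-
- import org.apache.hadoop.conf.Configuration;
- import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;
-
- public class MetricsUsageSketch {
-   public static void main(String[] args) {
-     Configuration conf = new Configuration();
-     // Second constructor argument is the datanode storage id (illustrative here).
-     DataNodeMetrics metrics = new DataNodeMetrics(conf, "storage-1");
-
-     metrics.blocksRead.inc();        // one block served to a reader
-     metrics.bytesRead.inc(65536L);   // bytes streamed for that block
-
-     metrics.shutdown();
-   }
- }
- -->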
- <!-- start interface org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean -->
- <interface name="FSDatasetMBean" abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <method name="getDfsUsed" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns the total space (in bytes) used by dfs datanode
- @return the total space used by dfs datanode
- @throws IOException]]>
- </doc>
- </method>
- <method name="getCapacity" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns total capacity (in bytes) of storage (used and unused)
- @return total capacity of storage (used and unused)
- @throws IOException]]>
- </doc>
- </method>
- <method name="getRemaining" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns the amount of free storage space (in bytes)
- @return The amount of free storage space
- @throws IOException]]>
- </doc>
- </method>
- <method name="getStorageInfo" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Returns the storage id of the underlying storage]]>
- </doc>
- </method>
- <method name="getNumFailedVolumes" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Returns the number of failed volumes in the datanode.
- @return The number of failed volumes in the datanode.]]>
- </doc>
- </method>
- <doc>
- <![CDATA[This interface defines the methods to get the status of the FSDataset of
- a data node.
- It is also used for publishing via JMX (hence we follow the JMX naming
- convention).
- Note we have not used the MetricsDynamicMBeanBase to implement this
- because the interface for the FSDatasetMBean is stable and should
- be published as an interface.
-
- <p>
- Data node runtime statistic info is reported in another MBean.
- @see org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeActivityMBean]]>
- </doc>
- </interface>
- <!-- end interface org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean -->
- </package>
- <package name="org.apache.hadoop.hdfs.server.namenode">
- <!-- start class org.apache.hadoop.hdfs.server.namenode.BackupNode -->
- <class name="BackupNode" extends="org.apache.hadoop.hdfs.server.namenode.NameNode"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <method name="getRpcServerAddress" return="java.net.InetSocketAddress"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getServiceRpcServerAddress" return="java.net.InetSocketAddress"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="setRpcServerAddress"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- </method>
- <method name="setRpcServiceServerAddress"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- </method>
- <method name="getHttpServerAddress" return="java.net.InetSocketAddress"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- </method>
- <method name="setHttpServerAddress"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- </method>
- <method name="loadNamesystem"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="initialize"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="stop"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="setSafeMode" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getBlocks" return="org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="datanode" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo"/>
- <param name="size" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="register" return="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="startCheckpoint" return="org.apache.hadoop.hdfs.server.protocol.NamenodeCommand"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="endCheckpoint"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
- <param name="sig" type="org.apache.hadoop.hdfs.server.namenode.CheckpointSignature"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="journal"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="nnReg" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
- <param name="jAction" type="int"/>
- <param name="length" type="int"/>
- <param name="args" type="byte[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <doc>
- <![CDATA[BackupNode.
- <p>
- The backup node can play two roles.
- <ol>
- <li>{@link NamenodeRole#CHECKPOINT} node periodically creates checkpoints,
- that is, it downloads the image and edits from the active node, merges them, and
- uploads the new image back to the active node.</li>
- <li>{@link NamenodeRole#BACKUP} node keeps its namespace in sync with the
- active node, and periodically creates checkpoints by simply saving the
- namespace image to local disk(s).</li>
- </ol>]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.BackupNode -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.BackupStorage -->
- <class name="BackupStorage" extends="org.apache.hadoop.hdfs.server.namenode.FSImage"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <method name="isPreUpgradableLayout" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.BackupStorage -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.BlockManager -->
- <class name="BlockManager" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <method name="processReport"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="node" type="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor"/>
- <param name="report" type="org.apache.hadoop.hdfs.protocol.BlockListAsLongs"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[The given node is reporting all its blocks. Use this info to
- update the (machine-->blocklist) and (block-->machinelist) tables.]]>
- </doc>
- </method>
- <field name="DEFAULT_INITIAL_MAP_CAPACITY" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DEFAULT_MAP_LOAD_FACTOR" type="float"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DEFAULT_MAX_CORRUPT_FILES_RETURNED" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[Keeps information related to the blocks stored in the Hadoop cluster.
- This class is a helper class for {@link FSNamesystem} and requires several
- methods to be called with the lock held on {@link FSNamesystem}.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.BlockManager -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.BlockPlacementPolicy -->
- <class name="BlockPlacementPolicy" extends="java.lang.Object"
- abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="BlockPlacementPolicy"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="verifyBlockPlacement" return="int"
- abstract="true" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="srcPath" type="java.lang.String"/>
- <param name="lBlk" type="org.apache.hadoop.hdfs.protocol.LocatedBlock"/>
- <param name="minRacks" type="int"/>
- <doc>
- <![CDATA[Verify that the block is replicated on at least minRacks different racks
- if there are more than minRacks racks in the system.
-
- @param srcPath the full pathname of the file to be verified
- @param lBlk block with locations
- @param minRacks number of racks the block should be replicated to
- @return the difference between the required and the actual number of racks
- the block is replicated to.]]>
- </doc>
- </method>
- <method name="chooseReplicaToDelete" return="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor"
- abstract="true" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="srcInode" type="org.apache.hadoop.hdfs.server.namenode.FSInodeInfo"/>
- <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="replicationFactor" type="short"/>
- <param name="existingReplicas" type="java.util.Collection"/>
- <param name="moreExistingReplicas" type="java.util.Collection"/>
- <doc>
- <![CDATA[Decide whether deleting the specified replica of the block still makes
- the block conform to the configured block placement policy.
-
- @param srcInode The inode of the file to which the block-to-be-deleted belongs
- @param block The block to be deleted
- @param replicationFactor The required number of replicas for this block
- @param existingReplicas The replica locations of this block that are present
- on at least two unique racks.
- @param moreExistingReplicas Replica locations of this block that are not
- listed in the previous parameter.
- @return the replica that is the best candidate for deletion]]>
- </doc>
- </method>
- <method name="initialize"
- abstract="true" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <param name="stats" type="org.apache.hadoop.hdfs.server.namenode.FSClusterStats"/>
- <param name="clusterMap" type="org.apache.hadoop.net.NetworkTopology"/>
- <doc>
- <![CDATA[Used to setup a BlockPlacementPolicy object. This should be defined by
- all implementations of a BlockPlacementPolicy.
-
- @param conf the configuration object
- @param stats retrieve cluster status from here
- @param clusterMap cluster topology]]>
- </doc>
- </method>
- <method name="getInstance" return="org.apache.hadoop.hdfs.server.namenode.BlockPlacementPolicy"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <param name="stats" type="org.apache.hadoop.hdfs.server.namenode.FSClusterStats"/>
- <param name="clusterMap" type="org.apache.hadoop.net.NetworkTopology"/>
- <doc>
- <![CDATA[Get an instance of the configured Block Placement Policy based on the
- value of the configuration parameter dfs.block.replicator.classname.
-
- @param conf the configuration to be used
- @param stats an object that is used to retrieve the load on the cluster
- @param clusterMap the network topology of the cluster
- @return an instance of BlockPlacementPolicy]]>
- </doc>
- </method>
- <doc>
- <![CDATA[This interface is used for choosing the desired number of targets
- for placing block replicas.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.BlockPlacementPolicy -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.BlockPlacementPolicy.NotEnoughReplicasException -->
- <class name="BlockPlacementPolicy.NotEnoughReplicasException" extends="java.lang.Exception"
- abstract="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.BlockPlacementPolicy.NotEnoughReplicasException -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.BlockPlacementPolicyDefault -->
- <class name="BlockPlacementPolicyDefault" extends="org.apache.hadoop.hdfs.server.namenode.BlockPlacementPolicy"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <method name="initialize"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <param name="stats" type="org.apache.hadoop.hdfs.server.namenode.FSClusterStats"/>
- <param name="clusterMap" type="org.apache.hadoop.net.NetworkTopology"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="chooseTarget" return="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="srcPath" type="java.lang.String"/>
- <param name="numOfReplicas" type="int"/>
- <param name="writer" type="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor"/>
- <param name="chosenNodes" type="java.util.List"/>
- <param name="blocksize" type="long"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="chooseTarget" return="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="srcPath" type="java.lang.String"/>
- <param name="numOfReplicas" type="int"/>
- <param name="writer" type="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor"/>
- <param name="chosenNodes" type="java.util.List"/>
- <param name="excludedNodes" type="java.util.HashMap"/>
- <param name="blocksize" type="long"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="chooseTarget" return="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="srcInode" type="org.apache.hadoop.hdfs.server.namenode.FSInodeInfo"/>
- <param name="numOfReplicas" type="int"/>
- <param name="writer" type="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor"/>
- <param name="chosenNodes" type="java.util.List"/>
- <param name="blocksize" type="long"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="verifyBlockPlacement" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="srcPath" type="java.lang.String"/>
- <param name="lBlk" type="org.apache.hadoop.hdfs.protocol.LocatedBlock"/>
- <param name="minRacks" type="int"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="chooseReplicaToDelete" return="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="inode" type="org.apache.hadoop.hdfs.server.namenode.FSInodeInfo"/>
- <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="replicationFactor" type="short"/>
- <param name="first" type="java.util.Collection"/>
- <param name="second" type="java.util.Collection"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <doc>
- <![CDATA[This class is responsible for choosing the desired number of targets
- for placing block replicas.
- The replica placement strategy is that if the writer is on a datanode,
- the 1st replica is placed on the local machine,
- otherwise on a random datanode. The 2nd replica is placed on a datanode
- on a different rack. The 3rd replica is placed on a different node
- on the same rack as the second replica.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.BlockPlacementPolicyDefault -->
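- <!--
- A hypothetical Java sketch of the default placement strategy described in the
- javadoc above (local node, then a different rack, then a different node on that
- second rack). The Node type, field names, and the chooseThreeTargets helper are
- assumptions made for illustration; this is not the actual
- BlockPlacementPolicyDefault code.
-
- import java.util.ArrayList;
- import java.util.List;
-
- class PlacementSketch {
-   static class Node {
-     final String name;
-     final String rack;
-     Node(String name, String rack) { this.name = name; this.rack = rack; }
-   }
-
-   // Choose three targets for a new block written by 'writer' (may be null).
-   static List<Node> chooseThreeTargets(Node writer, List<Node> cluster) {
-     List<Node> targets = new ArrayList<Node>();
-     // 1st replica: the writer's own node if it is a datanode, otherwise any node.
-     Node first = (writer != null) ? writer : cluster.get(0);
-     targets.add(first);
-     // 2nd replica: some node on a rack different from the first replica's rack.
-     Node second = first;
-     for (Node n : cluster) {
-       if (!n.rack.equals(first.rack)) { second = n; break; }
-     }
-     targets.add(second);
-     // 3rd replica: a different node on the same rack as the second replica.
-     Node third = second;
-     for (Node n : cluster) {
-       if (n.rack.equals(second.rack) && !n.name.equals(second.name)) { third = n; break; }
-     }
-     targets.add(third);
-     return targets;
-   }
- }
- -->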
- <!-- start class org.apache.hadoop.hdfs.server.namenode.CancelDelegationTokenServlet -->
- <class name="CancelDelegationTokenServlet" extends="org.apache.hadoop.hdfs.server.namenode.DfsServlet"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="CancelDelegationTokenServlet"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="doGet"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="req" type="javax.servlet.http.HttpServletRequest"/>
- <param name="resp" type="javax.servlet.http.HttpServletResponse"/>
- <exception name="ServletException" type="javax.servlet.ServletException"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <field name="PATH_SPEC" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="TOKEN" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[Cancel delegation tokens over http for use in hftp.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.CancelDelegationTokenServlet -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.CheckpointSignature -->
- <class name="CheckpointSignature" extends="org.apache.hadoop.hdfs.server.common.StorageInfo"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.io.WritableComparable"/>
- <constructor name="CheckpointSignature"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="toString" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="compareTo" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="o" type="org.apache.hadoop.hdfs.server.namenode.CheckpointSignature"/>
- </method>
- <method name="equals" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="o" type="java.lang.Object"/>
- </method>
- <method name="hashCode" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <doc>
- <![CDATA[A unique signature intended to identify checkpoint transactions.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.CheckpointSignature -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.ContentSummaryServlet -->
- <class name="ContentSummaryServlet" extends="org.apache.hadoop.hdfs.server.namenode.DfsServlet"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="ContentSummaryServlet"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="doGet"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="request" type="javax.servlet.http.HttpServletRequest"/>
- <param name="response" type="javax.servlet.http.HttpServletResponse"/>
- <exception name="ServletException" type="javax.servlet.ServletException"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <doc>
- <![CDATA[Servlet that serves the content summary of a path.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.ContentSummaryServlet -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.CorruptReplicasMap -->
- <class name="CorruptReplicasMap" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="CorruptReplicasMap"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="addToCorruptReplicasMap"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="blk" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="dn" type="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor"/>
- <doc>
- <![CDATA[Mark the block belonging to the datanode as corrupt.
- @param blk Block to be added to CorruptReplicasMap
- @param dn DatanodeDescriptor which holds the corrupt replica]]>
- </doc>
- </method>
- <method name="numCorruptReplicas" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="blk" type="org.apache.hadoop.hdfs.protocol.Block"/>
- </method>
- <method name="size" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <doc>
- <![CDATA[Stores information about all corrupt blocks in the File System.
- A Block is considered corrupt only if all of its replicas are
- corrupt. While reporting replicas of a Block, we hide any corrupt
- copies. These copies are removed once the block is found to have the
- expected number of good replicas.
- Mapping: Block -> TreeSet<DatanodeDescriptor>]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.CorruptReplicasMap -->
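- <!--
- A minimal Java sketch of the data structure described above, i.e. a map from a
- block to the sorted set of datanodes holding corrupt replicas of it. Types are
- deliberately simplified (String stands in for Block and DatanodeDescriptor), so
- this is not the actual CorruptReplicasMap implementation.
-
- import java.util.Map;
- import java.util.SortedSet;
- import java.util.TreeMap;
- import java.util.TreeSet;
-
- class CorruptReplicasSketch {
-   // block id -> datanodes that reported a corrupt copy of that block
-   private final Map<String, SortedSet<String>> corruptReplicas =
-       new TreeMap<String, SortedSet<String>>();
-
-   void addToCorruptReplicasMap(String block, String datanode) {
-     corruptReplicas.computeIfAbsent(block, b -> new TreeSet<String>()).add(datanode);
-   }
-
-   int numCorruptReplicas(String block) {
-     SortedSet<String> nodes = corruptReplicas.get(block);
-     return (nodes == null) ? 0 : nodes.size();
-   }
-
-   int size() {
-     return corruptReplicas.size();
-   }
- }
- -->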
- <!-- start class org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor -->
- <class name="DatanodeDescriptor" extends="org.apache.hadoop.hdfs.protocol.DatanodeInfo"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="DatanodeDescriptor"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Default constructor]]>
- </doc>
- </constructor>
- <constructor name="DatanodeDescriptor" type="org.apache.hadoop.hdfs.protocol.DatanodeID"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[DatanodeDescriptor constructor
- @param nodeID id of the data node]]>
- </doc>
- </constructor>
- <constructor name="DatanodeDescriptor" type="org.apache.hadoop.hdfs.protocol.DatanodeID, java.lang.String"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[DatanodeDescriptor constructor
-
- @param nodeID id of the data node
- @param networkLocation location of the data node in network]]>
- </doc>
- </constructor>
- <constructor name="DatanodeDescriptor" type="org.apache.hadoop.hdfs.protocol.DatanodeID, java.lang.String, java.lang.String"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[DatanodeDescriptor constructor
-
- @param nodeID id of the data node
- @param networkLocation location of the data node in network
- @param hostName it could be different from host specified for DatanodeID]]>
- </doc>
- </constructor>
- <constructor name="DatanodeDescriptor" type="org.apache.hadoop.hdfs.protocol.DatanodeID, long, long, long, int, int"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[DatanodeDescriptor constructor
-
- @param nodeID id of the data node
- @param capacity capacity of the data node
- @param dfsUsed space used by the data node
- @param remaining remaining capacity of the data node
- @param xceiverCount # of data transfers at the data node]]>
- </doc>
- </constructor>
- <constructor name="DatanodeDescriptor" type="org.apache.hadoop.hdfs.protocol.DatanodeID, java.lang.String, java.lang.String, long, long, long, int, int"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[DatanodeDescriptor constructor
-
- @param nodeID id of the data node
- @param networkLocation location of the data node in network
- @param capacity capacity of the data node, including space used by non-dfs
- @param dfsUsed the used space by dfs datanode
- @param remaining remaining capacity of the data node
- @param xceiverCount # of data transfers at the data node]]>
- </doc>
- </constructor>
- <method name="numBlocks" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getBlocksScheduled" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[@return Approximate number of blocks currently scheduled to be written
- to this datanode.]]>
- </doc>
- </method>
- <method name="hashCode" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="equals" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="obj" type="java.lang.Object"/>
- </method>
- <method name="getVolumeFailures" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[@return number of failed volumes in the datanode.]]>
- </doc>
- </method>
- <method name="updateRegInfo"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="nodeReg" type="org.apache.hadoop.hdfs.protocol.DatanodeID"/>
- <doc>
- <![CDATA[@param nodeReg DatanodeID to update registration for.]]>
- </doc>
- </method>
- <field name="isAlive" type="boolean"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="needKeyUpdate" type="boolean"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[DatanodeDescriptor tracks stats on a given DataNode, such as
- available storage capacity, last update time, etc., and maintains a
- set of blocks stored on the datanode.
- This data structure is internal to the namenode. It is *not* sent
- over-the-wire to the Client or the Datanodes. Neither is it stored
- persistently in the fsImage.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor.BlockTargetPair -->
- <class name="DatanodeDescriptor.BlockTargetPair" extends="java.lang.Object"
- abstract="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <field name="block" type="org.apache.hadoop.hdfs.protocol.Block"
- transient="false" volatile="false"
- static="false" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="targets" type="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor[]"
- transient="false" volatile="false"
- static="false" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[Block and targets pair]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor.BlockTargetPair -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.FileChecksumServlets -->
- <class name="FileChecksumServlets" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="FileChecksumServlets"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <doc>
- <![CDATA[Servlets for file checksum]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.FileChecksumServlets -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.FileChecksumServlets.GetServlet -->
- <class name="FileChecksumServlets.GetServlet" extends="org.apache.hadoop.hdfs.server.namenode.DfsServlet"
- abstract="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="FileChecksumServlets.GetServlet"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="doGet"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="request" type="javax.servlet.http.HttpServletRequest"/>
- <param name="response" type="javax.servlet.http.HttpServletResponse"/>
- <exception name="ServletException" type="javax.servlet.ServletException"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <doc>
- <![CDATA[Get FileChecksum]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.FileChecksumServlets.GetServlet -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.FileChecksumServlets.RedirectServlet -->
- <class name="FileChecksumServlets.RedirectServlet" extends="org.apache.hadoop.hdfs.server.namenode.DfsServlet"
- abstract="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="FileChecksumServlets.RedirectServlet"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="doGet"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="request" type="javax.servlet.http.HttpServletRequest"/>
- <param name="response" type="javax.servlet.http.HttpServletResponse"/>
- <exception name="ServletException" type="javax.servlet.ServletException"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <doc>
- <![CDATA[Redirect file checksum queries to an appropriate datanode.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.FileChecksumServlets.RedirectServlet -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.FileDataServlet -->
- <class name="FileDataServlet" extends="org.apache.hadoop.hdfs.server.namenode.DfsServlet"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="FileDataServlet"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="createUri" return="java.net.URI"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="parent" type="java.lang.String"/>
- <param name="i" type="org.apache.hadoop.hdfs.protocol.HdfsFileStatus"/>
- <param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
- <param name="nnproxy" type="org.apache.hadoop.hdfs.protocol.ClientProtocol"/>
- <param name="request" type="javax.servlet.http.HttpServletRequest"/>
- <param name="dt" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="URISyntaxException" type="java.net.URISyntaxException"/>
- <doc>
- <![CDATA[Create a redirection URI]]>
- </doc>
- </method>
- <method name="doGet"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="request" type="javax.servlet.http.HttpServletRequest"/>
- <param name="response" type="javax.servlet.http.HttpServletResponse"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Service a GET request as described below.
- Request:
- {@code
- GET http://<nn>:<port>/data[/<path>] HTTP/1.1
- }]]>
- </doc>
- </method>
- <doc>
- <![CDATA[Redirect queries about the hosted filesystem to an appropriate datanode.
- @see org.apache.hadoop.hdfs.HftpFileSystem]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.FileDataServlet -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.FsckServlet -->
- <class name="FsckServlet" extends="org.apache.hadoop.hdfs.server.namenode.DfsServlet"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="FsckServlet"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="doGet"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="request" type="javax.servlet.http.HttpServletRequest"/>
- <param name="response" type="javax.servlet.http.HttpServletResponse"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Handle fsck request]]>
- </doc>
- </method>
- <doc>
- <![CDATA[This class is used in Namesystem's web server to do fsck on namenode.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.FsckServlet -->
- <!-- start interface org.apache.hadoop.hdfs.server.namenode.FSClusterStats -->
- <interface name="FSClusterStats" abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <method name="getTotalLoad" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[An indication of the total load of the cluster.
-
- @return a count of the total number of block transfers and block
- writes that are currently occurring on the cluster.]]>
- </doc>
- </method>
- <doc>
- <![CDATA[This interface is used for retrieving the load related statistics of
- the cluster.]]>
- </doc>
- </interface>
- <!-- end interface org.apache.hadoop.hdfs.server.namenode.FSClusterStats -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.FSEditLog -->
- <class name="FSEditLog" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <method name="logSync"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Sync all modifications done by this thread.
- The internal concurrency design of this class is as follows:
- - Log items are written synchronized into an in-memory buffer,
- and each assigned a transaction ID.
- - When a thread (client) would like to sync all of its edits, logSync()
- uses a ThreadLocal transaction ID to determine what edit number must
- be synced to.
- - The isSyncRunning volatile boolean tracks whether a sync is currently
- in progress.
- The data is double-buffered within each edit log implementation so that
- in-memory writing can occur in parallel with the on-disk writing.
- Each sync occurs in three steps:
- 1. synchronized, it swaps the double buffer and sets the isSyncRunning
- flag.
- 2. unsynchronized, it flushes the data to storage
- 3. synchronized, it resets the flag and notifies anyone waiting on the
- sync.
- The lack of synchronization on step 2 allows other threads to continue
- to write into the memory buffer while the sync is in progress.
- Because this step is unsynchronized, actions that need to avoid
- concurrency with sync() should be synchronized and also call
- waitForSyncToFinish() before assuming they are running alone.]]>
- </doc>
- </method>
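- <!--
- A minimal Java sketch of the double-buffered sync pattern described in the
- logSync() javadoc above: a synchronized buffer swap that sets the sync flag, an
- unsynchronized flush, and a synchronized completion step that wakes up waiters.
- All names, the String edit type, and the flushToStorage placeholder are
- illustrative assumptions, not the actual FSEditLog implementation.
-
- import java.io.IOException;
- import java.util.ArrayList;
- import java.util.List;
-
- class DoubleBufferedLogSketch {
-   private List<String> currentBuffer = new ArrayList<String>(); // being written to
-   private List<String> syncBuffer = new ArrayList<String>();    // being flushed
-   private boolean isSyncRunning = false;
-   private long txid = 0;        // last assigned transaction id
-   private long syncedTxid = 0;  // last transaction id known to be on disk
-
-   synchronized long logEdit(String edit) {
-     currentBuffer.add(edit);
-     txid = txid + 1;
-     return txid;
-   }
-
-   void logSync(long mytxid) throws IOException, InterruptedException {
-     List<String> toFlush;
-     long flushUpTo;
-     synchronized (this) {
-       // Wait while our edits are unsynced and another thread is already flushing.
-       while (mytxid > syncedTxid && isSyncRunning) {
-         wait();
-       }
-       if (mytxid <= syncedTxid) {
-         return; // another thread already synced our edits
-       }
-       // Step 1 (synchronized): swap the buffers and mark the sync as running.
-       toFlush = currentBuffer;
-       currentBuffer = syncBuffer;
-       syncBuffer = toFlush;
-       flushUpTo = txid;
-       isSyncRunning = true;
-     }
-     // Step 2 (unsynchronized): flush to storage while writers keep filling currentBuffer.
-     flushToStorage(toFlush);
-     synchronized (this) {
-       // Step 3 (synchronized): record progress, clear the flag, notify waiters.
-       syncBuffer.clear();
-       syncedTxid = flushUpTo;
-       isSyncRunning = false;
-       notifyAll();
-     }
-   }
-
-   private void flushToStorage(List<String> edits) throws IOException {
-     // Placeholder for the real on-disk write.
-   }
- }
- -->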
- <method name="logOpenFile"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="path" type="java.lang.String"/>
- <param name="newNode" type="org.apache.hadoop.hdfs.server.namenode.INodeFileUnderConstruction"/>
- <doc>
- <![CDATA[Add open lease record to edit log.
- Records the block locations of the last block.]]>
- </doc>
- </method>
- <method name="logCloseFile"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="path" type="java.lang.String"/>
- <param name="newNode" type="org.apache.hadoop.hdfs.server.namenode.INodeFile"/>
- <doc>
- <![CDATA[Add close lease record to edit log.]]>
- </doc>
- </method>
- <method name="logMkDir"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="path" type="java.lang.String"/>
- <param name="newNode" type="org.apache.hadoop.hdfs.server.namenode.INode"/>
- <doc>
- <![CDATA[Add create directory record to edit log]]>
- </doc>
- </method>
- <method name="setBufferCapacity"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="size" type="int"/>
- </method>
- <method name="getOutputStreamIterator" return="java.util.Iterator"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="streamType" type="org.apache.hadoop.hdfs.server.namenode.JournalStream.JournalType"/>
- <doc>
- <![CDATA[Get stream iterator for the specified type.]]>
- </doc>
- </method>
- <doc>
- <![CDATA[FSEditLog maintains a log of the namespace modifications.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.FSEditLog -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader -->
- <class name="FSEditLogLoader" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="FSEditLogLoader" type="org.apache.hadoop.hdfs.server.namenode.FSNamesystem"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.FSImage -->
- <class name="FSImage" extends="org.apache.hadoop.hdfs.server.common.Storage"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="FSImage" type="org.apache.hadoop.hdfs.server.common.StorageInfo"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getFSNamesystem" return="org.apache.hadoop.hdfs.server.namenode.FSNamesystem"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </method>
- <method name="setRestoreFailedStorage"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="val" type="boolean"/>
- </method>
- <method name="getRestoreFailedStorage" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="props" type="java.util.Properties"/>
- <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="setFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="props" type="java.util.Properties"/>
- <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Write last checkpoint time and version file into the storage directory.
-
- The version file should always be written last.
- A missing or corrupted version file indicates that
- the checkpoint is not valid.
-
- @param sd storage directory
- @throws IOException]]>
- </doc>
- </method>
- <method name="getEditLog" return="org.apache.hadoop.hdfs.server.namenode.FSEditLog"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="isPreUpgradableLayout" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="setImageDigest"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="digest" type="org.apache.hadoop.io.MD5Hash"/>
- </method>
- <method name="saveCurrent"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Save current image and empty journal into {@code current} directory.]]>
- </doc>
- </method>
- <method name="moveCurrent"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Move {@code current} to {@code lastcheckpoint.tmp} and
- recreate empty {@code current}.
- {@code current} is moved only if it is well formatted,
- that is, it contains a VERSION file.
-
- @see org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory#getLastCheckpointTmp()
- @see org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory#getPreviousCheckpoint()]]>
- </doc>
- </method>
- <method name="moveLastCheckpoint"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Move {@code lastcheckpoint.tmp} to {@code previous.checkpoint}
-
- @see org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory#getPreviousCheckpoint()
- @see org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory#getLastCheckpointTmp()]]>
- </doc>
- </method>
- <method name="format"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getFsEditName" return="java.io.File"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <field name="namesystem" type="org.apache.hadoop.hdfs.server.namenode.FSNamesystem"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="checkpointTime" type="long"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="editLog" type="org.apache.hadoop.hdfs.server.namenode.FSEditLog"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="imageDigest" type="org.apache.hadoop.io.MD5Hash"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="newImageDigest" type="org.apache.hadoop.io.MD5Hash"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="removedStorageDirs" type="java.util.List"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <doc>
- <![CDATA[list of failed (and thus removed) storages]]>
- </doc>
- </field>
- <field name="ckptState" type="org.apache.hadoop.hdfs.server.namenode.FSImage.CheckpointStates"
- transient="false" volatile="true"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Can fs-image be rolled?]]>
- </doc>
- </field>
- <doc>
- <![CDATA[FSImage handles checkpointing and logging of the namespace edits.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.FSImage -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.FSImageSerialization -->
- <class name="FSImageSerialization" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <method name="readString" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInputStream"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="readBytes" return="byte[]"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInputStream"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="readPathComponents" return="byte[][]"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInputStream"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Reading the path from the image and converting it to byte[][] directly
- saves us an array copy and conversions to and from String.
- @param in
- @return the array, each element of which is a byte[] representation
- of a path component
- @throws IOException]]>
- </doc>
- </method>
- <doc>
- <![CDATA[Static utility functions for serializing various pieces of data in the correct
- format for the FSImage file.
- Some members are currently public for the benefit of the Offline Image Viewer
- which is located outside of this package. These members should be made
- package-protected when the OIV is refactored.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.FSImageSerialization -->
- <!-- start interface org.apache.hadoop.hdfs.server.namenode.FSInodeInfo -->
- <interface name="FSInodeInfo" abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <method name="getFullPathName" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[a string representation of an inode
-
- @return the full pathname (from root) that this inode represents]]>
- </doc>
- </method>
- <doc>
- <![CDATA[This interface is used by the pluggable block placement policy
- to expose a few characteristics of an Inode.]]>
- </doc>
- </interface>
- <!-- end interface org.apache.hadoop.hdfs.server.namenode.FSInodeInfo -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.FSNamesystem -->
- <class name="FSNamesystem" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.hdfs.protocol.FSConstants"/>
- <implements name="org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean"/>
- <implements name="org.apache.hadoop.hdfs.server.namenode.FSClusterStats"/>
- <implements name="org.apache.hadoop.hdfs.server.namenode.NameNodeMXBean"/>
- <method name="getNamespaceDirs" return="java.util.Collection"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- </method>
- <method name="getStorageDirs" return="java.util.Collection"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <param name="propertyName" type="java.lang.String"/>
- </method>
- <method name="getNamespaceEditsDirs" return="java.util.Collection"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- </method>
- <method name="getUpgradePermission" return="org.apache.hadoop.fs.permission.PermissionStatus"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Return the default path permission when upgrading from releases with no
- permissions (<=0.15) to releases with permissions (>=0.16)]]>
- </doc>
- </method>
- <method name="close"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Close down this file system manager.
- Causes heartbeat and lease daemons to stop; waits briefly for
- them to finish, but a short timeout returns control back to the caller.]]>
- </doc>
- </method>
- <method name="setPermission"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
- <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
- <exception name="SafeModeException" type="org.apache.hadoop.hdfs.server.namenode.SafeModeException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Set permissions for an existing file.
- @throws IOException]]>
- </doc>
- </method>
- <method name="setOwner"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="username" type="java.lang.String"/>
- <param name="group" type="java.lang.String"/>
- <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
- <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
- <exception name="SafeModeException" type="org.apache.hadoop.hdfs.server.namenode.SafeModeException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Set owner for an existing file.
- @throws IOException]]>
- </doc>
- </method>
- <method name="concat"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="target" type="java.lang.String"/>
- <param name="srcs" type="java.lang.String[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[Moves all the blocks from srcs and appends them to target.
- To avoid rollbacks we will verify the validity of ALL of the args
- before we start the actual move.
- @param target
- @param srcs
- @throws IOException]]>
- </doc>
- </method>
- <method name="setTimes"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="mtime" type="long"/>
- <param name="atime" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[Stores the modification and access time for this inode.
- The access time is precise up to an hour. The transaction, if needed, is
- written to the edits log but is not flushed.]]>
- </doc>
- </method>
- <method name="createSymlink"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="target" type="java.lang.String"/>
- <param name="link" type="java.lang.String"/>
- <param name="dirPerms" type="org.apache.hadoop.fs.permission.PermissionStatus"/>
- <param name="createParent" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[Create a symbolic link.]]>
- </doc>
- </method>
- <method name="setReplication" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="replication" type="short"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[Set replication for an existing file.
-
- The NameNode sets new replication and schedules either replication of
- under-replicated data blocks or removal of the excessive block copies
- if the blocks are over-replicated.
-
- @see ClientProtocol#setReplication(String, short)
- @param src file name
- @param replication new replication
- @return true if successful;
- false if file does not exist or is a directory]]>
- </doc>
- </method>
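- <!-- Illustrative usage: applications normally reach setReplication through the
-      client FileSystem API rather than FSNamesystem directly. A minimal sketch,
-      assuming a configured HDFS FileSystem (path and factor are made up; classes
-      are org.apache.hadoop.fs.FileSystem and org.apache.hadoop.fs.Path):
-      FileSystem fs = FileSystem.get(conf);
-      boolean changed = fs.setReplication(new Path("/user/data/part-00000"), (short) 3);
-      if (!changed) {
-        // the path does not exist or refers to a directory
-      }
- -->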
- <method name="getAdditionalBlock" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="clientName" type="java.lang.String"/>
- <param name="previous" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="excludedNodes" type="java.util.HashMap"/>
- <exception name="LeaseExpiredException" type="org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException"/>
- <exception name="NotReplicatedYetException" type="org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException"/>
- <exception name="QuotaExceededException" type="org.apache.hadoop.hdfs.protocol.QuotaExceededException"/>
- <exception name="SafeModeException" type="org.apache.hadoop.hdfs.server.namenode.SafeModeException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[The client would like to obtain an additional block for the indicated
- filename (which is being written to). Return an array that consists
- of the block, plus a set of machines. The first on this list should
- be where the client writes data. Subsequent items in the list must
- be provided in the connection to the first datanode.
- Make sure the previous blocks have been reported by datanodes and
- are replicated. Will return an empty 2-elt array if we want the
- client to "try again later".]]>
- </doc>
- </method>
- <method name="abandonBlock" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="src" type="java.lang.String"/>
- <param name="holder" type="java.lang.String"/>
- <exception name="LeaseExpiredException" type="org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException"/>
- <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[The client would like to let go of the given block]]>
- </doc>
- </method>
- <method name="completeFile" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="holder" type="java.lang.String"/>
- <param name="last" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="SafeModeException" type="org.apache.hadoop.hdfs.server.namenode.SafeModeException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Complete in-progress write to the given file.
- @return true if successful, false if the client should continue to retry
- (e.g. if not all blocks have reached minimum replication yet)
- @throws IOException on error (e.g. lease mismatch, file not open, file deleted)]]>
- </doc>
- </method>
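- <!-- Illustrative sketch of the retry loop implied by the boolean contract
-      above. This is not DFSClient's actual code; src, clientName and lastBlock
-      are assumed to be in scope:
-      boolean fileComplete = false;
-      while (!fileComplete) {
-        fileComplete = namesystem.completeFile(src, clientName, lastBlock);
-        if (!fileComplete) {
-          try {
-            Thread.sleep(400);   // back off until blocks reach minimum replication
-          } catch (InterruptedException ie) {
-            Thread.currentThread().interrupt();
-            break;
-          }
-        }
-      }
- -->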
- <method name="markBlockAsCorrupt"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="blk" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="dn" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Mark the block belonging to the datanode as corrupt
- @param blk Block to be marked as corrupt
- @param dn Datanode which holds the corrupt replica]]>
- </doc>
- </method>
- <method name="delete" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="recursive" type="boolean"/>
- <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
- <exception name="SafeModeException" type="org.apache.hadoop.hdfs.server.namenode.SafeModeException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Remove the indicated file from the namespace.
-
- @see ClientProtocol#delete(String, boolean) for a detailed description and
- the exceptions thrown]]>
- </doc>
- </method>
- <method name="mkdirs" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="permissions" type="org.apache.hadoop.fs.permission.PermissionStatus"/>
- <param name="createParent" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <doc>
- <![CDATA[Create all the necessary directories]]>
- </doc>
- </method>
- <method name="getListing" return="org.apache.hadoop.hdfs.protocol.DirectoryListing"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="startAfter" type="byte[]"/>
- <param name="needLocation" type="boolean"/>
- <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
- <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get a partial listing of the indicated directory
- @param src the directory name
- @param startAfter the name to start after
- @param needLocation if blockLocations need to be returned
- @return a partial listing starting after startAfter
-
- @throws AccessControlException if access is denied
- @throws UnresolvedLinkException if symbolic link is encountered
- @throws IOException if other I/O error occurred]]>
- </doc>
- </method>
- <method name="registerDatanode"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="nodeReg" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Register Datanode.
- <p>
- The purpose of registration is to identify whether the new datanode
- serves a new data storage, and will report new data block copies,
- which the namenode was not aware of; or the datanode is a replacement
- node for the data storage that was previously served by a different
- or the same (in terms of host:port) datanode.
- The data storages are distinguished by their storageIDs. When a new
- data storage is reported the namenode issues a new unique storageID.
- <p>
- Finally, the namenode returns its namespaceID as the registrationID
- for the datanodes.
- namespaceID is a persistent attribute of the name space.
- The registrationID is checked every time the datanode is communicating
- with the namenode.
- Datanodes with inappropriate registrationID are rejected.
- If the namenode stops and then restarts, it can restore its
- namespaceID and will continue serving the datanodes that have previously
- registered with the namenode without restarting the whole cluster.
-
- @see org.apache.hadoop.hdfs.server.datanode.DataNode#register()]]>
- </doc>
- </method>
- <method name="getRegistrationID" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get registrationID for datanodes based on the namespaceID.
-
- @see #registerDatanode(DatanodeRegistration)
- @see FSImage#newNamespaceID()
- @return registration ID]]>
- </doc>
- </method>
- <method name="computeDatanodeWork" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Compute block replication and block invalidation work
- that can be scheduled on data-nodes.
- The datanode will be informed of this work at the next heartbeat.
-
- @return number of blocks scheduled for replication or removal.
- @throws IOException]]>
- </doc>
- </method>
- <method name="setNodeReplicationLimit"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="limit" type="int"/>
- </method>
- <method name="removeDatanode"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="nodeID" type="org.apache.hadoop.hdfs.protocol.DatanodeID"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Remove a datanode descriptor.
- @param nodeID datanode ID.
- @throws IOException]]>
- </doc>
- </method>
- <method name="processReport"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="nodeID" type="org.apache.hadoop.hdfs.protocol.DatanodeID"/>
- <param name="newReport" type="org.apache.hadoop.hdfs.protocol.BlockListAsLongs"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[The given node is reporting all its blocks. Use this info to
- update the (machine-->blocklist) and (block-->machinelist) tables.]]>
- </doc>
- </method>
- <method name="blockReceived"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="nodeID" type="org.apache.hadoop.hdfs.protocol.DatanodeID"/>
- <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="delHint" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[The given node is reporting that it received a certain block.]]>
- </doc>
- </method>
- <method name="getMissingBlocksCount" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getCapacityTotal" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Total raw bytes including non-dfs used space.]]>
- </doc>
- </method>
- <method name="getCapacityUsed" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Total used space by data nodes]]>
- </doc>
- </method>
- <method name="getCapacityUsedPercent" return="float"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Total used space by data nodes as a percentage of total capacity]]>
- </doc>
- </method>
- <method name="getCapacityUsedNonDFS" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Total used space by data nodes for non-DFS purposes such
- as storing temporary files on the local file system]]>
- </doc>
- </method>
- <method name="getCapacityRemaining" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Total non-used raw bytes.]]>
- </doc>
- </method>
- <method name="getCapacityRemainingPercent" return="float"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Total remaining space by data nodes as a percentage of total capacity]]>
- </doc>
- </method>
- <method name="getTotalLoad" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Total number of connections.]]>
- </doc>
- </method>
- <method name="datanodeReport" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="type" type="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType"/>
- <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
- </method>
- <method name="DFSNodesStatus"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="live" type="java.util.ArrayList"/>
- <param name="dead" type="java.util.ArrayList"/>
- </method>
- <method name="stopDecommission"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="node" type="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Stop decommissioning the specified datanodes.]]>
- </doc>
- </method>
- <method name="getDataNodeInfo" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="name" type="java.lang.String"/>
- </method>
- <method name="getStartTime" return="java.util.Date"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="refreshNodes"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Rereads the config to get hosts and exclude list file names.
- Rereads the files to update the hosts and exclude lists. It
- checks if any of the hosts have changed states:
- 1. Added to hosts --> no further work needed here.
- 2. Removed from hosts --> mark AdminState as decommissioned.
- 3. Added to exclude --> start decommission.
- 4. Removed from exclude --> stop decommission.]]>
- </doc>
- </method>
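- <!-- Illustrative sketch of the per-node decision described in the
-      refreshNodes javadoc above (typically triggered by the dfsadmin
-      refreshNodes command). Collection and helper names are assumptions:
-      for (DatanodeDescriptor node : knownDatanodes) {
-        boolean inHosts   = hostsList.isEmpty() || hostsList.contains(node.getName());
-        boolean inExclude = excludeList.contains(node.getName());
-        if (!inHosts) {
-          node.setDecommissioned();     // case 2: removed from hosts
-        } else if (inExclude) {
-          startDecommission(node);      // case 3: added to exclude
-        } else {
-          stopDecommission(node);       // cases 1 and 4: no decommission needed
-        }
-      }
- -->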
- <method name="getDatanode" return="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="nodeID" type="org.apache.hadoop.hdfs.protocol.DatanodeID"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get data node by storage ID.
-
- @param nodeID
- @return DatanodeDescriptor or null if the node is not found.
- @throws IOException]]>
- </doc>
- </method>
- <method name="getBlocksTotal" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the total number of blocks in the system.]]>
- </doc>
- </method>
- <method name="getFilesTotal" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getPendingReplicationBlocks" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getUnderReplicatedBlocks" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getCorruptReplicaBlocks" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Returns number of blocks with corrupt replicas]]>
- </doc>
- </method>
- <method name="getScheduledReplicationBlocks" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getPendingDeletionBlocks" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getExcessBlocks" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getBlockCapacity" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getFSState" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getFSNamesystemMetrics" return="org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMetrics"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[get FSNamesystemMetrics]]>
- </doc>
- </method>
- <method name="shutdown"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[shutdown FSNamesystem]]>
- </doc>
- </method>
- <method name="getNumLiveDataNodes" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Number of live data nodes
- @return Number of live data nodes]]>
- </doc>
- </method>
- <method name="getNumDeadDataNodes" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Number of dead data nodes
- @return Number of dead data nodes]]>
- </doc>
- </method>
- <method name="setGenerationStamp"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="stamp" type="long"/>
- <doc>
- <![CDATA[Sets the generation stamp for this filesystem]]>
- </doc>
- </method>
- <method name="getGenerationStamp" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Gets the generation stamp for this filesystem]]>
- </doc>
- </method>
- <method name="numCorruptReplicas" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="blk" type="org.apache.hadoop.hdfs.protocol.Block"/>
- </method>
- <method name="getDecommissioningNodes" return="java.util.ArrayList"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getDelegationTokenSecretManager" return="org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Returns the DelegationTokenSecretManager instance in the namesystem.
- @return delegation token secret manager object]]>
- </doc>
- </method>
- <method name="getDelegationToken" return="org.apache.hadoop.security.token.Token"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="renewer" type="org.apache.hadoop.io.Text"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[@param renewer
- @return Token<DelegationTokenIdentifier>
- @throws IOException]]>
- </doc>
- </method>
- <method name="renewDelegationToken" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="token" type="org.apache.hadoop.security.token.Token"/>
- <exception name="SecretManager.InvalidToken" type="org.apache.hadoop.security.token.SecretManager.InvalidToken"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[@param token
- @return New expiryTime of the token
- @throws InvalidToken
- @throws IOException]]>
- </doc>
- </method>
- <method name="cancelDelegationToken"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="token" type="org.apache.hadoop.security.token.Token"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[@param token
- @throws IOException]]>
- </doc>
- </method>
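- <!-- Illustrative token lifecycle using the three methods above. A minimal
-      sketch; fsn stands for a handle to this namesystem and the renewer name
-      is made up:
-      Token<DelegationTokenIdentifier> t = fsn.getDelegationToken(new Text("jobtracker"));
-      long newExpiry = fsn.renewDelegationToken(t);  // returns the new expiry time
-      fsn.cancelDelegationToken(t);                  // invalidate when no longer needed
- -->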
- <method name="logUpdateMasterKey"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="key" type="org.apache.hadoop.security.token.delegation.DelegationKey"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Log the updateMasterKey operation to edit logs
-
- @param key new delegation key.]]>
- </doc>
- </method>
- <method name="getVersion" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Class representing Namenode information for JMX interfaces]]>
- </doc>
- </method>
- <method name="getUsed" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getFree" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getTotal" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getSafemode" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="isUpgradeFinalized" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getNonDfsUsedSpace" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getPercentUsed" return="float"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getPercentRemaining" return="float"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getTotalBlocks" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getTotalFiles" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getThreads" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getLiveNodes" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Returned information is a JSON representation of a map in which each host name
- is a key and its value is a map of live node attribute keys to their values]]>
- </doc>
- </method>
- <method name="getDeadNodes" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Returned information is a JSON representation of a map in which each host name
- is a key and its value is a map of dead node attribute keys to their values]]>
- </doc>
- </method>
- <method name="getDecomNodes" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Returned information is a JSON representation of a map in which each host name
- is a key and its value is a map of decommissioning node attribute keys to their values]]>
- </doc>
- </method>
- <field name="LOG" type="org.apache.commons.logging.Log"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="auditLog" type="org.apache.commons.logging.Log"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Logger for audit events, noting successful FSNamesystem operations. Emits
- to FSNamesystem.audit at INFO. Each event causes a set of tab-separated
- <code>key=value</code> pairs to be written for the following properties:
- <code>
- ugi=<ugi in RPC>
- ip=<remote IP>
- cmd=<command>
- src=<src path>
- dst=<dst path (optional)>
- perm=<permissions (optional)>
- </code>]]>
- </doc>
- </field>
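- <!-- Illustrative audit event in the format described above (values are
-      invented; fields are tab-separated in the actual log):
-      ugi=alice  ip=/10.1.2.3  cmd=mkdirs  src=/user/alice/reports  dst=null  perm=alice:supergroup:rwxr-xr-x
- -->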
- <field name="dir" type="org.apache.hadoop.hdfs.server.namenode.FSDirectory"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="leaseManager" type="org.apache.hadoop.hdfs.server.namenode.LeaseManager"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="lmthread" type="org.apache.hadoop.util.Daemon"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="replthread" type="org.apache.hadoop.util.Daemon"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[FSNamesystem does the actual bookkeeping work for the
- DataNode.
- It tracks several important tables.
- 1) valid fsname --> blocklist (kept on disk, logged)
- 2) Set of all valid blocks (inverted #1)
- 3) block --> machinelist (kept in memory, rebuilt dynamically from reports)
- 4) machine --> blocklist (inverted #2)
- 5) LRU cache of updated-heartbeat machines]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.FSNamesystem -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.GetDelegationTokenServlet -->
- <class name="GetDelegationTokenServlet" extends="org.apache.hadoop.hdfs.server.namenode.DfsServlet"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="GetDelegationTokenServlet"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="doGet"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="req" type="javax.servlet.http.HttpServletRequest"/>
- <param name="resp" type="javax.servlet.http.HttpServletResponse"/>
- <exception name="ServletException" type="javax.servlet.ServletException"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <field name="PATH_SPEC" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="RENEWER" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[Serve delegation tokens over http for use in hftp.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.GetDelegationTokenServlet -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.GetImageServlet -->
- <class name="GetImageServlet" extends="javax.servlet.http.HttpServlet"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="GetImageServlet"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="doGet"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="request" type="javax.servlet.http.HttpServletRequest"/>
- <param name="response" type="javax.servlet.http.HttpServletResponse"/>
- <exception name="ServletException" type="javax.servlet.ServletException"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="isValidRequestor" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="remoteUser" type="java.lang.String"/>
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <doc>
- <![CDATA[This class is used in Namesystem's jetty to retrieve a file.
- Typically used by the Secondary NameNode to retrieve the image and
- edits files for periodic checkpointing.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.GetImageServlet -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.INodeSymlink -->
- <class name="INodeSymlink" extends="org.apache.hadoop.hdfs.server.namenode.INode"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <method name="isLink" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getLinkValue" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getSymlink" return="byte[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="isDirectory" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <doc>
- <![CDATA[An INode representing a symbolic link.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.INodeSymlink -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException -->
- <class name="LeaseExpiredException" extends="java.io.IOException"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="LeaseExpiredException" type="java.lang.String"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <doc>
- <![CDATA[The lease that was being used to create this file has expired.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.LeaseManager -->
- <class name="LeaseManager" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <method name="getLeaseByPath" return="org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <doc>
- <![CDATA[@return the lease containing src]]>
- </doc>
- </method>
- <method name="countLease" return="int"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[@return the number of leases currently in the system]]>
- </doc>
- </method>
- <method name="setLeasePeriod"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="softLimit" type="long"/>
- <param name="hardLimit" type="long"/>
- </method>
- <method name="toString" return="java.lang.String"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <field name="LOG" type="org.apache.commons.logging.Log"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[LeaseManager does the lease housekeeping for writing on files.
- This class also provides useful static methods for lease recovery.
-
- Lease Recovery Algorithm
- 1) Namenode retrieves lease information
- 2) For each file f in the lease, consider the last block b of f
- 2.1) Get the datanodes which contains b
- 2.2) Assign one of the datanodes as the primary datanode p
- 2.3) p obtains a new generation stamp from the namenode
- 2.4) p gets the block info from each datanode
- 2.5) p computes the minimum block length
- 2.6) p updates the datanodes, which have a valid generation stamp,
- with the new generation stamp and the minimum block length
- 2.7) p acknowledges the update results to the namenode
- 2.8) Namenode updates the BlockInfo
- 2.9) Namenode removes f from the lease
- and removes the lease once all files have been removed
- 2.10) Namenode commits changes to the edit log]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.LeaseManager -->
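- <!-- Illustrative Java-style sketch of the lease recovery algorithm listed in
-      the LeaseManager javadoc above. All names are assumptions, not
-      LeaseManager's actual members:
-      void recoverLease(Lease lease) {
-        for (FileUnderConstruction f : lease.getFiles()) {
-          Block b = f.getLastBlock();                          // step 2
-          List<DatanodeDescriptor> holders = datanodesFor(b);  // 2.1
-          DatanodeDescriptor primary = holders.get(0);         // 2.2: pick a primary p
-          long newGS = nextGenerationStamp();                  // 2.3: issued by the namenode
-          // 2.4 to 2.7 run on the primary: it collects replica lengths, truncates
-          // valid replicas to the minimum length with the new stamp, and reports back.
-          commitBlockSynchronization(b, newGS, minLength(holders)); // 2.8
-          removeFileFromLease(lease, f);                       // 2.9
-        }
-        logEditsAndSync();                                     // 2.10
-      }
- -->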
- <!-- start class org.apache.hadoop.hdfs.server.namenode.ListPathsServlet -->
- <class name="ListPathsServlet" extends="org.apache.hadoop.hdfs.server.namenode.DfsServlet"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="ListPathsServlet"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="buildRoot" return="java.util.Map"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="request" type="javax.servlet.http.HttpServletRequest"/>
- <param name="doc" type="org.znerd.xmlenc.XMLOutputter"/>
- <doc>
- <![CDATA[Build a map from the query string, setting values and defaults.]]>
- </doc>
- </method>
- <method name="doGet"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="request" type="javax.servlet.http.HttpServletRequest"/>
- <param name="response" type="javax.servlet.http.HttpServletResponse"/>
- <exception name="ServletException" type="javax.servlet.ServletException"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Service a GET request as described below.
- Request:
- {@code
- GET http://<nn>:<port>/listPaths[/<path>][<?option>[&option]*] HTTP/1.1
- }
- Where <i>option</i> (with its default) is one of:
- recursive ("no")
- filter (".*")
- exclude ("\..*\.crc")
- Response: A flat list of files/directories in the following format:
- {@code
- <listing path="..." recursive="(yes|no)" filter="..."
- time="yyyy-MM-dd hh:mm:ss UTC" version="...">
- <directory path="..." modified="yyyy-MM-dd hh:mm:ss"/>
- <file path="..." modified="yyyy-MM-dd'T'hh:mm:ssZ" accesstime="yyyy-MM-dd'T'hh:mm:ssZ"
- blocksize="..."
- replication="..." size="..."/>
- </listing>
- }]]>
- </doc>
- </method>
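- <!-- Illustrative request matching the template above (host, port and path
-      are made up):
-      GET http://namenode.example.com:50070/listPaths/user/alice?recursive=yes&filter=.*\.log HTTP/1.1
- -->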
- <field name="df" type="java.lang.ThreadLocal"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[Obtain meta-information about a filesystem.
- @see org.apache.hadoop.hdfs.HftpFileSystem]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.ListPathsServlet -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.NameNode -->
- <class name="NameNode" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols"/>
- <implements name="org.apache.hadoop.hdfs.protocol.FSConstants"/>
- <constructor name="NameNode" type="org.apache.hadoop.conf.Configuration"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Start NameNode.
- <p>
- The name-node can be started with one of the following startup options:
- <ul>
- <li>{@link StartupOption#REGULAR REGULAR} - normal name node startup</li>
- <li>{@link StartupOption#FORMAT FORMAT} - format name node</li>
- <li>{@link StartupOption#BACKUP BACKUP} - start backup node</li>
- <li>{@link StartupOption#CHECKPOINT CHECKPOINT} - start checkpoint node</li>
- <li>{@link StartupOption#UPGRADE UPGRADE} - start the cluster
- upgrade and create a snapshot of the current file system state</li>
- <li>{@link StartupOption#ROLLBACK ROLLBACK} - roll the
- cluster back to the previous state</li>
- <li>{@link StartupOption#FINALIZE FINALIZE} - finalize
- previous upgrade</li>
- <li>{@link StartupOption#IMPORT IMPORT} - import checkpoint</li>
- </ul>
- The option is passed via configuration field:
- <tt>dfs.namenode.startup</tt>
-
- The conf will be modified to reflect the actual ports on which
- the NameNode is up and running if the user passes the port as
- <code>zero</code> in the conf.
-
- @param conf the configuration
- @throws IOException]]>
- </doc>
- </constructor>
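- <!-- A minimal sketch of embedding a NameNode with this constructor. It assumes the
-   name directories (dfs.name.dir) point at writable local storage; the default-FS
-   key and port 0 are illustrative. With port 0, the constructor rewrites the conf
-   with the port it actually bound.
-     import java.net.InetSocketAddress;
-     import org.apache.hadoop.conf.Configuration;
-     import org.apache.hadoop.hdfs.HdfsConfiguration;
-     import org.apache.hadoop.hdfs.server.namenode.NameNode;
-
-     public class EmbeddedNameNode {
-       public static void main(String[] args) throws Exception {
-         Configuration conf = new HdfsConfiguration();
-         conf.set("fs.default.name", "hdfs://localhost:0");  // ephemeral RPC port
-         NameNode.format(conf);                              // destroys any existing image!
-         NameNode nn = new NameNode(conf);                    // REGULAR startup by default
-         InetSocketAddress rpc = nn.getNameNodeAddress();
-         System.out.println("NameNode RPC bound to " + rpc);
-         nn.stop();
-       }
-     }
- -->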
- <constructor name="NameNode" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </constructor>
- <method name="getProtocolVersion" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="protocol" type="java.lang.String"/>
- <param name="clientVersion" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="format"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Format a new filesystem. Destroys any filesystem that may already
- exist at this location.]]>
- </doc>
- </method>
- <method name="getNameNodeMetrics" return="org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getAddress" return="java.net.InetSocketAddress"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="address" type="java.lang.String"/>
- </method>
- <method name="setServiceAddress"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <param name="address" type="java.lang.String"/>
- <doc>
- <![CDATA[Set the configuration property for the service RPC address
- to the given address.]]>
- </doc>
- </method>
- <method name="getServiceAddress" return="java.net.InetSocketAddress"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <param name="fallback" type="boolean"/>
- <doc>
- <![CDATA[Fetches the address that services should use when connecting to the
- namenode. If the separate service RPC address is not configured, this
- returns null when fallback is false, or the default namenode address
- (used by both clients and services) when fallback is true.
- Services here are datanodes, the backup node, and any other non-client connection.]]>
- </doc>
- </method>
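- <!-- A sketch of the service RPC address helpers above; the address string is a
-   placeholder. With fallback=true, getServiceAddress falls back to the regular
-   client RPC address when no separate service address has been configured.
-     import java.net.InetSocketAddress;
-     import org.apache.hadoop.conf.Configuration;
-     import org.apache.hadoop.hdfs.HdfsConfiguration;
-     import org.apache.hadoop.hdfs.server.namenode.NameNode;
-
-     public class ServiceAddressExample {
-       public static void main(String[] args) {
-         Configuration conf = new HdfsConfiguration();
-         NameNode.setServiceAddress(conf, "namenode.example.com:8021");
-         InetSocketAddress serviceAddr = NameNode.getServiceAddress(conf, true);
-         InetSocketAddress clientAddr = NameNode.getAddress(conf);
-         System.out.println("service=" + serviceAddr + " client=" + clientAddr);
-       }
-     }
- -->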
- <method name="getAddress" return="java.net.InetSocketAddress"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- </method>
- <method name="getUri" return="java.net.URI"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="namenode" type="java.net.InetSocketAddress"/>
- </method>
- <method name="getHostPortString" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="addr" type="java.net.InetSocketAddress"/>
- <doc>
- <![CDATA[Compose a "host:port" string from the address.]]>
- </doc>
- </method>
- <method name="getRole" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getServiceRpcServerAddress" return="java.net.InetSocketAddress"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Given a configuration, get the address of the service RPC server.
- Returns null if the service RPC address is not configured.]]>
- </doc>
- </method>
- <method name="getRpcServerAddress" return="java.net.InetSocketAddress"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="setRpcServiceServerAddress"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <doc>
- <![CDATA[Modifies the configuration passed to contain the service rpc address setting]]>
- </doc>
- </method>
- <method name="setRpcServerAddress"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- </method>
- <method name="getHttpServerAddress" return="java.net.InetSocketAddress"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- </method>
- <method name="setHttpServerAddress"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- </method>
- <method name="loadNamesystem"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="initialize"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Initialize name-node.
-
- @param conf the configuration]]>
- </doc>
- </method>
- <method name="getInfoServer" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- </method>
- <method name="join"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Wait for service to finish.
- (Normally, it runs forever.)]]>
- </doc>
- </method>
- <method name="stop"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Stop all NameNode threads and wait for all to finish.]]>
- </doc>
- </method>
- <method name="getBlocks" return="org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="datanode" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo"/>
- <param name="size" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getBlockKeys" return="org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="errorReport"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
- <param name="errorCode" type="int"/>
- <param name="msg" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="register" return="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="startCheckpoint" return="org.apache.hadoop.hdfs.server.protocol.NamenodeCommand"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="endCheckpoint"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
- <param name="sig" type="org.apache.hadoop.hdfs.server.namenode.CheckpointSignature"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="journalSize" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="journal"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
- <param name="jAction" type="int"/>
- <param name="length" type="int"/>
- <param name="args" type="byte[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getDelegationToken" return="org.apache.hadoop.security.token.Token"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="renewer" type="org.apache.hadoop.io.Text"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="renewDelegationToken" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="token" type="org.apache.hadoop.security.token.Token"/>
- <exception name="SecretManager.InvalidToken" type="org.apache.hadoop.security.token.SecretManager.InvalidToken"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="cancelDelegationToken"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="token" type="org.apache.hadoop.security.token.Token"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
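- <!-- A minimal sketch of the delegation token round trip on the methods above; the
-   renewer name is illustrative and nn is assumed to be an already-running NameNode
-   (or an RPC proxy speaking the same interface). Token is kept raw, as in the
-   signatures listed in this descriptor.
-     import org.apache.hadoop.hdfs.server.namenode.NameNode;
-     import org.apache.hadoop.io.Text;
-     import org.apache.hadoop.security.token.Token;
-
-     public class DelegationTokenExample {
-       static void roundTrip(NameNode nn) throws Exception {
-         Token token = nn.getDelegationToken(new Text("jobtracker"));
-         long newExpiry = nn.renewDelegationToken(token);  // extends the token lifetime
-         System.out.println("token valid until " + newExpiry);
-         nn.cancelDelegationToken(token);                  // token can no longer be used
-       }
-     }
- -->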
- <method name="getBlockLocations" return="org.apache.hadoop.hdfs.protocol.LocatedBlocks"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="offset" type="long"/>
- <param name="length" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="getServerDefaults" return="org.apache.hadoop.fs.FsServerDefaults"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="create"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="masked" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <param name="clientName" type="java.lang.String"/>
- <param name="flag" type="org.apache.hadoop.io.EnumSetWritable"/>
- <param name="createParent" type="boolean"/>
- <param name="replication" type="short"/>
- <param name="blockSize" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="append" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="clientName" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="recoverLease" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="clientName" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="setReplication" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="replication" type="short"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="setPermission"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="permissions" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="setOwner"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="username" type="java.lang.String"/>
- <param name="groupname" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="addBlock" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="clientName" type="java.lang.String"/>
- <param name="previous" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="excludedNodes" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="abandonBlock"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="src" type="java.lang.String"/>
- <param name="holder" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[The client needs to give up on the block.]]>
- </doc>
- </method>
- <method name="complete" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="clientName" type="java.lang.String"/>
- <param name="last" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="reportBadBlocks"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="blocks" type="org.apache.hadoop.hdfs.protocol.LocatedBlock[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[The client has detected an error on the specified located blocks
- and is reporting them to the server. For now, the namenode will
- mark the block as corrupt. In the future we might
- check whether the blocks are actually corrupt.]]>
- </doc>
- </method>
- <method name="updateBlockForPipeline" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="clientName" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="updatePipeline"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="clientName" type="java.lang.String"/>
- <param name="oldBlock" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="newBlock" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="newNodes" type="org.apache.hadoop.hdfs.protocol.DatanodeID[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="commitBlockSynchronization"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="newgenerationstamp" type="long"/>
- <param name="newlength" type="long"/>
- <param name="closeFile" type="boolean"/>
- <param name="deleteblock" type="boolean"/>
- <param name="newtargets" type="org.apache.hadoop.hdfs.protocol.DatanodeID[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="getPreferredBlockSize" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="filename" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="rename" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="dst" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="concat"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="trg" type="java.lang.String"/>
- <param name="src" type="java.lang.String[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="rename"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="dst" type="java.lang.String"/>
- <param name="options" type="org.apache.hadoop.fs.Options.Rename[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="delete" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="delete" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="recursive" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="mkdirs" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="masked" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <param name="createParent" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="renewLease"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="clientName" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getListing" return="org.apache.hadoop.hdfs.protocol.DirectoryListing"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="startAfter" type="byte[]"/>
- <param name="needLocation" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getFileInfo" return="org.apache.hadoop.hdfs.protocol.HdfsFileStatus"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get the file info for a specific file.
- @param src The string representation of the path to the file
- @return object containing information regarding the file
- or null if file not found]]>
- </doc>
- </method>
- <method name="getFileLinkInfo" return="org.apache.hadoop.hdfs.protocol.HdfsFileStatus"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get the file info for a specific file. If the path refers to a
- symlink then the FileStatus of the symlink is returned.
- @param src The string representation of the path to the file
- @return object containing information regarding the file
- or null if file not found]]>
- </doc>
- </method>
- <method name="getStats" return="long[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="getDatanodeReport" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="type" type="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="setSafeMode" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="isInSafeMode" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Is the cluster currently in safe mode?]]>
- </doc>
- </method>
- <method name="restoreFailedStorage" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="arg" type="java.lang.String"/>
- <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
- <doc>
- <![CDATA[@throws AccessControlException
- {@inheritDoc}]]>
- </doc>
- </method>
- <method name="saveNamespace"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="refreshNodes"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Refresh the list of datanodes that are allowed to connect to the namenode.
- Re-reads the configuration by creating a new HdfsConfiguration object and
- uses the host files named in the configuration to update the list.]]>
- </doc>
- </method>
- <method name="getEditLogSize" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns the size of the current edit log.]]>
- </doc>
- </method>
- <method name="rollEditLog" return="org.apache.hadoop.hdfs.server.namenode.CheckpointSignature"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Roll the edit log.]]>
- </doc>
- </method>
- <method name="rollFsImage"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="sig" type="org.apache.hadoop.hdfs.server.namenode.CheckpointSignature"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Roll the image]]>
- </doc>
- </method>
- <method name="finalizeUpgrade"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="distributedUpgradeProgress" return="org.apache.hadoop.hdfs.server.common.UpgradeStatusReport"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="metaSave"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="filename" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Dumps namenode state into the specified file.]]>
- </doc>
- </method>
- <method name="listCorruptFileBlocks" return="java.util.Collection"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="path" type="java.lang.String"/>
- <param name="startBlockAfter" type="java.lang.String"/>
- <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[@param path
- Sub-tree used in querying corrupt files
- @param startBlockAfter
- Paging support: pass in the last block returned from the previous
- call, and the next batch of corrupt blocks after that point is returned
- @return a list in which each entry describes a corrupt file/block
- @throws AccessControlException
- @throws IOException]]>
- </doc>
- </method>
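- <!-- A sketch of the paging pattern described above: pass the last block returned by
-   the previous call as startBlockAfter until no more entries come back. The entry
-   type is treated opaquely here because only the raw Collection is declared above;
-   deriving the next cookie from an entry is shown as a placeholder.
-     import java.util.Collection;
-     import org.apache.hadoop.hdfs.server.namenode.NameNode;
-
-     public class CorruptFileLister {
-       static void listAll(NameNode nn) throws Exception {
-         String cookie = null;
-         while (true) {
-           Collection<?> batch = nn.listCorruptFileBlocks("/", cookie);
-           if (batch.isEmpty()) {
-             break;
-           }
-           String last = null;
-           for (Object entry : batch) {
-             System.out.println(entry);   // one corrupt file/block per entry
-             last = entry.toString();     // placeholder: extract the block name here
-           }
-           cookie = last;
-         }
-       }
-     }
- -->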
- <method name="getContentSummary" return="org.apache.hadoop.fs.ContentSummary"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="path" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="setQuota"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="path" type="java.lang.String"/>
- <param name="namespaceQuota" type="long"/>
- <param name="diskspaceQuota" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="fsync"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="clientName" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="setTimes"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="mtime" type="long"/>
- <param name="atime" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="createSymlink"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="target" type="java.lang.String"/>
- <param name="link" type="java.lang.String"/>
- <param name="dirPerms" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <param name="createParent" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="getLinkTarget" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="path" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="registerDatanode" return="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="nodeReg" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="sendHeartbeat" return="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="nodeReg" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
- <param name="capacity" type="long"/>
- <param name="dfsUsed" type="long"/>
- <param name="remaining" type="long"/>
- <param name="xmitsInProgress" type="int"/>
- <param name="xceiverCount" type="int"/>
- <param name="failedVolumes" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[A data node notifies the name node that it is alive.
- Returns an array of block-oriented commands for the datanode to execute.
- These will be either transfer or delete operations.]]>
- </doc>
- </method>
- <method name="blockReport" return="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="nodeReg" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
- <param name="blocks" type="long[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="blockReceived"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="nodeReg" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
- <param name="blocks" type="org.apache.hadoop.hdfs.protocol.Block[]"/>
- <param name="delHints" type="java.lang.String[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="errorReport"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="nodeReg" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
- <param name="errorCode" type="int"/>
- <param name="msg" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Handle an error report from a datanode.]]>
- </doc>
- </method>
- <method name="versionRequest" return="org.apache.hadoop.hdfs.server.protocol.NamespaceInfo"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="processUpgradeCommand" return="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="comm" type="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="verifyRequest"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="nodeReg" type="org.apache.hadoop.hdfs.server.protocol.NodeRegistration"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Verify a request.
- 
- Verifies that the datanode version and registration ID are correct
- and that the datanode does not need to be shut down.
-
- @param nodeReg data node registration
- @throws IOException]]>
- </doc>
- </method>
- <method name="verifyVersion"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="version" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Verify version.
-
- @param version
- @throws IOException]]>
- </doc>
- </method>
- <method name="getFsImageName" return="java.io.File"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns the name of the fsImage file]]>
- </doc>
- </method>
- <method name="getFSImage" return="org.apache.hadoop.hdfs.server.namenode.FSImage"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getFsImageNameCheckpoint" return="java.io.File[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns the name of the fsImage file uploaded by periodic
- checkpointing]]>
- </doc>
- </method>
- <method name="getNameNodeAddress" return="java.net.InetSocketAddress"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Returns the address on which the NameNode is listening.
- @return the address on which the NameNode is listening.]]>
- </doc>
- </method>
- <method name="getHttpAddress" return="java.net.InetSocketAddress"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Returns the address of the NameNode's HTTP server,
- which is used to access the name-node web UI.
-
- @return the http address.]]>
- </doc>
- </method>
- <method name="refreshServiceAcl"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="refreshUserToGroupsMappings"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="refreshSuperUserGroupsConfiguration"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="createNameNode" return="org.apache.hadoop.hdfs.server.namenode.NameNode"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="argv" type="java.lang.String[]"/>
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="main"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="argv" type="java.lang.String[]"/>
- <exception name="Exception" type="java.lang.Exception"/>
- </method>
- <field name="DEFAULT_PORT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="LOG" type="org.apache.commons.logging.Log"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="stateChangeLog" type="org.apache.commons.logging.Log"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="namesystem" type="org.apache.hadoop.hdfs.server.namenode.FSNamesystem"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="role" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="serviceRpcServer" type="org.apache.hadoop.ipc.Server"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <doc>
- <![CDATA[RPC server for HDFS services communication.
- The BackupNode, datanodes, and all other services
- should connect to this server if it is
- configured. Clients should only go to NameNode#server.]]>
- </doc>
- </field>
- <field name="rpcAddress" type="java.net.InetSocketAddress"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <doc>
- <![CDATA[RPC server address]]>
- </doc>
- </field>
- <field name="serviceRPCAddress" type="java.net.InetSocketAddress"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Address of the RPC server used by datanodes and other services]]>
- </doc>
- </field>
- <field name="httpServer" type="org.apache.hadoop.http.HttpServer"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <doc>
- <![CDATA[httpServer]]>
- </doc>
- </field>
- <field name="httpAddress" type="java.net.InetSocketAddress"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <doc>
- <![CDATA[HTTP server address]]>
- </doc>
- </field>
- <field name="stopRequested" type="boolean"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <doc>
- <![CDATA[only used for testing purposes]]>
- </doc>
- </field>
- <field name="nodeRegistration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Registration information of this name-node]]>
- </doc>
- </field>
- <doc>
- <![CDATA[NameNode serves as both directory namespace manager and
- "inode table" for the Hadoop DFS. There is a single NameNode
- running in any DFS deployment. (Well, except when there
- is a second backup/failover NameNode.)
- The NameNode controls two critical tables:
- 1) filename->blocksequence (namespace)
- 2) block->machinelist ("inodes")
- The first table is stored on disk and is very precious.
- The second table is rebuilt every time the NameNode comes
- up.
- 'NameNode' refers to both this class as well as the 'NameNode server'.
- The 'FSNamesystem' class actually performs most of the filesystem
- management. The majority of the 'NameNode' class itself is concerned
- with exposing the IPC interface and the http server to the outside world,
- plus some configuration management.
- NameNode implements the ClientProtocol interface, which allows
- clients to ask for DFS services. ClientProtocol is not
- designed for direct use by authors of DFS client code. End-users
- should instead use the org.apache.hadoop.fs.FileSystem class.
- NameNode also implements the DatanodeProtocol interface, used by
- DataNode programs that actually store DFS data blocks. These
- methods are invoked repeatedly and automatically by all the
- DataNodes in a DFS deployment.
- NameNode also implements the NamenodeProtocol interface, used by
- secondary namenodes and rebalancing processes to get partial namenode
- state, for example a partial blocksMap.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.NameNode -->
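- <!-- A minimal client-side sketch following the note above that end users should go
-   through the FileSystem API rather than ClientProtocol; the filesystem URI is a
-   placeholder.
-     import org.apache.hadoop.conf.Configuration;
-     import org.apache.hadoop.fs.FileSystem;
-     import org.apache.hadoop.fs.Path;
-
-     public class DfsClientExample {
-       public static void main(String[] args) throws Exception {
-         Configuration conf = new Configuration();
-         conf.set("fs.default.name", "hdfs://namenode.example.com:8020");
-         FileSystem fs = FileSystem.get(conf);
-         fs.mkdirs(new Path("/tmp/demo"));
-         System.out.println(fs.getFileStatus(new Path("/tmp/demo")));
-         fs.close();
-       }
-     }
- -->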
- <!-- start class org.apache.hadoop.hdfs.server.namenode.NamenodeFsck -->
- <class name="NamenodeFsck" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <method name="fsck"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Check files on DFS, starting from the indicated path.]]>
- </doc>
- </method>
- <field name="LOG" type="org.apache.commons.logging.Log"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="CORRUPT_STATUS" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="HEALTHY_STATUS" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="NONEXISTENT_STATUS" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="FAILURE_STATUS" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="FIXING_NONE" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Don't attempt any fixing.]]>
- </doc>
- </field>
- <field name="FIXING_MOVE" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Move corrupted files to /lost+found.]]>
- </doc>
- </field>
- <field name="FIXING_DELETE" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Delete corrupted files.]]>
- </doc>
- </field>
- <doc>
- <![CDATA[This class provides rudimentary checking of DFS volumes for errors and
- sub-optimal conditions.
- <p>The tool scans all files and directories, starting from an indicated
- root path. The following abnormal conditions are detected and handled:</p>
- <ul>
- <li>files with blocks that are completely missing from all datanodes.<br/>
- In this case the tool can perform one of the following actions:
- <ul>
- <li>none ({@link #FIXING_NONE})</li>
- <li>move corrupted files to /lost+found directory on DFS
- ({@link #FIXING_MOVE}). Remaining data blocks are saved as
- block chains, representing the longest consecutive series of valid blocks.</li>
- <li>delete corrupted files ({@link #FIXING_DELETE})</li>
- </ul>
- </li>
- <li>detect files with under-replicated or over-replicated blocks</li>
- </ul>
- Additionally, the tool collects detailed overall DFS statistics, and
- optionally can print detailed statistics on block locations and replication
- factors of each file.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.NamenodeFsck -->
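- <!-- NamenodeFsck runs inside the namenode; it is normally triggered from the fsck
-   client tool. A minimal sketch, assuming the stock command-line class
-   org.apache.hadoop.hdfs.tools.DFSck; the "-move" and "-delete" options correspond
-   to FIXING_MOVE and FIXING_DELETE above.
-     import org.apache.hadoop.conf.Configuration;
-     import org.apache.hadoop.hdfs.tools.DFSck;
-     import org.apache.hadoop.util.ToolRunner;
-
-     public class FsckExample {
-       public static void main(String[] args) throws Exception {
-         Configuration conf = new Configuration();
-         int rc = ToolRunner.run(new DFSck(conf), new String[] { "/", "-files", "-blocks" });
-         System.exit(rc);
-       }
-     }
- -->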
- <!-- start interface org.apache.hadoop.hdfs.server.namenode.NameNodeMXBean -->
- <interface name="NameNodeMXBean" abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <method name="getVersion" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Gets the version of Hadoop.
-
- @return the version]]>
- </doc>
- </method>
- <method name="getUsed" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Gets the used space by data nodes.
-
- @return the used space by data nodes]]>
- </doc>
- </method>
- <method name="getFree" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Gets total non-used raw bytes.
-
- @return total non-used raw bytes]]>
- </doc>
- </method>
- <method name="getTotal" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Gets total raw bytes including non-dfs used space.
-
- @return the total raw bytes including non-dfs used space]]>
- </doc>
- </method>
- <method name="getSafemode" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Gets the safemode status
-
- @return the safemode status]]>
- </doc>
- </method>
- <method name="isUpgradeFinalized" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Checks if upgrade is finalized.
-
- @return true, if upgrade is finalized]]>
- </doc>
- </method>
- <method name="getNonDfsUsedSpace" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Gets total used space by data nodes for non DFS purposes such as storing
- temporary files on the local file system
-
- @return the non dfs space of the cluster]]>
- </doc>
- </method>
- <method name="getPercentUsed" return="float"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Gets the total used space by data nodes as percentage of total capacity
-
- @return the percentage of used space on the cluster.]]>
- </doc>
- </method>
- <method name="getPercentRemaining" return="float"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Gets the total remaining space by data nodes as percentage of total
- capacity
-
- @return the percentage of the remaining space on the cluster]]>
- </doc>
- </method>
- <method name="getTotalBlocks" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Gets the total number of blocks on the cluster.
-
- @return the total number of blocks of the cluster]]>
- </doc>
- </method>
- <method name="getTotalFiles" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Gets the total number of files on the cluster
-
- @return the total number of files on the cluster]]>
- </doc>
- </method>
- <method name="getThreads" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Gets the number of threads.
-
- @return the number of threads]]>
- </doc>
- </method>
- <method name="getLiveNodes" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Gets the live node information of the cluster.
-
- @return the live node information]]>
- </doc>
- </method>
- <method name="getDeadNodes" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Gets the dead node information of the cluster.
-
- @return the dead node information]]>
- </doc>
- </method>
- <method name="getDecomNodes" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Gets the decommissioning node information of the cluster.
-
- @return the decommissioning node information]]>
- </doc>
- </method>
- <doc>
- <![CDATA[This is the JMX management interface for namenode information]]>
- </doc>
- </interface>
- <!-- end interface org.apache.hadoop.hdfs.server.namenode.NameNodeMXBean -->
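- <!-- A sketch of reading this MXBean over a remote JMX connection. The JMX service
-   URL is a placeholder, and "Hadoop:service=NameNode,name=NameNodeInfo" is the
-   ObjectName the namenode is assumed to register the bean under.
-     import javax.management.JMX;
-     import javax.management.MBeanServerConnection;
-     import javax.management.ObjectName;
-     import javax.management.remote.JMXConnector;
-     import javax.management.remote.JMXConnectorFactory;
-     import javax.management.remote.JMXServiceURL;
-     import org.apache.hadoop.hdfs.server.namenode.NameNodeMXBean;
-
-     public class NameNodeJmxExample {
-       public static void main(String[] args) throws Exception {
-         JMXServiceURL url = new JMXServiceURL(
-             "service:jmx:rmi:///jndi/rmi://namenode.example.com:8004/jmxrmi");
-         JMXConnector jmxc = JMXConnectorFactory.connect(url);
-         MBeanServerConnection conn = jmxc.getMBeanServerConnection();
-         NameNodeMXBean bean = JMX.newMXBeanProxy(conn,
-             new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo"),
-             NameNodeMXBean.class);
-         System.out.println("version=" + bean.getVersion()
-             + " safemode=" + bean.getSafemode()
-             + " used=" + bean.getPercentUsed() + "%");
-         jmxc.close();
-       }
-     }
- -->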
- <!-- start class org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException -->
- <class name="NotReplicatedYetException" extends="java.io.IOException"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="NotReplicatedYetException" type="java.lang.String"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <doc>
- <![CDATA[The file has not finished being written to enough datanodes yet.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.RenewDelegationTokenServlet -->
- <class name="RenewDelegationTokenServlet" extends="org.apache.hadoop.hdfs.server.namenode.DfsServlet"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="RenewDelegationTokenServlet"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="doGet"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="req" type="javax.servlet.http.HttpServletRequest"/>
- <param name="resp" type="javax.servlet.http.HttpServletResponse"/>
- <exception name="ServletException" type="javax.servlet.ServletException"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <field name="PATH_SPEC" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="TOKEN" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[Renew delegation tokens over http for use in hftp.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.RenewDelegationTokenServlet -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.SafeModeException -->
- <class name="SafeModeException" extends="java.io.IOException"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="SafeModeException"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="SafeModeException" type="java.lang.String, org.apache.hadoop.hdfs.server.namenode.FSNamesystem.SafeModeInfo"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <doc>
- <![CDATA[This exception is thrown when the name node is in safe mode.
- Clients cannot modify the namespace until safe mode is off.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.SafeModeException -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode -->
- <class name="SecondaryNameNode" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="java.lang.Runnable"/>
- <constructor name="SecondaryNameNode" type="org.apache.hadoop.conf.Configuration"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Create a connection to the primary namenode.]]>
- </doc>
- </constructor>
- <method name="toString" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="getHttpAddress" return="java.net.InetSocketAddress"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- </method>
- <method name="shutdown"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Shut down this instance of the SecondaryNameNode.
- Returns only after shutdown is complete.]]>
- </doc>
- </method>
- <method name="run"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="doWork"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="main"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="argv" type="java.lang.String[]"/>
- <exception name="Exception" type="java.lang.Exception"/>
- <doc>
- <![CDATA[main() has some simple utility methods.
- @param argv Command line parameters.
- @exception Exception if the filesystem does not exist.]]>
- </doc>
- </method>
- <field name="LOG" type="org.apache.commons.logging.Log"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[The Secondary NameNode is a helper to the primary NameNode.
- The Secondary is responsible for supporting periodic checkpoints
- of the HDFS metadata. The current design allows only one Secondary
- NameNode per HDFS cluster.
- The Secondary NameNode is a daemon that periodically wakes
- up (determined by the schedule specified in the configuration),
- triggers a periodic checkpoint and then goes back to sleep.
- The Secondary NameNode uses the ClientProtocol to talk to the
- primary NameNode.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode -->
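- <!-- Example (hedged sketch, not generated from the Hadoop sources): one way to
-      embed the SecondaryNameNode checkpoint daemon described above, using only
-      the constructor, the Runnable contract, and shutdown() listed for this class.
-      The checkpoint schedule comes from the Configuration; the specific keys are
-      not part of this listing.
-
-      import java.io.IOException;
-      import org.apache.hadoop.conf.Configuration;
-      import org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode;
-
-      public class CheckpointDaemon {
-        public static void main(String[] args) throws IOException {
-          Configuration conf = new Configuration();                  // reads the usual site files
-          final SecondaryNameNode snn = new SecondaryNameNode(conf); // connects to the primary namenode
-          Runtime.getRuntime().addShutdownHook(new Thread() {
-            public void run() { snn.shutdown(); }                    // returns only after shutdown completes
-          });
-          new Thread(snn, "SecondaryNameNode").start();              // run() checkpoints periodically
-        }
-      }
- -->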
- <!-- start class org.apache.hadoop.hdfs.server.namenode.StreamFile -->
- <class name="StreamFile" extends="org.apache.hadoop.hdfs.server.namenode.DfsServlet"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="StreamFile"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getDFSClient" return="org.apache.hadoop.hdfs.DFSClient"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="request" type="javax.servlet.http.HttpServletRequest"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="InterruptedException" type="java.lang.InterruptedException"/>
- <doc>
- <![CDATA[Get a DFSClient for connecting to DFS.]]>
- </doc>
- </method>
- <method name="doGet"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="request" type="javax.servlet.http.HttpServletRequest"/>
- <param name="response" type="javax.servlet.http.HttpServletResponse"/>
- <exception name="ServletException" type="javax.servlet.ServletException"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <field name="CONTENT_LENGTH" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.StreamFile -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException -->
- <class name="UnsupportedActionException" extends="java.io.IOException"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="UnsupportedActionException" type="java.lang.String"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <doc>
- <![CDATA[This exception is thrown when an operation is not supported.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.UpgradeObjectNamenode -->
- <class name="UpgradeObjectNamenode" extends="org.apache.hadoop.hdfs.server.common.UpgradeObject"
- abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="UpgradeObjectNamenode"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="processUpgradeCommand" return="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
- abstract="true" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="command" type="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Process an upgrade command.
- RPC has only one very generic command for all upgrade related inter
- component communications.
- The actual command recognition and execution should be handled here.
- The reply is sent back also as an UpgradeCommand.
-
- @param command
- @return the reply command which is analyzed on the client side.]]>
- </doc>
- </method>
- <method name="getType" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="startUpgrade" return="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="forceProceed"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <doc>
- <![CDATA[Base class for name-node upgrade objects.
- Data-node upgrades are run in separate threads.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.UpgradeObjectNamenode -->
- </package>
- <package name="org.apache.hadoop.hdfs.server.namenode.metrics">
- <!-- start interface org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean -->
- <interface name="FSNamesystemMBean" abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <method name="getFSState" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[The state of the file system: Safemode or Operational
- @return the state]]>
- </doc>
- </method>
- <method name="getBlocksTotal" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Number of allocated blocks in the system
- @return - number of allocated blocks]]>
- </doc>
- </method>
- <method name="getCapacityTotal" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Total storage capacity
- @return - total capacity in bytes]]>
- </doc>
- </method>
- <method name="getCapacityRemaining" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Free (unused) storage capacity
- @return - free capacity in bytes]]>
- </doc>
- </method>
- <method name="getCapacityUsed" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Used storage capacity
- @return - used capacity in bytes]]>
- </doc>
- </method>
- <method name="getFilesTotal" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Total number of files and directories
- @return - num of files and directories]]>
- </doc>
- </method>
- <method name="getPendingReplicationBlocks" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Blocks pending to be replicated
- @return - num of blocks to be replicated]]>
- </doc>
- </method>
- <method name="getUnderReplicatedBlocks" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Blocks under replicated
- @return - num of blocks under replicated]]>
- </doc>
- </method>
- <method name="getScheduledReplicationBlocks" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Blocks scheduled for replication
- @return - num of blocks scheduled for replication]]>
- </doc>
- </method>
- <method name="getTotalLoad" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Total Load on the FSNamesystem
- @return - total load of FSNamesystem]]>
- </doc>
- </method>
- <method name="getNumLiveDataNodes" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Number of Live data nodes
- @return number of live data nodes]]>
- </doc>
- </method>
- <method name="getNumDeadDataNodes" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Number of dead data nodes
- @return number of dead data nodes]]>
- </doc>
- </method>
- <doc>
- <![CDATA[This interface defines the methods to get the status of the FSNamesystem of
- a name node.
- It is also used for publishing via JMX (hence we follow the JMX naming
- convention.)
-
- Note we have not used the MetricsDynamicMBeanBase to implement this
- because the interface for the NameNodeStateMBean is stable and should
- be published as an interface.
-
- <p>
- Name Node runtime activity statistics are reported in another MBean
- @see org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeActivityMBean]]>
- </doc>
- </interface>
- <!-- end interface org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean -->
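- <!-- Example (hedged sketch): reading FSNamesystemMBean attributes through JMX
-      from inside the NameNode JVM. The ObjectName below is an assumption made
-      for illustration; the actual registered name is not part of this listing.
-      Attribute names follow the standard JMX getter convention (getFSState
-      becomes "FSState", and so on).
-
-      import java.lang.management.ManagementFactory;
-      import javax.management.MBeanServer;
-      import javax.management.ObjectName;
-
-      public class FsStatePoller {
-        public static void main(String[] args) throws Exception {
-          MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
-          ObjectName name =
-              new ObjectName("hadoop:service=NameNode,name=FSNamesystemState"); // assumed name
-          String state = (String) mbs.getAttribute(name, "FSState");      // Safemode or Operational
-          long blocks  = (Long)   mbs.getAttribute(name, "BlocksTotal");  // allocated blocks
-          long used    = (Long)   mbs.getAttribute(name, "CapacityUsed"); // bytes
-          System.out.println(state + " blocks=" + blocks + " used=" + used);
-        }
-      }
- -->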
- <!-- start class org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMetrics -->
- <class name="FSNamesystemMetrics" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.metrics.Updater"/>
- <constructor name="FSNamesystemMetrics" type="org.apache.hadoop.hdfs.server.namenode.FSNamesystem, org.apache.hadoop.conf.Configuration"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="doUpdates"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="unused" type="org.apache.hadoop.metrics.MetricsContext"/>
- <doc>
- <![CDATA[Since this object is a registered updater, this method will be called
- periodically, e.g. every 5 seconds.
- We set the metrics value within this function before pushing it out.
- FSNamesystem updates its own local variables, which are
- lightweight compared to Metrics counters.
- Some of the metrics are explicitly cast to int because some metrics collectors
- do not handle long values. It is safe to cast to int for now as all these
- values fit in an int.
- Metrics related to DFS capacity are stored in bytes, which do not fit in an
- int, so they are rounded to GB.]]>
- </doc>
- </method>
- <field name="numExpiredHeartbeats" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[This class is for maintaining the various FSNamesystem status metrics
- and publishing them through the metrics interfaces.
- The FSNamesystem creates and registers the JMX MBean.
- <p>
- This class has a number of metrics variables that are publicly accessible;
- these variables (objects) have methods to update their values;
- for example:
- <p> {@link #filesTotal}.set()]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMetrics -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeActivityMBean -->
- <class name="NameNodeActivityMBean" extends="org.apache.hadoop.metrics.util.MetricsDynamicMBeanBase"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="NameNodeActivityMBean" type="org.apache.hadoop.metrics.util.MetricsRegistry"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </constructor>
- <method name="shutdown"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <doc>
- <![CDATA[This is the JMX MBean for reporting the NameNode Activity.
- The MBean is registered under the name
- "hadoop:service=NameNode,name=NameNodeActivity"
-
- Many of the activity metrics are sampled and averaged on an interval
- which can be specified in the metrics config file.
- <p>
- For the metrics that are sampled and averaged, one must specify
- a metrics context that does periodic update calls. Most metrics contexts do.
- The default Null metrics context however does NOT. So if you aren't
- using any other metrics context then you can turn on the viewing and averaging
- of sampled metrics by specifying the following two lines
- in the hadoop-metrics.properties file:
- <pre>
- dfs.class=org.apache.hadoop.metrics.spi.NullContextWithUpdateThread
- dfs.period=10
- </pre>
- <p>
- Note that the metrics are collected regardless of the context used.
- The context with the update thread is used to average the data periodically.
- Implementation detail: we use a dynamic MBean that gets the list of metrics
- from the metrics registry passed as an argument to the constructor.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeActivityMBean -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics -->
- <class name="NameNodeMetrics" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.metrics.Updater"/>
- <constructor name="NameNodeMetrics" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="shutdown"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="doUpdates"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="unused" type="org.apache.hadoop.metrics.MetricsContext"/>
- <doc>
- <![CDATA[Since this object is a registered updater, this method will be called
- periodically, e.g. every 5 seconds.]]>
- </doc>
- </method>
- <method name="resetAllMinMax"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <field name="registry" type="org.apache.hadoop.metrics.util.MetricsRegistry"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="numCreateFileOps" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="numFilesCreated" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="numFilesAppended" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="numGetBlockLocations" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="numFilesRenamed" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="numGetListingOps" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="numDeleteFileOps" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="numFilesDeleted" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="numFileInfoOps" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="numAddBlockOps" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="numcreateSymlinkOps" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="numgetLinkTargetOps" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="transactions" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="syncs" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="transactionsBatchedInSync" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="blockReport" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="safeModeTime" type="org.apache.hadoop.metrics.util.MetricsIntValue"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="fsImageLoadTime" type="org.apache.hadoop.metrics.util.MetricsIntValue"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="numBlocksCorrupted" type="org.apache.hadoop.metrics.util.MetricsIntValue"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="numFilesInGetListingOps" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[This class is for maintaining the various NameNode activity statistics
- and publishing them through the metrics interfaces.
- This also registers the JMX MBean for RPC.
- <p>
- This class has a number of metrics variables that are publicly accessible;
- these variables (objects) have methods to update their values;
- for example:
- <p> {@link #syncs}.inc()]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics -->
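- <!-- Example (hedged sketch) of the update pattern named in the class javadoc
-      above ("{@link #syncs}.inc()"): callers mutate the public metric fields
-      directly, and the registered updater pushes the values out on the next
-      doUpdates() cycle. The surrounding NameNode code path is assumed; this is
-      a fragment, not a complete program.
-
-      void recordCreateFile(NameNodeMetrics metrics, long syncMillis) {
-        metrics.numFilesCreated.inc();     // one more file created
-        metrics.numCreateFileOps.inc();    // one more create operation served
-        metrics.syncs.inc(syncMillis);     // latency of the edit-log sync, in ms
-      }
- -->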
- </package>
- <package name="org.apache.hadoop.hdfs.server.protocol">
- <!-- start class org.apache.hadoop.hdfs.server.protocol.BlockCommand -->
- <class name="BlockCommand" extends="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="BlockCommand"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="BlockCommand" type="int, java.util.List"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Create BlockCommand for transferring blocks to another datanode
- @param blocktargetlist blocks to be transferred]]>
- </doc>
- </constructor>
- <constructor name="BlockCommand" type="int, org.apache.hadoop.hdfs.protocol.Block[]"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Create BlockCommand for the given action
- @param blocks blocks related to the action]]>
- </doc>
- </constructor>
- <method name="getBlocks" return="org.apache.hadoop.hdfs.protocol.Block[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getTargets" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo[][]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <doc>
- <![CDATA[A BlockCommand is an instruction to a datanode
- regarding some blocks under its control. It tells
- the DataNode to either invalidate a set of indicated
- blocks, or to copy a set of indicated blocks to
- another DataNode.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.protocol.BlockCommand -->
- <!-- start class org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand -->
- <class name="BlockRecoveryCommand" extends="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="BlockRecoveryCommand"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Create empty BlockRecoveryCommand.]]>
- </doc>
- </constructor>
- <constructor name="BlockRecoveryCommand" type="int"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Create BlockRecoveryCommand with
- the specified capacity for recovering blocks.]]>
- </doc>
- </constructor>
- <method name="getRecoveringBlocks" return="java.util.Collection"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Return the list of recovering blocks.]]>
- </doc>
- </method>
- <method name="add"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="block" type="org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock"/>
- <doc>
- <![CDATA[Add recovering block to the command.]]>
- </doc>
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <doc>
- <![CDATA[BlockRecoveryCommand is an instruction to a data-node to recover
- the specified blocks.
- The data-node that receives this command treats itself as a primary
- data-node in the recovery process.
- Block recovery is identified by a recoveryId, which is also the new
- generation stamp, which the block will have after the recovery succeeds.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand -->
- <!-- start class org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock -->
- <class name="BlockRecoveryCommand.RecoveringBlock" extends="org.apache.hadoop.hdfs.protocol.LocatedBlock"
- abstract="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="BlockRecoveryCommand.RecoveringBlock"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Create empty RecoveringBlock.]]>
- </doc>
- </constructor>
- <constructor name="BlockRecoveryCommand.RecoveringBlock" type="org.apache.hadoop.hdfs.protocol.Block, org.apache.hadoop.hdfs.protocol.DatanodeInfo[], long"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Create RecoveringBlock.]]>
- </doc>
- </constructor>
- <method name="getNewGenerationStamp" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Return the new generation stamp of the block,
- which also plays the role of the recovery id.]]>
- </doc>
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <doc>
- <![CDATA[This is a block with locations from which it should be recovered
- and the new generation stamp, which the block will have after
- successful recovery.
-
- The new generation stamp of the block also plays the role of the recovery id.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock -->
- <!-- start class org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations -->
- <class name="BlocksWithLocations" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.io.Writable"/>
- <constructor name="BlocksWithLocations" type="org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations[]"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Constructor with one parameter]]>
- </doc>
- </constructor>
- <method name="getBlocks" return="org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[getter]]>
- </doc>
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[serialization method]]>
- </doc>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[deserialization method]]>
- </doc>
- </method>
- <doc>
- <![CDATA[A class to implement an array of BlockLocations.
- It provides efficient customized serialization/deserialization methods
- instead of using the default array (de)serialization provided by RPC.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations -->
- <!-- start class org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations -->
- <class name="BlocksWithLocations.BlockWithLocations" extends="java.lang.Object"
- abstract="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.io.Writable"/>
- <constructor name="BlocksWithLocations.BlockWithLocations"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[default constructor]]>
- </doc>
- </constructor>
- <constructor name="BlocksWithLocations.BlockWithLocations" type="org.apache.hadoop.hdfs.protocol.Block, java.lang.String[]"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[constructor]]>
- </doc>
- </constructor>
- <method name="getBlock" return="org.apache.hadoop.hdfs.protocol.Block"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[get the block]]>
- </doc>
- </method>
- <method name="getDatanodes" return="java.lang.String[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[get the block's locations]]>
- </doc>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[deserialization method]]>
- </doc>
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[serialization method]]>
- </doc>
- </method>
- <doc>
- <![CDATA[A class to keep track of a block and its locations]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations -->
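- <!-- Example (hedged sketch) of the Writable round trip that BlocksWithLocations
-      and its nested BlockWithLocations rely on: write(DataOutput) produces the
-      compact custom encoding, and readFields(DataInput) repopulates a
-      default-constructed instance. The helper below is illustrative only.
-
-      import java.io.*;
-      import org.apache.hadoop.io.Writable;
-
-      final class WritableRoundTrip {
-        static byte[] serialize(Writable w) throws IOException {
-          ByteArrayOutputStream bytes = new ByteArrayOutputStream();
-          w.write(new DataOutputStream(bytes));   // custom, compact encoding
-          return bytes.toByteArray();
-        }
-
-        static <T extends Writable> T deserialize(T empty, byte[] data) throws IOException {
-          empty.readFields(new DataInputStream(new ByteArrayInputStream(data)));
-          return empty;                           // fields now populated from the stream
-        }
-      }
- -->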
- <!-- start class org.apache.hadoop.hdfs.server.protocol.CheckpointCommand -->
- <class name="CheckpointCommand" extends="org.apache.hadoop.hdfs.server.protocol.NamenodeCommand"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="CheckpointCommand"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="CheckpointCommand" type="org.apache.hadoop.hdfs.server.namenode.CheckpointSignature, boolean, boolean"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getSignature" return="org.apache.hadoop.hdfs.server.namenode.CheckpointSignature"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Checkpoint signature is used to ensure
- that nodes are talking about the same checkpoint.]]>
- </doc>
- </method>
- <method name="isImageObsolete" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Indicates whether the current backup image is obsolete and therefore
- needs to be discarded.
-
- @return true if current image should be discarded.]]>
- </doc>
- </method>
- <method name="needToReturnImage" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Indicates whether the new checkpoint image needs to be transferred
- back to the name-node after the checkpoint is done.
-
- @return true if the checkpoint should be returned back.]]>
- </doc>
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <doc>
- <![CDATA[Checkpoint command.
- <p>
- Returned to the backup node by the name-node as a reply to the
- {@link NamenodeProtocol#startCheckpoint(NamenodeRegistration)}
- request.<br>
- Contains:
- <ul>
- <li>{@link CheckpointSignature} identifying the particular checkpoint</li>
- <li>indicator whether the backup image should be discarded before starting
- the checkpoint</li>
- <li>indicator whether the image should be transferred back to the name-node
- upon completion of the checkpoint.</li>
- </ul>]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.protocol.CheckpointCommand -->
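- <!-- Example (hedged sketch) of acting on a CheckpointCommand, using only the
-      accessors in this listing; discardLocalImage(), doCheckpoint() and
-      uploadImageToNameNode() are hypothetical placeholders for the backup
-      node's own logic.
-
-      void onCheckpointCommand(CheckpointCommand cmd) throws IOException {
-        CheckpointSignature sig = cmd.getSignature(); // identifies this checkpoint
-        if (cmd.isImageObsolete()) {
-          discardLocalImage();                        // the old backup image must not be reused
-        }
-        doCheckpoint(sig);                            // produce the new image
-        if (cmd.needToReturnImage()) {
-          uploadImageToNameNode();                    // transfer the new image back
-        }
-      }
- -->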
- <!-- start class org.apache.hadoop.hdfs.server.protocol.DatanodeCommand -->
- <class name="DatanodeCommand" extends="org.apache.hadoop.hdfs.server.protocol.ServerCommand"
- abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="DatanodeCommand"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <field name="REGISTER" type="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="FINALIZE" type="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[Base class for data-node command.
- Issued by the name-node to notify data-nodes what should be done.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.protocol.DatanodeCommand -->
- <!-- start interface org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol -->
- <interface name="DatanodeProtocol" abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.ipc.VersionedProtocol"/>
- <method name="registerDatanode" return="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Register Datanode.
- @see org.apache.hadoop.hdfs.server.datanode.DataNode#dnRegistration
- @see org.apache.hadoop.hdfs.server.namenode.FSNamesystem#registerDatanode(DatanodeRegistration)
-
- @return updated {@link org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration}, which contains
- new storageID if the datanode did not have one and
- registration ID for further communication.]]>
- </doc>
- </method>
- <method name="sendHeartbeat" return="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
- <param name="capacity" type="long"/>
- <param name="dfsUsed" type="long"/>
- <param name="remaining" type="long"/>
- <param name="xmitsInProgress" type="int"/>
- <param name="xceiverCount" type="int"/>
- <param name="failedVolumes" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[sendHeartbeat() tells the NameNode that the DataNode is still
- alive and well. Includes some status info, too.
- It also gives the NameNode a chance to return
- an array of "DatanodeCommand" objects.
- A DatanodeCommand tells the DataNode to invalidate local block(s),
- or to copy them to other DataNodes, etc.]]>
- </doc>
- </method>
- <method name="blockReport" return="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
- <param name="blocks" type="long[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[blockReport() tells the NameNode about all the locally-stored blocks.
- The NameNode returns an array of Blocks that have become obsolete
- and should be deleted. This function is meant to upload *all*
- the locally-stored blocks. It's invoked upon startup and then
- infrequently afterwards.
- @param registration
- @param blocks - the block list as an array of longs.
- Each block is represented as 2 longs.
- This is done instead of Block[] to reduce memory used by block reports.
-
- @return - the next command for DN to process.
- @throws IOException]]>
- </doc>
- </method>
- <method name="blockReceived"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
- <param name="blocks" type="org.apache.hadoop.hdfs.protocol.Block[]"/>
- <param name="delHints" type="java.lang.String[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[blockReceived() allows the DataNode to tell the NameNode about
- recently-received block data, with a hint about the preferred replica
- to be deleted when there are excess blocks.
- For example, whenever client code
- writes a new Block here, or another DataNode copies a Block to
- this DataNode, it will call blockReceived().]]>
- </doc>
- </method>
- <method name="errorReport"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
- <param name="errorCode" type="int"/>
- <param name="msg" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[errorReport() tells the NameNode about something that has gone
- awry. Useful for debugging.]]>
- </doc>
- </method>
- <method name="versionRequest" return="org.apache.hadoop.hdfs.server.protocol.NamespaceInfo"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="processUpgradeCommand" return="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="comm" type="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[This is a very general way to send a command to the name-node during
- distributed upgrade process.
-
- The command is deliberately generic because the variety of upgrade commands is unpredictable.
- The reply from the name-node is also received in the form of an upgrade
- command.
-
- @return a reply in the form of an upgrade command]]>
- </doc>
- </method>
- <method name="reportBadBlocks"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="blocks" type="org.apache.hadoop.hdfs.protocol.LocatedBlock[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[same as {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#reportBadBlocks(LocatedBlock[])}]]>
- </doc>
- </method>
- <method name="commitBlockSynchronization"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="newgenerationstamp" type="long"/>
- <param name="newlength" type="long"/>
- <param name="closeFile" type="boolean"/>
- <param name="deleteblock" type="boolean"/>
- <param name="newtargets" type="org.apache.hadoop.hdfs.protocol.DatanodeID[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Commit block synchronization in lease recovery]]>
- </doc>
- </method>
- <field name="versionID" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[26: remove getBlockLocations optimization]]>
- </doc>
- </field>
- <field name="NOTIFY" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DISK_ERROR" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="INVALID_BLOCK" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="FATAL_DISK_ERROR" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DNA_UNKNOWN" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Determines actions that data node should perform
- when receiving a datanode command.]]>
- </doc>
- </field>
- <field name="DNA_TRANSFER" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DNA_INVALIDATE" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DNA_SHUTDOWN" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DNA_REGISTER" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DNA_FINALIZE" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DNA_RECOVERBLOCK" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DNA_ACCESSKEYUPDATE" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[Protocol that a DFS datanode uses to communicate with the NameNode.
- It's used to upload current load information and block reports.
- The only way a NameNode can communicate with a DataNode is by
- returning values from these functions.]]>
- </doc>
- </interface>
- <!-- end interface org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol -->
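- <!-- Example (hedged sketch) of the datanode side of the heartbeat exchange
-      described above. getAction() is assumed to be inherited from ServerCommand
-      (not shown in this listing); the local variables and handler methods are
-      hypothetical placeholders.
-
-      DatanodeCommand[] cmds = namenode.sendHeartbeat(registration,
-          capacity, dfsUsed, remaining, xmitsInProgress, xceiverCount, failedVolumes);
-      if (cmds != null) {
-        for (DatanodeCommand cmd : cmds) {
-          switch (cmd.getAction()) {
-            case DatanodeProtocol.DNA_TRANSFER:   transferBlocks((BlockCommand) cmd);   break;
-            case DatanodeProtocol.DNA_INVALIDATE: invalidateBlocks((BlockCommand) cmd); break;
-            case DatanodeProtocol.DNA_REGISTER:   register();                           break;
-            case DatanodeProtocol.DNA_FINALIZE:   finalizeUpgrade();                    break;
-            default: break;                       // DNA_UNKNOWN and others: ignore
-          }
-        }
-      }
- -->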
- <!-- start class org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration -->
- <class name="DatanodeRegistration" extends="org.apache.hadoop.hdfs.protocol.DatanodeID"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.io.Writable"/>
- <implements name="org.apache.hadoop.hdfs.server.protocol.NodeRegistration"/>
- <constructor name="DatanodeRegistration"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Default constructor.]]>
- </doc>
- </constructor>
- <constructor name="DatanodeRegistration" type="java.lang.String"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Create DatanodeRegistration]]>
- </doc>
- </constructor>
- <method name="setInfoPort"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="infoPort" type="int"/>
- </method>
- <method name="setIpcPort"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="ipcPort" type="int"/>
- </method>
- <method name="setStorageInfo"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="storage" type="org.apache.hadoop.hdfs.server.datanode.DataStorage"/>
- </method>
- <method name="setName"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="name" type="java.lang.String"/>
- </method>
- <method name="getVersion" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getRegistrationID" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getAddress" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="toString" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="equals" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="to" type="java.lang.Object"/>
- </method>
- <method name="hashCode" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <field name="storageInfo" type="org.apache.hadoop.hdfs.server.common.StorageInfo"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="exportedKeys" type="org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[DatanodeRegistration class contains all information the name-node needs
- to identify and verify a data-node when it contacts the name-node.
- This information is sent by the data-node with each communication request.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration -->
- <!-- start class org.apache.hadoop.hdfs.server.protocol.DisallowedDatanodeException -->
- <class name="DisallowedDatanodeException" extends="java.io.IOException"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="DisallowedDatanodeException" type="org.apache.hadoop.hdfs.protocol.DatanodeID"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <doc>
- <![CDATA[This exception is thrown when a datanode tries to register or communicate
- with the namenode when it does not appear on the list of included nodes,
- or has been specifically excluded.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.protocol.DisallowedDatanodeException -->
- <!-- start interface org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol -->
- <interface name="InterDatanodeProtocol" abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.ipc.VersionedProtocol"/>
- <method name="initReplicaRecovery" return="org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="rBlock" type="org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Initialize a replica recovery.
-
- @return actual state of the replica on this data-node or
- null if data-node does not have the replica.]]>
- </doc>
- </method>
- <method name="updateReplicaUnderRecovery" return="org.apache.hadoop.hdfs.protocol.Block"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="oldBlock" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="recoveryId" type="long"/>
- <param name="newLength" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Update replica with the new generation stamp and length.]]>
- </doc>
- </method>
- <field name="LOG" type="org.apache.commons.logging.Log"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="versionID" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[5: getBlockMetaDataInfo(), updateBlock() removed.]]>
- </doc>
- </field>
- <doc>
- <![CDATA[An inter-datanode protocol for updating generation stamp]]>
- </doc>
- </interface>
- <!-- end interface org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol -->
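- <!-- Example (hedged sketch) of the recovery flow implied by this interface and
-      by BlockRecoveryCommand.RecoveringBlock above: the primary data-node asks
-      each replica for its state, picks a consistent length (logic elided), and
-      finalizes the surviving replicas. chooseConsistentLength() and the use of
-      getBlock() from the LocatedBlock superclass are assumptions for illustration.
-
-      long recoveryId = rBlock.getNewGenerationStamp();   // doubles as the recovery id
-      List<InterDatanodeProtocol> participants = new ArrayList<InterDatanodeProtocol>();
-      List<ReplicaRecoveryInfo> infos = new ArrayList<ReplicaRecoveryInfo>();
-      for (InterDatanodeProtocol dn : replicaDatanodes) {
-        ReplicaRecoveryInfo info = dn.initReplicaRecovery(rBlock);
-        if (info != null) {                                // null: this data-node has no replica
-          participants.add(dn);
-          infos.add(info);
-        }
-      }
-      long newLength = chooseConsistentLength(infos);      // hypothetical helper
-      for (InterDatanodeProtocol dn : participants) {
-        dn.updateReplicaUnderRecovery(rBlock.getBlock(), recoveryId, newLength);
-      }
- -->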
- <!-- start class org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand -->
- <class name="KeyUpdateCommand" extends="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="KeyUpdateCommand" type="org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getExportedKeys" return="org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand -->
- <!-- start class org.apache.hadoop.hdfs.server.protocol.NamenodeCommand -->
- <class name="NamenodeCommand" extends="org.apache.hadoop.hdfs.server.protocol.ServerCommand"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="NamenodeCommand"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="NamenodeCommand" type="int"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <doc>
- <![CDATA[Base class for name-node command.
- Issued by the name-node to notify other name-nodes what should be done.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.protocol.NamenodeCommand -->
- <!-- start interface org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol -->
- <interface name="NamenodeProtocol" abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.ipc.VersionedProtocol"/>
- <method name="getBlocks" return="org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="datanode" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo"/>
- <param name="size" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get a list of blocks belonging to <code>datanode</code>
- whose total size equals <code>size</code>.
-
- @see org.apache.hadoop.hdfs.server.balancer.Balancer
- @param datanode a data node
- @param size requested size
- @return a list of blocks & their locations
- @throws RemoteException if size is less than or equal to 0 or
- datanode does not exist]]>
- </doc>
- </method>
- <method name="getBlockKeys" return="org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get the current block keys
-
- @return ExportedBlockKeys containing current block keys
- @throws IOException]]>
- </doc>
- </method>
- <method name="getEditLogSize" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="See {@link org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode}">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get the size of the current edit log (in bytes).
- @return The number of bytes in the current edit log.
- @throws IOException
- @deprecated
- See {@link org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode}]]>
- </doc>
- </method>
- <method name="rollEditLog" return="org.apache.hadoop.hdfs.server.namenode.CheckpointSignature"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="See {@link org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode}">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Closes the current edit log and opens a new one. The
- call fails if the file system is in SafeMode.
- @throws IOException
- @return a unique token to identify this transaction.
- @deprecated
- See {@link org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode}]]>
- </doc>
- </method>
- <method name="rollFsImage"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="See {@link org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode}">
- <param name="sig" type="org.apache.hadoop.hdfs.server.namenode.CheckpointSignature"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Rolls the fsImage log. It removes the old fsImage, copies the
- new image to fsImage, removes the old edits and renames edits.new
- to edits. The call fails if any of the four files are missing.
-
- @param sig the signature of this checkpoint (old fsimage)
- @throws IOException
- @deprecated
- See {@link org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode}]]>
- </doc>
- </method>
- <method name="versionRequest" return="org.apache.hadoop.hdfs.server.protocol.NamespaceInfo"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Request name-node version and storage information.
-
- @return {@link NamespaceInfo} identifying versions and storage information
- of the name-node
- @throws IOException]]>
- </doc>
- </method>
- <method name="errorReport"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
- <param name="errorCode" type="int"/>
- <param name="msg" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Report to the active name-node that an error occurred on a subordinate node.
- Depending on the error code the active node may decide to unregister the
- reporting node.
-
- @param registration requesting node.
- @param errorCode indicates the error
- @param msg free text description of the error
- @throws IOException]]>
- </doc>
- </method>
- <method name="register" return="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Register a subordinate name-node, such as a backup node.
- @return {@link NamenodeRegistration} of the node,
- which this node has just registered with.]]>
- </doc>
- </method>
- <method name="startCheckpoint" return="org.apache.hadoop.hdfs.server.protocol.NamenodeCommand"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[A request to the active name-node to start a checkpoint.
- The name-node should decide whether to admit or reject it.
- The name-node also decides what should be done with the backup node
- image before and after the checkpoint.
-
- @see CheckpointCommand
- @see NamenodeCommand
- @see #ACT_SHUTDOWN
-
- @param registration the requesting node
- @return {@link CheckpointCommand} if checkpoint is allowed.
- @throws IOException]]>
- </doc>
- </method>
- <method name="endCheckpoint"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
- <param name="sig" type="org.apache.hadoop.hdfs.server.namenode.CheckpointSignature"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[A request to the active name-node to finalize
- a previously started checkpoint.
-
- @param registration the requesting node
- @param sig {@code CheckpointSignature} which identifies the checkpoint.
- @throws IOException]]>
- </doc>
- </method>
- <method name="journalSize" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get the size of the active name-node journal (edit log) in bytes.
-
- @param registration the requesting node
- @return The number of bytes in the journal.
- @throws IOException]]>
- </doc>
- </method>
- <method name="journal"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration"/>
- <param name="jAction" type="int"/>
- <param name="length" type="int"/>
- <param name="records" type="byte[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Journal edit records.
- This message is sent by the active name-node to the backup node
- via {@code EditLogBackupOutputStream} in order to synchronize meta-data
- changes with the backup namespace image.
-
- @param registration active node registration
- @param jAction journal action
- @param length length of the byte array
- @param records byte array containing serialized journal records
- @throws IOException]]>
- </doc>
- </method>
- <field name="versionID" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Compared to the previous version the following changes have been introduced:
- (Only the latest change is reflected.
- The log of historical changes can be retrieved from the svn).
-
- 5: Added one parameter to rollFSImage() and
- changed the definition of CheckpointSignature]]>
- </doc>
- </field>
- <field name="NOTIFY" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="FATAL" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="JA_IS_ALIVE" type="byte"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="JA_JOURNAL" type="byte"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="JA_JSPOOL_START" type="byte"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="JA_CHECKPOINT_TIME" type="byte"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="ACT_UNKNOWN" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="ACT_SHUTDOWN" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="ACT_CHECKPOINT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[Protocol that a secondary NameNode uses to communicate with the NameNode.
- It's used to get part of the name-node state.]]>
- </doc>
- </interface>
- <!-- end interface org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol -->
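- <!-- A minimal sketch of how a balancer-like client might call this interface, assuming a
-      NamenodeProtocol proxy has already been obtained elsewhere (for example via Hadoop RPC);
-      the method name fetchBlocksForNode and the 10 MB size value are illustrative only.
-
-      import java.io.IOException;
-      import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-      import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
-      import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
-      import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
-
-      public class NamenodeProtocolSketch {
-        // Ask the name-node for roughly 10 MB worth of blocks stored on the given data-node.
-        static BlocksWithLocations fetchBlocksForNode(NamenodeProtocol namenode,
-                                                      DatanodeInfo datanode) throws IOException {
-          NamespaceInfo ns = namenode.versionRequest();   // version and storage information
-          System.err.println("Talking to namespace " + ns);
-          return namenode.getBlocks(datanode, 10L * 1024 * 1024);
-        }
-      }
- -->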
- <!-- start interface org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols -->
- <interface name="NamenodeProtocols" abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.hdfs.protocol.ClientProtocol"/>
- <implements name="org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol"/>
- <implements name="org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol"/>
- <implements name="org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol"/>
- <implements name="org.apache.hadoop.security.RefreshUserMappingsProtocol"/>
- <doc>
- <![CDATA[The full set of RPC methods implemented by the Namenode.]]>
- </doc>
- </interface>
- <!-- end interface org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols -->
- <!-- start class org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration -->
- <class name="NamenodeRegistration" extends="org.apache.hadoop.hdfs.server.common.StorageInfo"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.hdfs.server.protocol.NodeRegistration"/>
- <constructor name="NamenodeRegistration"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="NamenodeRegistration" type="java.lang.String, java.lang.String, org.apache.hadoop.hdfs.server.common.StorageInfo, org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole, long"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getAddress" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getRegistrationID" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getVersion" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="toString" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getRole" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get name-node role.]]>
- </doc>
- </method>
- <method name="isRole" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="that" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole"/>
- </method>
- <method name="getCheckpointTime" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the age of the image.]]>
- </doc>
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <doc>
- <![CDATA[Information sent by a subordinate name-node to the active name-node
- during the registration process.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration -->
- <!-- start class org.apache.hadoop.hdfs.server.protocol.NamespaceInfo -->
- <class name="NamespaceInfo" extends="org.apache.hadoop.hdfs.server.common.StorageInfo"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="NamespaceInfo"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="NamespaceInfo" type="int, long, int"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getBuildVersion" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getDistributedUpgradeVersion" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <doc>
- <![CDATA[NamespaceInfo is returned by the name-node in reply
- to a data-node handshake.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.protocol.NamespaceInfo -->
- <!-- start interface org.apache.hadoop.hdfs.server.protocol.NodeRegistration -->
- <interface name="NodeRegistration" abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <method name="getAddress" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get address of the server node.
- @return hostname:portNumber]]>
- </doc>
- </method>
- <method name="getRegistrationID" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get registration ID of the server node.]]>
- </doc>
- </method>
- <method name="getVersion" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get layout version of the server node.]]>
- </doc>
- </method>
- <method name="toString" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <doc>
- <![CDATA[Generic class specifying information which needs to be sent to the name-node
- during the registration process.]]>
- </doc>
- </interface>
- <!-- end interface org.apache.hadoop.hdfs.server.protocol.NodeRegistration -->
- <!-- start class org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo -->
- <class name="ReplicaRecoveryInfo" extends="org.apache.hadoop.hdfs.protocol.Block"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="ReplicaRecoveryInfo"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="ReplicaRecoveryInfo" type="long, long, long, org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getOriginalReplicaState" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="equals" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="o" type="java.lang.Object"/>
- </method>
- <method name="hashCode" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <doc>
- <![CDATA[Replica recovery information.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo -->
- <!-- start class org.apache.hadoop.hdfs.server.protocol.ServerCommand -->
- <class name="ServerCommand" extends="java.lang.Object"
- abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.io.Writable"/>
- <constructor name="ServerCommand"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Unknown server command constructor.
- Creates a command with action 0.
-
- @see NamenodeProtocol#ACT_UNKNOWN
- @see DatanodeProtocol#DNA_UNKNOWN]]>
- </doc>
- </constructor>
- <constructor name="ServerCommand" type="int"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Create a command for the specified action.
- Actions are protocol specific.
-
- @see DatanodeProtocol
- @see NamenodeProtocol
- @param action]]>
- </doc>
- </constructor>
- <method name="getAction" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get server command action.
- @return action code.]]>
- </doc>
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <doc>
- <![CDATA[Base class for a server command.
- Issued by the name-node to notify other servers what should be done.
- Commands are defined by actions defined in respective protocols.
-
- @see DatanodeProtocol
- @see NamenodeProtocol]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.protocol.ServerCommand -->
- <!-- start class org.apache.hadoop.hdfs.server.protocol.UpgradeCommand -->
- <class name="UpgradeCommand" extends="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="UpgradeCommand"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="UpgradeCommand" type="int, int, short"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getVersion" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getCurrentStatus" return="short"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <field name="UC_ACTION_REPORT_STATUS" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="UC_ACTION_START_UPGRADE" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[This is a generic distributed upgrade command.
-
- During an upgrade, cluster components send upgrade commands to each other
- in order to obtain or share information.
- Each upgrade is expected to define its own specific upgrade commands
- by deriving them from this class.
- An upgrade command contains the version of the upgrade, which is verified
- on the receiving side, and the current status of the upgrade.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.protocol.UpgradeCommand -->
- </package>
- <package name="org.apache.hadoop.hdfs.tools">
- <!-- start class org.apache.hadoop.hdfs.tools.DelegationTokenFetcher -->
- <class name="DelegationTokenFetcher" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="DelegationTokenFetcher"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="main"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="args" type="java.lang.String[]"/>
- <exception name="Exception" type="java.lang.Exception"/>
- <doc>
- <![CDATA[Command-line interface]]>
- </doc>
- </method>
- <method name="getDTfromRemote" return="org.apache.hadoop.security.Credentials"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="nnAddr" type="java.lang.String"/>
- <param name="renewer" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="renewDelegationToken" return="long"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="nnAddr" type="java.lang.String"/>
- <param name="tok" type="org.apache.hadoop.security.token.Token"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Renew a Delegation Token.
- @param nnAddr the NameNode's address
- @param tok the token to renew
- @return the time at which the token will next expire.
- @throws IOException]]>
- </doc>
- </method>
- <method name="cancelDelegationToken"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="nnAddr" type="java.lang.String"/>
- <param name="tok" type="org.apache.hadoop.security.token.Token"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Cancel a Delegation Token.
- @param nnAddr the NameNode's address
- @param tok the token to cancel
- @throws IOException]]>
- </doc>
- </method>
- <doc>
- <![CDATA[Fetch a DelegationToken from the current Namenode and store it in the
- specified file.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.tools.DelegationTokenFetcher -->
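- <!-- A minimal sketch of fetching, renewing, and cancelling a delegation token with the static
-      helpers above; the name-node address and the renewer principal are placeholders, and raw
-      Token types are used to sidestep the generic parameter, which this record erases.
-
-      import org.apache.hadoop.hdfs.tools.DelegationTokenFetcher;
-      import org.apache.hadoop.security.Credentials;
-      import org.apache.hadoop.security.token.Token;
-
-      public class TokenFetcherSketch {
-        public static void main(String[] args) throws Exception {
-          String nnAddr = "namenode.example.com:50070";   // placeholder address
-          Credentials creds = DelegationTokenFetcher.getDTfromRemote(nnAddr, "mapred");
-          for (Token tok : creds.getAllTokens()) {
-            long expiry = DelegationTokenFetcher.renewDelegationToken(nnAddr, tok);
-            System.out.println("Token renewed until " + expiry);
-            DelegationTokenFetcher.cancelDelegationToken(nnAddr, tok);
-          }
-        }
-      }
- -->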
- <!-- start class org.apache.hadoop.hdfs.tools.DFSAdmin -->
- <class name="DFSAdmin" extends="org.apache.hadoop.fs.FsShell"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="DFSAdmin"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Construct a DFSAdmin object.]]>
- </doc>
- </constructor>
- <constructor name="DFSAdmin" type="org.apache.hadoop.conf.Configuration"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Construct a DFSAdmin object.]]>
- </doc>
- </constructor>
- <method name="report"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Gives a report on how the FileSystem is doing.
- @exception IOException if the filesystem does not exist.]]>
- </doc>
- </method>
- <method name="setSafeMode"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="argv" type="java.lang.String[]"/>
- <param name="idx" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Safe mode maintenance command.
- Usage: java DFSAdmin -safemode [enter | leave | get]
- @param argv List of command line parameters.
- @param idx The index of the command that is being processed.
- @exception IOException if the filesystem does not exist.]]>
- </doc>
- </method>
- <method name="saveNamespace" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Command to ask the namenode to save the namespace.
- Usage: java DFSAdmin -saveNamespace
- @exception IOException
- @see org.apache.hadoop.hdfs.protocol.ClientProtocol#saveNamespace()]]>
- </doc>
- </method>
- <method name="restoreFaileStorage" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="arg" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Command to enable/disable/check restoring of failed storage replicas in the namenode.
- Usage: java DFSAdmin -restoreFailedStorage true|false|check
- @exception IOException
- @see org.apache.hadoop.hdfs.protocol.ClientProtocol#restoreFailedStorage(String arg)]]>
- </doc>
- </method>
- <method name="refreshNodes" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Command to ask the namenode to reread the hosts and excluded hosts
- file.
- Usage: java DFSAdmin -refreshNodes
- @exception IOException]]>
- </doc>
- </method>
- <method name="finalizeUpgrade" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Command to ask the namenode to finalize previously performed upgrade.
- Usage: java DFSAdmin -finalizeUpgrade
- @exception IOException]]>
- </doc>
- </method>
- <method name="upgradeProgress" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="argv" type="java.lang.String[]"/>
- <param name="idx" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Command to request current distributed upgrade status,
- a detailed status, or to force the upgrade to proceed.
-
- Usage: java DFSAdmin -upgradeProgress [status | details | force]
- @exception IOException]]>
- </doc>
- </method>
- <method name="metaSave" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="argv" type="java.lang.String[]"/>
- <param name="idx" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Dumps DFS data structures into the specified file.
- Usage: java DFSAdmin -metasave filename
- @param argv List of command line parameters.
- @param idx The index of the command that is being processed.
- @exception IOException if an error occurred while accessing
- the file or path.]]>
- </doc>
- </method>
- <method name="printTopology" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Display each rack and the nodes assigned to that rack, as determined
- by the NameNode, in a hierarchical manner. The nodes and racks are
- sorted alphabetically.
-
- @throws IOException if an error occurs while getting the datanode report]]>
- </doc>
- </method>
- <method name="refreshServiceAcl" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Refresh the authorization policy on the {@link NameNode}.
- @return exitcode 0 on success, non-zero on failure
- @throws IOException]]>
- </doc>
- </method>
- <method name="refreshUserToGroupsMappings" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Refresh the user-to-groups mappings on the {@link NameNode}.
- @return exitcode 0 on success, non-zero on failure
- @throws IOException]]>
- </doc>
- </method>
- <method name="refreshSuperUserGroupsConfiguration" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Refresh the super-user groups configuration on the {@link NameNode}.
- @return exitcode 0 on success, non-zero on failure
- @throws IOException]]>
- </doc>
- </method>
- <method name="run" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="argv" type="java.lang.String[]"/>
- <exception name="Exception" type="java.lang.Exception"/>
- <doc>
- <![CDATA[@param argv The parameters passed to this program.
- @exception Exception if the filesystem does not exist.
- @return 0 on success, non-zero on error.]]>
- </doc>
- </method>
- <method name="main"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="argv" type="java.lang.String[]"/>
- <exception name="Exception" type="java.lang.Exception"/>
- <doc>
- <![CDATA[main() has some simple utility methods.
- @param argv Command line parameters.
- @exception Exception if the filesystem does not exist.]]>
- </doc>
- </method>
- <doc>
- <![CDATA[This class provides some DFS administrative access.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.tools.DFSAdmin -->
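- <!-- A minimal sketch of invoking DFSAdmin programmatically through the run(String[]) method
-      documented above; the same commands are normally issued as "hadoop dfsadmin ..." from the
-      shell, and the configuration used here is whatever is on the classpath.
-
-      import org.apache.hadoop.conf.Configuration;
-      import org.apache.hadoop.hdfs.tools.DFSAdmin;
-
-      public class DFSAdminSketch {
-        public static void main(String[] args) throws Exception {
-          DFSAdmin admin = new DFSAdmin(new Configuration());
-          int rc = admin.run(new String[] { "-safemode", "get" });   // query safe mode state
-          if (rc == 0) {
-            admin.run(new String[] { "-report" });                   // print a cluster report
-          }
-          System.exit(rc);
-        }
-      }
- -->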
- <!-- start class org.apache.hadoop.hdfs.tools.DFSck -->
- <class name="DFSck" extends="org.apache.hadoop.conf.Configured"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.util.Tool"/>
- <constructor name="DFSck" type="org.apache.hadoop.conf.Configuration"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Filesystem checker.
- @param conf current Configuration]]>
- </doc>
- </constructor>
- <constructor name="DFSck" type="org.apache.hadoop.conf.Configuration, java.io.PrintStream"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </constructor>
- <method name="run" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="args" type="java.lang.String[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[@param args]]>
- </doc>
- </method>
- <method name="main"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="args" type="java.lang.String[]"/>
- <exception name="Exception" type="java.lang.Exception"/>
- </method>
- <doc>
- <![CDATA[This class provides rudimentary checking of DFS volumes for errors and
- sub-optimal conditions.
- <p>The tool scans all files and directories, starting from an indicated
- root path. The following abnormal conditions are detected and handled:</p>
- <ul>
- <li>files with blocks that are completely missing from all datanodes.<br/>
- In this case the tool can perform one of the following actions:
- <ul>
- <li>none ({@link org.apache.hadoop.hdfs.server.namenode.NamenodeFsck#FIXING_NONE})</li>
- <li>move corrupted files to /lost+found directory on DFS
- ({@link org.apache.hadoop.hdfs.server.namenode.NamenodeFsck#FIXING_MOVE}). Remaining data blocks are saved as a
- block chains, representing longest consecutive series of valid blocks.</li>
- <li>delete corrupted files ({@link org.apache.hadoop.hdfs.server.namenode.NamenodeFsck#FIXING_DELETE})</li>
- </ul>
- </li>
- <li>detect files with under-replicated or over-replicated blocks</li>
- </ul>
- Additionally, the tool collects detailed overall DFS statistics, and
- optionally can print detailed statistics on block locations and replication
- factors of each file.
- The tool also provides an option to filter open files during the scan.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.tools.DFSck -->
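- <!-- A minimal sketch of running the checker programmatically via its Tool interface; the path
-      argument and the extra flags are illustrative (the usual invocation is "hadoop fsck /path"
-      from the shell).
-
-      import org.apache.hadoop.conf.Configuration;
-      import org.apache.hadoop.hdfs.tools.DFSck;
-      import org.apache.hadoop.util.ToolRunner;
-
-      public class FsckSketch {
-        public static void main(String[] args) throws Exception {
-          int rc = ToolRunner.run(new DFSck(new Configuration()),
-                                  new String[] { "/", "-files", "-blocks" });
-          System.exit(rc);
-        }
-      }
- -->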
- <!-- start class org.apache.hadoop.hdfs.tools.HDFSConcat -->
- <class name="HDFSConcat" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="HDFSConcat"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="main"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="args" type="java.lang.String[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[@param args]]>
- </doc>
- </method>
- </class>
- <!-- end class org.apache.hadoop.hdfs.tools.HDFSConcat -->
- <!-- start class org.apache.hadoop.hdfs.tools.JMXGet -->
- <class name="JMXGet" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="JMXGet"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="setService"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="service" type="java.lang.String"/>
- </method>
- <method name="setPort"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="port" type="java.lang.String"/>
- </method>
- <method name="setServer"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="server" type="java.lang.String"/>
- </method>
- <method name="setLocalVMUrl"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="url" type="java.lang.String"/>
- </method>
- <method name="printAllValues"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="Exception" type="java.lang.Exception"/>
- <doc>
- <![CDATA[Print all attributes' values.]]>
- </doc>
- </method>
- <method name="getValue" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="key" type="java.lang.String"/>
- <exception name="Exception" type="java.lang.Exception"/>
- <doc>
- <![CDATA[Get a single value by key.]]>
- </doc>
- </method>
- <method name="init"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="Exception" type="java.lang.Exception"/>
- <doc>
- <![CDATA[Initializes the MBean server connection.
- @throws Exception]]>
- </doc>
- </method>
- <method name="main"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="args" type="java.lang.String[]"/>
- <doc>
- <![CDATA[main
-
- @param args]]>
- </doc>
- </method>
- <doc>
- <![CDATA[Tool to get data from the NameNode or DataNode using MBeans. Currently the
- following MBeans are available (under the hadoop domain):
- hadoop:service=NameNode,name=FSNamesystemState (static)
- hadoop:service=NameNode,name=NameNodeActivity (dynamic)
- hadoop:service=NameNode,name=RpcActivityForPort9000 (dynamic)
- hadoop:service=DataNode,name=RpcActivityForPort50020 (dynamic)
- hadoop:name=service=DataNode,FSDatasetState-UndefinedStorageId663800459
- (static)
- hadoop:service=DataNode,name=DataNodeActivity-UndefinedStorageId-520845215
- (dynamic)
-
-
- Implementation note: all logging is sent to System.err (since it is a command-line
- tool).]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.tools.JMXGet -->
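- <!-- A minimal sketch of using JMXGet in process rather than from the command line, based on
-      the setters and accessors listed above; the host, port, and attribute name are placeholders.
-
-      import org.apache.hadoop.hdfs.tools.JMXGet;
-
-      public class JMXGetSketch {
-        public static void main(String[] args) throws Exception {
-          JMXGet jmx = new JMXGet();
-          jmx.setService("NameNode");            // which Hadoop service to query
-          jmx.setServer("namenode.example.com"); // placeholder host
-          jmx.setPort("8004");                   // placeholder JMX port
-          jmx.init();                            // connect to the MBean server
-          System.out.println(jmx.getValue("FilesTotal"));  // attribute name is illustrative
-          jmx.printAllValues();
-        }
-      }
- -->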
- </package>
- <package name="org.apache.hadoop.hdfs.tools.offlineImageViewer">
- <!-- start class org.apache.hadoop.hdfs.tools.offlineImageViewer.NameDistributionVisitor -->
- <class name="NameDistributionVisitor" extends="org.apache.hadoop.hdfs.tools.offlineImageViewer.TextWriterImageVisitor"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="NameDistributionVisitor" type="java.lang.String, boolean"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </constructor>
- <doc>
- <![CDATA[File name distribution visitor.
- <p>
- It analyzes file names in fsimage and prints the following information:
- <li>Number of unique file names</li>
- <li>Number of file names and the corresponding range of the number of files
- that use these same names</li>
- <li>Heap saved if the file name objects are reused</li>]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.tools.offlineImageViewer.NameDistributionVisitor -->
- <!-- start class org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewer -->
- <class name="OfflineImageViewer" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="OfflineImageViewer" type="java.lang.String, org.apache.hadoop.hdfs.tools.offlineImageViewer.ImageVisitor, boolean"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="go"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Process image file.]]>
- </doc>
- </method>
- <method name="buildOptions" return="org.apache.commons.cli.Options"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Build command-line options and descriptions]]>
- </doc>
- </method>
- <method name="main"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="args" type="java.lang.String[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Entry point to command-line-driven operation. The user may specify
- options and start the fsimage viewer from the command line. The program
- will process the image file and exit cleanly or, if an error is
- encountered, inform the user and exit.
- @param args Command line options
- @throws IOException]]>
- </doc>
- </method>
- <doc>
- <![CDATA[OfflineImageViewer to dump the contents of a Hadoop image file to XML
- or the console. Main entry point into the utility, either via the
- command line or programmatically.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewer -->
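- <!-- A minimal sketch of driving the viewer programmatically, pairing it with the
-      NameDistributionVisitor documented earlier in this package; the file paths are placeholders,
-      the meaning of the boolean arguments (assumed here to be print-to-screen and skip-blocks) is
-      an assumption, and it is likewise assumed that TextWriterImageVisitor is itself an
-      ImageVisitor so the visitor can be passed to the constructor above.
-
-      import org.apache.hadoop.hdfs.tools.offlineImageViewer.NameDistributionVisitor;
-      import org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewer;
-
-      public class ImageViewerSketch {
-        public static void main(String[] args) throws Exception {
-          NameDistributionVisitor visitor =
-              new NameDistributionVisitor("/tmp/fsimage-names.txt", false); // output file, assumed print-to-screen flag
-          OfflineImageViewer viewer =
-              new OfflineImageViewer("/tmp/fsimage", visitor, false);       // image file, visitor, assumed skip-blocks flag
-          viewer.go();  // parse the image and run the visitor over it
-        }
-      }
- -->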
- </package>
- <package name="org.apache.hadoop.hdfs.util">
- <!-- start class org.apache.hadoop.hdfs.util.ByteArray -->
- <class name="ByteArray" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="ByteArray" type="byte[]"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getBytes" return="byte[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="hashCode" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="equals" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="o" type="java.lang.Object"/>
- </method>
- <doc>
- <![CDATA[Wrapper for byte[] to allow byte[] to be used as a key in a HashMap.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.util.ByteArray -->
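- <!-- A minimal sketch of why this wrapper exists: a raw byte[] uses identity hashCode/equals and
-      therefore does not work as a HashMap key, while ByteArray supplies content-based versions.
-
-      import java.util.HashMap;
-      import java.util.Map;
-      import org.apache.hadoop.hdfs.util.ByteArray;
-
-      public class ByteArrayKeySketch {
-        public static void main(String[] args) {
-          Map<ByteArray, String> index = new HashMap<ByteArray, String>();
-          index.put(new ByteArray(new byte[] { 1, 2, 3 }), "first");
-          // A second, distinct array with the same contents finds the same entry.
-          String hit = index.get(new ByteArray(new byte[] { 1, 2, 3 }));
-          System.out.println(hit);  // prints "first"
-        }
-      }
- -->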
- <!-- start class org.apache.hadoop.hdfs.util.DataTransferThrottler -->
- <class name="DataTransferThrottler" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="DataTransferThrottler" type="long"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Constructor
- @param bandwidthPerSec bandwidth allowed in bytes per second.]]>
- </doc>
- </constructor>
- <constructor name="DataTransferThrottler" type="long, long"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Constructor
- @param period in milliseconds. Bandwidth is enforced over this
- period.
- @param bandwidthPerSec bandwidth allowed in bytes per second.]]>
- </doc>
- </constructor>
- <method name="getBandwidth" return="long"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[@return current throttle bandwidth in bytes per second.]]>
- </doc>
- </method>
- <method name="setBandwidth"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="bytesPerSecond" type="long"/>
- <doc>
- <![CDATA[Sets the throttle bandwidth. This takes effect at the latest by the end of the
- current period.
-
- @param bytesPerSecond]]>
- </doc>
- </method>
- <method name="throttle"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="numOfBytes" type="long"/>
- <doc>
- <![CDATA[Given the numOfBytes sent/received since the last time throttle was called,
- make the current thread sleep if the I/O rate is too fast
- compared to the given bandwidth.
- @param numOfBytes
- number of bytes sent/received since the last time throttle was called]]>
- </doc>
- </method>
- <doc>
- <![CDATA[A class to throttle data transfers.
- This class is thread-safe and can be shared by multiple threads.
- The parameter bandwidthPerSec specifies the total bandwidth shared by
- all threads.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.util.DataTransferThrottler -->
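- <!-- A minimal sketch of throttling a copy loop with the class above; the 1 MB/s limit and the
-      stream variables are illustrative.
-
-      import java.io.IOException;
-      import java.io.InputStream;
-      import java.io.OutputStream;
-      import org.apache.hadoop.hdfs.util.DataTransferThrottler;
-
-      public class ThrottledCopySketch {
-        static void copy(InputStream in, OutputStream out) throws IOException {
-          DataTransferThrottler throttler = new DataTransferThrottler(1024L * 1024L); // 1 MB/s
-          byte[] buf = new byte[64 * 1024];
-          int n;
-          while ((n = in.read(buf)) > 0) {
-            out.write(buf, 0, n);
-            throttler.throttle(n);  // sleep if the cumulative rate exceeds the configured bandwidth
-          }
-        }
-      }
- -->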
- <!-- start interface org.apache.hadoop.hdfs.util.GSet -->
- <interface name="GSet" abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="java.lang.Iterable"/>
- <method name="size" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[@return The size of this set.]]>
- </doc>
- </method>
- <method name="contains" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="key" type="java.lang.Object"/>
- <doc>
- <![CDATA[Does this set contain an element corresponding to the given key?
- @param key The given key.
- @return true if the given key is equal to a stored element.
- Otherwise, return false.
- @throws NullPointerException if key == null.]]>
- </doc>
- </method>
- <method name="get" return="java.lang.Object"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="key" type="java.lang.Object"/>
- <doc>
- <![CDATA[Return the stored element which is equal to the given key.
- This operation is similar to {@link java.util.Map#get(Object)}.
- @param key The given key.
- @return The stored element if it exists.
- Otherwise, return null.
- @throws NullPointerException if key == null.]]>
- </doc>
- </method>
- <method name="put" return="java.lang.Object"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="element" type="java.lang.Object"/>
- <doc>
- <![CDATA[Add/replace an element.
- If the element does not exist, add it to the set.
- Otherwise, replace the existing element.
- Note that this operation
- is similar to {@link java.util.Map#put(Object, Object)}
- but is different from {@link java.util.Set#add(Object)}
- which does not replace the existing element if there is any.
- @param element The element being put.
- @return the previous stored element if there is any.
- Otherwise, return null.
- @throws NullPointerException if element == null.]]>
- </doc>
- </method>
- <method name="remove" return="java.lang.Object"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="key" type="java.lang.Object"/>
- <doc>
- <![CDATA[Remove the element corresponding to the given key.
- This operation is similar to {@link java.util.Map#remove(Object)}.
- @param key The key of the element being removed.
- @return If such element exists, return it.
- Otherwise, return null.
- @throws NullPointerException if key == null.]]>
- </doc>
- </method>
- <doc>
- <![CDATA[A {@link GSet} is a set
- which supports the {@link #get(Object)} operation.
- The {@link #get(Object)} operation uses a key to look up an element.
-
- Null elements are not supported.
-
- @param <K> The type of the keys.
- @param <E> The type of the elements, which must be a subclass of the keys.]]>
- </doc>
- </interface>
- <!-- end interface org.apache.hadoop.hdfs.util.GSet -->
- <!-- start class org.apache.hadoop.hdfs.util.GSetByHashMap -->
- <class name="GSetByHashMap" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.hdfs.util.GSet"/>
- <constructor name="GSetByHashMap" type="int, float"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="size" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="contains" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="k" type="java.lang.Object"/>
- </method>
- <method name="get" return="java.lang.Object"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="k" type="java.lang.Object"/>
- </method>
- <method name="put" return="java.lang.Object"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="element" type="java.lang.Object"/>
- </method>
- <method name="remove" return="java.lang.Object"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="k" type="java.lang.Object"/>
- </method>
- <method name="iterator" return="java.util.Iterator"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <doc>
- <![CDATA[A {@link GSet} implementation by {@link HashMap}.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.util.GSetByHashMap -->
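- <!-- A minimal sketch of the GSet contract using the HashMap-backed implementation above; note
-      that elements serve as their own keys here (String is both K and E), and put() replaces an
-      existing element rather than ignoring it as java.util.Set.add would.
-
-      import org.apache.hadoop.hdfs.util.GSet;
-      import org.apache.hadoop.hdfs.util.GSetByHashMap;
-
-      public class GSetSketch {
-        public static void main(String[] args) {
-          GSet<String, String> set = new GSetByHashMap<String, String>(16, 0.75f);
-          set.put("alpha");
-          String previous = set.put("alpha");        // replaces and returns the stored element
-          System.out.println(previous);              // "alpha"
-          System.out.println(set.contains("alpha")); // true
-          System.out.println(set.get("alpha"));      // "alpha"
-          System.out.println(set.remove("alpha"));   // "alpha"
-          System.out.println(set.size());            // 0
-        }
-      }
- -->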
- <!-- start class org.apache.hadoop.hdfs.util.LightWeightGSet -->
- <class name="LightWeightGSet" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.hdfs.util.GSet"/>
- <constructor name="LightWeightGSet" type="int"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[@param recommended_length Recommended size of the internal array.]]>
- </doc>
- </constructor>
- <method name="size" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="get" return="java.lang.Object"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="key" type="java.lang.Object"/>
- </method>
- <method name="contains" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="key" type="java.lang.Object"/>
- </method>
- <method name="put" return="java.lang.Object"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="element" type="java.lang.Object"/>
- </method>
- <method name="remove" return="java.lang.Object"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="key" type="java.lang.Object"/>
- </method>
- <method name="iterator" return="java.util.Iterator"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="toString" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="printDetails"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.PrintStream"/>
- <doc>
- <![CDATA[Print detailed information of this object.]]>
- </doc>
- </method>
- <field name="LOG" type="org.apache.commons.logging.Log"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[A low memory footprint {@link GSet} implementation,
- which uses an array for storing the elements
- and linked lists for collision resolution.
- No rehash will be performed.
- Therefore, the internal array will never be resized.
- This class does not support null elements.
- This class is not thread-safe.
- @param <K> Key type for looking up the elements
- @param <E> Element type, which must be
- (1) a subclass of K, and
- (2) implementing {@link LinkedElement} interface.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.util.LightWeightGSet -->
- <!-- start interface org.apache.hadoop.hdfs.util.LightWeightGSet.LinkedElement -->
- <interface name="LightWeightGSet.LinkedElement" abstract="true"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <method name="setNext"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="next" type="org.apache.hadoop.hdfs.util.LightWeightGSet.LinkedElement"/>
- <doc>
- <![CDATA[Set the next element.]]>
- </doc>
- </method>
- <method name="getNext" return="org.apache.hadoop.hdfs.util.LightWeightGSet.LinkedElement"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the next element.]]>
- </doc>
- </method>
- <doc>
- <![CDATA[Elements of {@link LightWeightGSet}.]]>
- </doc>
- </interface>
- <!-- end interface org.apache.hadoop.hdfs.util.LightWeightGSet.LinkedElement -->
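- <!-- A minimal sketch of an element type usable with LightWeightGSet: it must be comparable by
-      hashCode/equals (the key side) and carry the intrusive next pointer required by the
-      LinkedElement interface above. The Entry class and its long id field are illustrative.
-
-      import org.apache.hadoop.hdfs.util.LightWeightGSet;
-
-      public class LightWeightGSetSketch {
-        // Element acts as its own key and stores the collision-chain pointer inline.
-        static class Entry implements LightWeightGSet.LinkedElement {
-          final long id;
-          private LightWeightGSet.LinkedElement next;
-          Entry(long id) { this.id = id; }
-          public int hashCode() { return (int) (id ^ (id >>> 32)); }
-          public boolean equals(Object o) { return o instanceof Entry && ((Entry) o).id == id; }
-          public void setNext(LightWeightGSet.LinkedElement n) { next = n; }
-          public LightWeightGSet.LinkedElement getNext() { return next; }
-        }
-
-        public static void main(String[] args) {
-          LightWeightGSet<Entry, Entry> set = new LightWeightGSet<Entry, Entry>(1024);
-          set.put(new Entry(42L));
-          System.out.println(set.contains(new Entry(42L)));  // true: looked up by equals/hashCode
-        }
-      }
- -->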
- </package>
- </api>